repo_name (stringlengths 6-100) | path (stringlengths 4-294) | copies (stringclasses, 981 values) | size (stringlengths 4-6) | content (stringlengths 606-896k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
ProfessionalIT/maxigenios-website | sdk/google_appengine/lib/django-1.5/django/core/files/base.py | 147 | 4653 | from __future__ import unicode_literals
import os
from io import BytesIO, StringIO, UnsupportedOperation
from django.utils.encoding import smart_text
from django.core.files.utils import FileProxyMixin
from django.utils import six
from django.utils.encoding import force_bytes, python_2_unicode_compatible
@python_2_unicode_compatible
class File(FileProxyMixin):
DEFAULT_CHUNK_SIZE = 64 * 2**10
def __init__(self, file, name=None):
self.file = file
if name is None:
name = getattr(file, 'name', None)
self.name = name
if hasattr(file, 'mode'):
self.mode = file.mode
def __str__(self):
return smart_text(self.name or '')
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self or "None")
def __bool__(self):
return bool(self.name)
def __nonzero__(self): # Python 2 compatibility
return type(self).__bool__(self)
def __len__(self):
return self.size
def _get_size(self):
if not hasattr(self, '_size'):
if hasattr(self.file, 'size'):
self._size = self.file.size
elif hasattr(self.file, 'name') and os.path.exists(self.file.name):
self._size = os.path.getsize(self.file.name)
elif hasattr(self.file, 'tell') and hasattr(self.file, 'seek'):
pos = self.file.tell()
self.file.seek(0, os.SEEK_END)
self._size = self.file.tell()
self.file.seek(pos)
else:
raise AttributeError("Unable to determine the file's size.")
return self._size
def _set_size(self, size):
self._size = size
size = property(_get_size, _set_size)
def _get_closed(self):
return not self.file or self.file.closed
closed = property(_get_closed)
def chunks(self, chunk_size=None):
"""
Read the file and yield chunks of ``chunk_size`` bytes (defaults to
``UploadedFile.DEFAULT_CHUNK_SIZE``).
"""
if not chunk_size:
chunk_size = self.DEFAULT_CHUNK_SIZE
try:
self.seek(0)
except (AttributeError, UnsupportedOperation):
pass
while True:
data = self.read(chunk_size)
if not data:
break
yield data
def multiple_chunks(self, chunk_size=None):
"""
Returns ``True`` if you can expect multiple chunks.
NB: If a particular file representation is in memory, subclasses should
always return ``False`` -- there's no good reason to read from memory in
chunks.
"""
if not chunk_size:
chunk_size = self.DEFAULT_CHUNK_SIZE
return self.size > chunk_size
def __iter__(self):
# Iterate over this file-like object by newlines
buffer_ = None
for chunk in self.chunks():
chunk_buffer = BytesIO(chunk)
for line in chunk_buffer:
if buffer_:
line = buffer_ + line
buffer_ = None
# If this is the end of a line, yield
# otherwise, wait for the next round
if line[-1] in ('\n', '\r'):
yield line
else:
buffer_ = line
if buffer_ is not None:
yield buffer_
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.close()
def open(self, mode=None):
if not self.closed:
self.seek(0)
elif self.name and os.path.exists(self.name):
self.file = open(self.name, mode or self.mode)
else:
raise ValueError("The file cannot be reopened.")
def close(self):
self.file.close()
@python_2_unicode_compatible
class ContentFile(File):
"""
A File-like object that takes just raw content, rather than an actual file.
"""
def __init__(self, content, name=None):
if six.PY3:
stream_class = StringIO if isinstance(content, six.text_type) else BytesIO
else:
stream_class = BytesIO
content = force_bytes(content)
super(ContentFile, self).__init__(stream_class(content), name=name)
self.size = len(content)
def __str__(self):
return 'Raw content'
def __bool__(self):
return True
def __nonzero__(self): # Python 2 compatibility
return type(self).__bool__(self)
def open(self, mode=None):
self.seek(0)
def close(self):
pass
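# Illustrative usage sketch (not part of the original module; the payload and
# chunk size below are arbitrary assumptions): how ContentFile wraps raw
# in-memory data and how File.chunks() streams it in fixed-size pieces.
if __name__ == "__main__":
    demo = ContentFile(b"hello world" * 1024, name="demo.bin")
    # multiple_chunks() is True only when the content is larger than chunk_size.
    print(demo.multiple_chunks(chunk_size=1024))
    # Iterate the payload in 1 KB chunks, the way upload handlers consume files.
    total = sum(len(chunk) for chunk in demo.chunks(chunk_size=1024))
    print(total == demo.size)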
| mit |
goofwear/raspberry_pwn | src/pentest/fasttrack/setup.py | 16 | 11757 | #!/usr/bin/env python
import os
import sys
import time
import subprocess
import re
def get_basepath():
basepath = os.getcwd()
return basepath
definepath=get_basepath()
try:
if sys.argv[1]=='install':
print """
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~ *** Fast-Track Setup *** ~
~ *** Install Fast-Track dependencies *** ~
~ *** Version 2.1 *** ~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Fast-Track initial setup menu, you should use this if you are updating
Fast-Track at all due to added dependencies when writing new application
modules into Fast-Track.
Some things to note before running Fast-Track, you must install the following:
Metasploit (for autopwn, and mass client attack)
SQLite3 (for autopwn)
There are other requirements, however; Fast-Track will check for them and if
you're missing any, Fast-Track will install them for you.
NOTE: pymssql is currently compiled at a lower version; higher versions
completely break Fast-Track at this point. We are working on a solution to fix
the overall issues.
"""
# Check if we're root
if os.geteuid() != 0:
print "\nFast-Track v4 - A new beginning...\n\n"
print "Fast-Track Setup is not running under root. Please re-run the tool under root...\n"
sys.exit(1)
#print "Metasploit directory example: /pentest/exploits/framework3/"
#print "\nMake sure you do /folder/ with the / at the end or it'll\njack some stuff up."
try:
print "[*] Ensure that you configure the Metasploit path in bin/config/config\n"
#metasploitpath=raw_input("\nEnter the path to the metasploit directory\nHit enter for default (/pentest/exploits/framework3/): ")
#if metasploitpath=='':
# metasploitpath="/pentest/exploits/framework3/"
#if os.path.isfile("%smsfconsole" % (metasploitpath)): print "Metasploit files detected, moving on..."
#if not os.path.isfile("%smsfconsole" % (metasploitpath)): print "Metasploit not detected in path specified. You should re-run setup and specify the correct path."
#writefile=file("%s/bin/setup/metasploitconfig.file" % (definepath),'w')
#writefile.write("%s" % (metasploitpath))
#writefile.close()
#print "*** Metasploit directory set..... ***\n"
print "No guarantee this will successfully install all dependancies correctly\nyou may need to install some manually..\n\nDifferent Linux OS require different things to work.\n"
installstuff=raw_input("Would you like to attempt to install all dependencies, yes or no: ")
# Thanks to swc|666 for the help below
if installstuff=='yes':
print '[-] Installing requirements needed for Fast-Track.. [-]'
print '\n[-] Detecting Linux version... [-]'
time.sleep(2)
if os.path.isfile("/etc/apt/sources.list"):
### Not every sources.list file presence indicates Ubuntu (this works on all flavors of Ubuntu, Debian and Sidux @least)
if os.path.isfile("/etc/lsb-release"):
pat=re.compile("=|\"",re.M|re.DOTALL)
distro=open("/etc/lsb-release").read()
distro=pat.sub("",distro).split("\n")
distro=[i.strip() for i in distro if i.strip() != '' ]
for n,items in enumerate(distro):
if "DISTRIB_DESCRIPTION" in items:
d1 = distro[n+0]
d2 = d1.strip("DISTRIB_DESCRIPTION")
d3 = "\n[-] " "%s " "Detected [-]\n" % (d2)
#print d3
print "[-] Installing requirements to run on " "%s" "! [-]" % (d2)
# else:
### A sources.list and not a lsb-release file? >.<
print '\n[-] Debian-Based OS Detected [-]\n'
print '[-] Installing requirements! [-]'
print "Installing Subversion, Build-Essential, Python-ClientForm, FreeTds-Dev, PExpect, and Python2.5-Dev, PYMILLS, through Apt, please wait.."
subprocess.Popen("apt-get --force-yes -y install subversion build-essential vncviewer nmap python-clientform python2.6-dev python-pexpect python-setuptools", shell=True).wait()
subprocess.Popen("wget http://ibiblio.org/pub/Linux/ALPHA/freetds/stable/freetds-stable.tgz", shell=True).wait()
subprocess.Popen("wget http://downloads.sourceforge.net/pymssql/pymssql-0.8.0.tar.gz", shell=True).wait()
subprocess.Popen("tar -zxvf freetds-stable.tgz;tar -zxvf pymssql-0.8.0.tar.gz;cd freetds-0.*;./configure --enable-msdblib --with-tdsver=8.0 && make && make install; cd ..;cd pymssql-0.8.0;ln -s /usr/local/lib/libsysbdb.so.5 /usr/lib;python setup.py install;cd ..;rm -rf freetds*;rm -rf pymssql*", shell=True).wait()
print '[-] Running ldconfig.... [-]'
subprocess.Popen("ldconfig", shell=True).wait()
subprocess.Popen("wget http://downloads.sourceforge.net/pymssql/pymssql-0.8.0.tar.gz", shell=True).wait()
subprocess.Popen("tar -zxvf pymssql-0.8.0.tar.gz;cd pymssql-0.8.0;ln -s /usr/local/lib/libsysbdb.so.5 /usr/lib;python setup.py install;cd ..;rm -rf pymssql*", shell=True).wait()
subprocess.Popen('wget http://pypi.inqbus.de/pymills/pymills-3.4.tar.gz#md5=5741d4a5c30aaed5def2f4b4f86e92a9;tar -zxvf pymills-3.4.tar.gz;mv pymills-3.4 pymills;cd pymills/; python setup.py install', shell=True).wait()
subprocess.Popen('rm -rf pymills; rm -rf pymills-3.4.tar.gz', shell=True).wait()
print "Installing BeautifulSoup Python Module"
subprocess.Popen("wget http://www.crummy.com/software/BeautifulSoup/download/BeautifulSoup.tar.gz;tar -zxvf BeautifulSoup.tar.gz;cd BeautifulSoup*;python setup.py install;cd ..;rm -rf BeautifulSoup*", shell=True).wait()
print "BeautifulSoup Installed."
# Taken from http://wiredbytes.com/node/5
metasploitinstall=raw_input("\nWould you like Fast-Track to install Metasploit 3 for you (experimental)? yes or no: ")
if metasploitinstall == 'yes':
subprocess.Popen("apt-get install build-essential ruby libruby rdoc libyaml-ruby libzlib-ruby libopenssl-ruby libdl-ruby libreadline-ruby libiconv-ruby libgtk2-ruby libglade2-ruby subversion sqlite3 libsqlite3-ruby irb", shell=True).wait()
subprocess.Popen("wget -c http://rubyforge.org/frs/download.php/70696/rubygems-1.3.7.tgz;tar -xvzf rubygems-1.3.7.tgz -C /tmp/;cd /tmp/rubygems-1.3.7/;ruby setup.rb", shell=True).wait()
subprocess.Popen("/usr/bin/gem1.8 install rails", shell=True).wait()
subprocess.Popen("rm rubygems-1.3.7.tgz", shell=True).wait()
subprocess.Popen("mkdir /pentest/exploits/framework3;cd /pentest/exploits/framework/;svn co http://metasploit.com/svn/framework3/trunk/ ." , shell=True).wait()
print "Metasploit should have been installed..running ldconfig"
ldconfig=subprocess.Popen("ldconfig").wait()
else:
print "[-] Generic Linux OS detected! [-] \n[-] Installing vanilla installation for dependancies [-]"
print '[-] Installing FreeTDS and PYMMSQL [-]'
subprocess.Popen("wget http://ibiblio.org/pub/Linux/ALPHA/freetds/stable/freetds-stable.tgz", shell=True).wait()
subprocess.Popen("wget http://downloads.sourceforge.net/pymssql/pymssql-0.8.0.tar.gz", shell=True).wait()
subprocess.Popen("tar -zxvf freetds-stable.tgz;tar -zxvf pymssql-0.8.0.tar.gz;cd freetds-0.*;./configure --enable-msdblib --with-tdsver=8.0 && make && make install; cd ..;cd pymssql-0.8.0;ln -s /usr/local/lib/libsysbdb.so.5 /usr/lib;python setup.py install;cd ..;rm -rf freetds*;rm -rf pymssql*", shell=True).wait()
print '[-] Running ldconfig.... [-]'
subprocess.Popen("ldconfig", shell=True).wait()
print '[-] Finished..moving on.. [-]'
time.sleep(2)
print 'Installing Module for Python Called "PExpect"'
subprocess.Popen('wget http://downloads.sourceforge.net/pexpect/pexpect-2.3.tar.gz;tar -zxvf pexpect-2.3.tar.gz;cd pexpect-2.3;python setup.py install;cd ..;rm -rf pexpect-2.3;rm pexpect-2.3.tar.gz', shell=True).wait()
print 'Installed! Moving on...'
print 'Installing SQLite3'
subprocess.Popen('cd /usr/local/bin/;ln -s tclsh8.4 tclsh', shell=True).wait()
subprocess.Popen('wget http://www.sqlite.org/sqlite-3.7.0.1.tar.gz;tar -zxvf sqlite-3.7.0.1;cd sqlite-3.7.0.1;./configure --prefix=/usr/local && make && make install;cd ..;rm sqlite-3.7.0.1.tar.gz;rm -rf sqlite-3.7.0.1', shell=True).wait()
subprocess.Popen('wget http://rubyforge.org/frs/download.php/2820/sqlite-ruby-2.2.3.tar.gz;tar -zxvf sqlite3-ruby-2.2.3.tar.gz;cd sqlite3-ruby-2.2.3;ruby setup.rb config;ruby setup.rb setup;ruby setup.rb install;cd ..;rm sqlite3-ruby-2.2.3.tar.gz;rm -rf sqlite3-ruby-2.2.3', shell=True).wait()
print 'SQLite3 installed..Moving on...'
print "Installing ClientForm Python Module"
subprocess.Popen("svn co http://codespeak.net/svn/wwwsearch/ClientForm/trunk ClientForm;cd ClientForm;python setup.py install;cd ..;rm -rf ClientForm", shell=True).wait()
print "ClientForm Installed, moving on.."
print "Installing PROFTPD"
subprocess.Popen("""wget ftp://ftp.proftpd.org/distrib/source/proftpd-1.3.3a.tar.gz;tar -zxvf proftpd-1.3.3a.tar.gz;cd proftpd-1.3.*/;./configure && make && make install;cd ..;rm -rf proftpd*;echo "UseReverseDNS off" >> /usr/local/etc/proftpd.conf;echo "IdentLookups off" >> /usr/local/etc/proftpd.conf;killall proftpd""", shell=True).wait()
print "PROFRPD installed..Moving on..."
print "Installing PyMills"
subprocess.Popen('python setuptools.py;wget http://pypi.inqbus.de/pymills/pymills-3.4.tar.gz;tar -zxvf pymills-3.4.tar.gz;mv pymills-3.4 pymills;cd pymills/;python setup.py install;cd ..;rm -rf pymills*', shell=True).wait()
print "PyMills installed..Moving on..."
print "Installing BeautifulSoup..."
subprocess.Popen("wget http://www.crummy.com/software/BeautifulSoup/download/BeautifulSoup.tar.gz;tar -zxvf BeautifulSoup.tar.gz;cd BeautifulSoup*;python setup.py install;cd ..;rm -rf BeautifulSoup*", shell=True).wait()
print "BeautifulSoup installed..Moving on..."
print "Finished with installations..."
print "Running ldconfig to wrap up everything..."
subprocess.Popen("ldconfig", shell=True).wait()
print "\n[-] Finished with setup [-]\n[-] Try running Fast-Track now. [-]\n[-] If unsucessful, manually compile from source the deps. [-]"
print "[-] Re-checking dependencies... [-]"
try:
sys.path.append("%s/bin/setup/" % (definepath))
import depend
print "\n"
print "Finished..running ldconfig to wrap everything up...\n"
ldconfig=subprocess.Popen("ldconfig", shell=True)
print "Fast-Track setup exiting...\n"
except ImportError:
print "Error importing dependancy checker."
except KeyboardInterrupt:
print "\n\nExiting Fast-Track setup...\n"
except IndexError:
print """
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~ ~
~ Fast-Track Setup and Installation ~
~ ~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This script will allow you to install the required
dependencies needed for Fast-Track to function
correctly. Note this does not install Metasploit for
you. If you want to use the automated autopwn
functionality within Metasploit, you will need to
install that yourself.
Usage: python setup.py install
"""
| gpl-3.0 |
StephanieMak/hadoop | hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/job_history_summary.py | 323 | 3444 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
pat = re.compile('(?P<name>[^=]+)="(?P<value>[^"]*)" *')
counterPat = re.compile('(?P<name>[^:]+):(?P<value>[^,]*),?')
def parse(tail):
result = {}
for n,v in re.findall(pat, tail):
result[n] = v
return result
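# For example, a (hypothetical) history record tail such as
#   'TASKID="task_0001_m_000001" START_TIME="1240336753705" '
# is turned by parse() into
#   {'TASKID': 'task_0001_m_000001', 'START_TIME': '1240336753705'}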
mapStartTime = {}
mapEndTime = {}
reduceStartTime = {}
reduceShuffleTime = {}
reduceSortTime = {}
reduceEndTime = {}
reduceBytes = {}
for line in sys.stdin:
words = line.split(" ",1)
event = words[0]
attrs = parse(words[1])
if event == 'MapAttempt':
if attrs.has_key("START_TIME"):
mapStartTime[attrs["TASKID"]] = int(attrs["START_TIME"])/1000
elif attrs.has_key("FINISH_TIME"):
mapEndTime[attrs["TASKID"]] = int(attrs["FINISH_TIME"])/1000
elif event == 'ReduceAttempt':
if attrs.has_key("START_TIME"):
reduceStartTime[attrs["TASKID"]] = int(attrs["START_TIME"]) / 1000
elif attrs.has_key("FINISH_TIME"):
reduceShuffleTime[attrs["TASKID"]] = int(attrs["SHUFFLE_FINISHED"])/1000
reduceSortTime[attrs["TASKID"]] = int(attrs["SORT_FINISHED"])/1000
reduceEndTime[attrs["TASKID"]] = int(attrs["FINISH_TIME"])/1000
elif event == 'Task':
if attrs["TASK_TYPE"] == "REDUCE" and attrs.has_key("COUNTERS"):
for n,v in re.findall(counterPat, attrs["COUNTERS"]):
if n == "File Systems.HDFS bytes written":
reduceBytes[attrs["TASKID"]] = int(v)
runningMaps = {}
shufflingReduces = {}
sortingReduces = {}
runningReduces = {}
startTime = min(reduce(min, mapStartTime.values()),
reduce(min, reduceStartTime.values()))
endTime = max(reduce(max, mapEndTime.values()),
reduce(max, reduceEndTime.values()))
reduces = reduceBytes.keys()
reduces.sort()
print "Name reduce-output-bytes shuffle-finish reduce-finish"
for r in reduces:
print r, reduceBytes[r], reduceShuffleTime[r] - startTime,
print reduceEndTime[r] - startTime
print
for t in range(startTime, endTime):
runningMaps[t] = 0
shufflingReduces[t] = 0
sortingReduces[t] = 0
runningReduces[t] = 0
for map in mapStartTime.keys():
for t in range(mapStartTime[map], mapEndTime[map]):
runningMaps[t] += 1
for reduce in reduceStartTime.keys():
for t in range(reduceStartTime[reduce], reduceShuffleTime[reduce]):
shufflingReduces[t] += 1
for t in range(reduceShuffleTime[reduce], reduceSortTime[reduce]):
sortingReduces[t] += 1
for t in range(reduceSortTime[reduce], reduceEndTime[reduce]):
runningReduces[t] += 1
print "time maps shuffle merge reduce"
for t in range(startTime, endTime):
print t - startTime, runningMaps[t], shufflingReduces[t], sortingReduces[t],
print runningReduces[t]
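# Example invocation (hypothetical file name): the script reads a job history
# log on stdin and prints per-reduce byte counts followed by a per-second
# timeline of running maps, shuffling/sorting reduces, and running reduces:
#   cat job_200904211745_0002_history.log | ./job_history_summary.py > summary.txt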
| apache-2.0 |
fengbaicanhe/intellij-community | python/lib/Lib/xml/FtCore.py | 132 | 1806 | """
Contains various definitions common to modules acquired from 4Suite
"""
__all__ = ["FtException", "get_translator"]
class FtException(Exception):
def __init__(self, errorCode, messages, args):
# By defining __str__, args will be available. Otherwise
# the __init__ of Exception sets it to the passed in arguments.
self.params = args
self.errorCode = errorCode
self.message = messages[errorCode] % args
Exception.__init__(self, self.message, args)
def __str__(self):
return self.message
# What follows is used to provide support for I18N in the rest of the
# 4Suite-derived packages in PyXML.
#
# Each sub-package of the top-level "xml" package that contains 4Suite
# code is really a separate text domain, but they're all called
# '4Suite'. For each domain, a translation object is provided using
# message catalogs stored inside the package. The code below defines
# a get_translator() function that returns an appropriate gettext
# function to be used as _() in the sub-package named by the
# parameter. This handles all the compatibility issues related to
# Python versions (whether the gettext module can be found) and
# whether the message catalogs can actually be found.
def _(msg):
return msg
try:
import gettext
except (ImportError, IOError):
def get_translator(pkg):
return _
else:
import os
_cache = {}
_top = os.path.dirname(os.path.abspath(__file__))
def get_translator(pkg):
if not _cache.has_key(pkg):
locale_dir = os.path.join(_top, pkg.replace(".", os.sep))
try:
f = gettext.translation('4Suite', locale_dir).gettext
except IOError:
f = _
_cache[pkg] = f
return _cache[pkg]
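# Illustrative usage sketch (the package name below is an assumption): a
# 4Suite-derived sub-package binds _ to its own translator once at import time
# and wraps user-visible strings with it.
#   from xml.FtCore import get_translator
#   _ = get_translator("dom.ext")
#   print _("Unsupported node type")  # falls back to the identity function
#                                     # when no message catalog is installed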
| apache-2.0 |
louisq/staticguru | utility/artifact_archiver.py | 1 | 4495 | """
The MIT License (MIT)
Copyright (c) 2016 Louis-Philippe Querel [email protected]
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import glob
import os
import shutil
from Logging import logger
"""
The purpose of this utility is to clone the artifacts that have been generated through the build process, in order to preserve them.
This version would probably only work for Maven-run projects.
"""
FILTERED_EXTENSIONS = ('*.jar', '*.tar.*', '*.zip', '*.rpm')
# todo replace with an abstract solution that could be reused for the other modules to log the version that was run
artifact_archiver_version = 1
def archive(repo_path, archive_path, repo_id, commit, filter_extensions=True):
# Determine if we can access the path where the archive should be
if not _determine_access(archive_path):
logger.error("Failed to save to archive %s" % archive_path)
return False
temp_archive = os.path.join(repo_path, "%s-temp" % commit)
temp_archive_compress_file_no_ext = os.path.join(temp_archive, commit)
temp_archive_compress_file = "%s.tar.gz" % temp_archive_compress_file_no_ext
archive_repo_path = os.path.join(archive_path, repo_id)
archive_compress_file = "%s.tar.gz" % os.path.join(archive_repo_path, commit)
_clear_archive(temp_archive, archive_compress_file)
target_directories = _identify_target_directories(repo_path)
_clone_files_in_targets(repo_path, temp_archive, target_directories, filter_extensions=filter_extensions)
_compress_files(temp_archive, temp_archive_compress_file_no_ext)
_move_compress_file_to_archive(archive_repo_path, temp_archive_compress_file)
# Delete the temporary folder
_clear_archive_temp(temp_archive)
return True
def _determine_access(archive_path):
return os.path.exists(archive_path)
def _clear_archive(archive_temp, archive_compress_file):
_clear_archive_temp(archive_temp)
if os.path.exists(archive_compress_file):
os.remove(archive_compress_file)
def _clear_archive_temp(temp_archive):
if os.path.exists(temp_archive):
shutil.rmtree(temp_archive)
def _identify_target_directories(repo_path):
folder = "target"
nesting = "**/"
target_directories = glob.glob(r'%s%s' % (repo_path, folder))
compound_nesting = ""
# We need to navigate the repository to find project target folders
for count in range(5):
compound_nesting += nesting
target_directories += glob.glob(r'%s%s%s' % (repo_path, compound_nesting, folder))
return target_directories
def _clone_files_in_targets(repo_path, temp_archive, target_directories, filter_extensions):
# Determine if we need to filter any of the files
if filter_extensions:
ignore = shutil.ignore_patterns(*FILTERED_EXTENSIONS)  # unpack so each glob pattern is passed as its own argument
else:
ignore = None
for path in target_directories:
folder = path[len(repo_path):]
shutil.copytree(path, "%s/%s" % (temp_archive, folder), ignore=ignore, symlinks=True)
def _compress_files(archive_temp, temp_archive_compress_file_no_ext):
# If the compression is changed the file extension needs to be changed as well in the parent method
shutil._make_tarball(temp_archive_compress_file_no_ext, archive_temp, compress="gzip")
def _move_compress_file_to_archive(repo_archive_path, temp_archive_compress_file):
if not os.path.exists(repo_archive_path):
os.makedirs(repo_archive_path)
shutil.move(temp_archive_compress_file, repo_archive_path)
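# Minimal usage sketch (the paths, repo id and commit hash below are
# hypothetical): archive the Maven target/ directories of a checked-out
# repository for one commit, skipping packaged binaries via the default filter.
if __name__ == "__main__":
    archived = archive(repo_path="/tmp/checkouts/demo-project/",
                       archive_path="/mnt/artifact-archive/",
                       repo_id="demo-project",
                       commit="0123456789abcdef",
                       filter_extensions=True)
    print("Archive succeeded: %s" % archived)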
| mit |
digetx/picasso_upstream_support | Documentation/target/tcm_mod_builder.py | 215 | 36866 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: [email protected]
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_ops, wwn,\n"
buf += " &tpg->se_tpg, tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .module = THIS_MODULE,\n"
buf += " .name = " + fabric_mod_name + ",\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .aborted_task = " + fabric_mod_name + "_aborted_task,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "\n"
buf += " .tfc_wwn_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " return target_register_template(" + fabric_mod_name + "_ops);\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " target_unregister_template(" + fabric_mod_name + "_ops);\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1;
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('aborted_task\)\(', fo):
buf += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
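# Example invocation (illustrative annotation, not part of the original
# script; the module name below is made up):
#
#   ./tcm_mod_builder.py -m tcm_nab5000 -p FC
#
# This generates drivers/target/tcm_nab5000/ with fabric, configfs, Makefile
# and Kconfig skeletons, then offers to wire the new module into
# drivers/target/Makefile and drivers/target/Kconfig as prompted in main().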
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
| gpl-2.0 |
s0enke/boto | boto/logs/layer1.py | 146 | 22588 | # Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.logs import exceptions
from boto.compat import json
class CloudWatchLogsConnection(AWSQueryConnection):
"""
Amazon CloudWatch Logs Service API Reference
This is the Amazon CloudWatch Logs API Reference . Amazon
CloudWatch Logs is a managed service for real time monitoring and
archival of application logs. This guide provides detailed
information about Amazon CloudWatch Logs actions, data types,
parameters, and errors. For detailed information about Amazon
CloudWatch Logs features and their associated API calls, go to the
`Amazon CloudWatch Logs Developer Guide`_.
Use the following links to get started using the Amazon CloudWatch
API Reference :
+ `Actions`_: An alphabetical list of all Amazon CloudWatch Logs
actions.
+ `Data Types`_: An alphabetical list of all Amazon CloudWatch
Logs data types.
+ `Common Parameters`_: Parameters that all Query actions can use.
+ `Common Errors`_: Client and server errors that all actions can
return.
+ `Regions and Endpoints`_: Itemized regions and endpoints for all
AWS products.
In addition to using the Amazon CloudWatch Logs API, you can also
use the following SDKs and third-party libraries to access Amazon
CloudWatch Logs programmatically.
+ `AWS SDK for Java Documentation`_
+ `AWS SDK for .NET Documentation`_
+ `AWS SDK for PHP Documentation`_
+ `AWS SDK for Ruby Documentation`_
Developers in the AWS developer community also provide their own
libraries, which you can find at the following AWS developer
centers:
+ `AWS Java Developer Center`_
+ `AWS PHP Developer Center`_
+ `AWS Python Developer Center`_
+ `AWS Ruby Developer Center`_
+ `AWS Windows and .NET Developer Center`_
"""
APIVersion = "2014-03-28"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "logs.us-east-1.amazonaws.com"
ServiceName = "CloudWatchLogs"
TargetPrefix = "Logs_20140328"
ResponseError = JSONResponseError
_faults = {
"LimitExceededException": exceptions.LimitExceededException,
"DataAlreadyAcceptedException": exceptions.DataAlreadyAcceptedException,
"ResourceInUseException": exceptions.ResourceInUseException,
"ServiceUnavailableException": exceptions.ServiceUnavailableException,
"InvalidParameterException": exceptions.InvalidParameterException,
"ResourceNotFoundException": exceptions.ResourceNotFoundException,
"ResourceAlreadyExistsException": exceptions.ResourceAlreadyExistsException,
"OperationAbortedException": exceptions.OperationAbortedException,
"InvalidSequenceTokenException": exceptions.InvalidSequenceTokenException,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs or kwargs['host'] is None:
kwargs['host'] = region.endpoint
super(CloudWatchLogsConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
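    # Illustrative usage sketch (added annotation, not part of the original
    # boto source). Credentials are assumed to come from boto's usual
    # resolution chain; the group/stream names below are made up:
    #
    #   conn = CloudWatchLogsConnection()
    #   conn.create_log_group('my-app')
    #   conn.create_log_stream('my-app', 'instance-1')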
def create_log_group(self, log_group_name):
"""
Creates a new log group with the specified name. The name of
the log group must be unique within a region for an AWS
account. You can create up to 100 log groups per account.
You must use the following guidelines when naming a log group:
+ Log group names can be between 1 and 512 characters long.
        + Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-'
(hyphen), '/' (forward slash), and '.' (period).
Log groups are created with a default retention of 14 days.
        The retention attribute allows you to configure the number of
days you want to retain log events in the specified log group.
See the `SetRetention` operation on how to modify the
retention of your log groups.
:type log_group_name: string
:param log_group_name:
"""
params = {'logGroupName': log_group_name, }
return self.make_request(action='CreateLogGroup',
body=json.dumps(params))
def create_log_stream(self, log_group_name, log_stream_name):
"""
Creates a new log stream in the specified log group. The name
of the log stream must be unique within the log group. There
is no limit on the number of log streams that can exist in a
log group.
You must use the following guidelines when naming a log
stream:
+ Log stream names can be between 1 and 512 characters long.
+ The ':' colon character is not allowed.
:type log_group_name: string
:param log_group_name:
:type log_stream_name: string
:param log_stream_name:
"""
params = {
'logGroupName': log_group_name,
'logStreamName': log_stream_name,
}
return self.make_request(action='CreateLogStream',
body=json.dumps(params))
def delete_log_group(self, log_group_name):
"""
Deletes the log group with the specified name. Amazon
CloudWatch Logs will delete a log group only if there are no
log streams and no metric filters associated with the log
group. If this condition is not satisfied, the request will
fail and the log group will not be deleted.
:type log_group_name: string
:param log_group_name:
"""
params = {'logGroupName': log_group_name, }
return self.make_request(action='DeleteLogGroup',
body=json.dumps(params))
def delete_log_stream(self, log_group_name, log_stream_name):
"""
Deletes a log stream and permanently deletes all the archived
log events associated with it.
:type log_group_name: string
:param log_group_name:
:type log_stream_name: string
:param log_stream_name:
"""
params = {
'logGroupName': log_group_name,
'logStreamName': log_stream_name,
}
return self.make_request(action='DeleteLogStream',
body=json.dumps(params))
def delete_metric_filter(self, log_group_name, filter_name):
"""
Deletes a metric filter associated with the specified log
group.
:type log_group_name: string
:param log_group_name:
:type filter_name: string
:param filter_name: The name of the metric filter.
"""
params = {
'logGroupName': log_group_name,
'filterName': filter_name,
}
return self.make_request(action='DeleteMetricFilter',
body=json.dumps(params))
def delete_retention_policy(self, log_group_name):
"""
:type log_group_name: string
:param log_group_name:
"""
params = {'logGroupName': log_group_name, }
return self.make_request(action='DeleteRetentionPolicy',
body=json.dumps(params))
def describe_log_groups(self, log_group_name_prefix=None,
next_token=None, limit=None):
"""
Returns all the log groups that are associated with the AWS
account making the request. The list returned in the response
is ASCII-sorted by log group name.
By default, this operation returns up to 50 log groups. If
there are more log groups to list, the response would contain
a `nextToken` value in the response body. You can also limit
the number of log groups returned in the response by
specifying the `limit` parameter in the request.
:type log_group_name_prefix: string
:param log_group_name_prefix:
:type next_token: string
:param next_token: A string token used for pagination that points to
the next page of results. It must be a value obtained from the
response of the previous `DescribeLogGroups` request.
:type limit: integer
:param limit: The maximum number of items returned in the response. If
you don't specify a value, the request would return up to 50 items.
"""
params = {}
if log_group_name_prefix is not None:
params['logGroupNamePrefix'] = log_group_name_prefix
if next_token is not None:
params['nextToken'] = next_token
if limit is not None:
params['limit'] = limit
return self.make_request(action='DescribeLogGroups',
body=json.dumps(params))
def describe_log_streams(self, log_group_name,
log_stream_name_prefix=None, next_token=None,
limit=None):
"""
Returns all the log streams that are associated with the
specified log group. The list returned in the response is
ASCII-sorted by log stream name.
By default, this operation returns up to 50 log streams. If
there are more log streams to list, the response would contain
a `nextToken` value in the response body. You can also limit
the number of log streams returned in the response by
specifying the `limit` parameter in the request.
:type log_group_name: string
:param log_group_name:
:type log_stream_name_prefix: string
:param log_stream_name_prefix:
:type next_token: string
:param next_token: A string token used for pagination that points to
the next page of results. It must be a value obtained from the
response of the previous `DescribeLogStreams` request.
:type limit: integer
:param limit: The maximum number of items returned in the response. If
you don't specify a value, the request would return up to 50 items.
"""
params = {'logGroupName': log_group_name, }
if log_stream_name_prefix is not None:
params['logStreamNamePrefix'] = log_stream_name_prefix
if next_token is not None:
params['nextToken'] = next_token
if limit is not None:
params['limit'] = limit
return self.make_request(action='DescribeLogStreams',
body=json.dumps(params))
def describe_metric_filters(self, log_group_name,
filter_name_prefix=None, next_token=None,
limit=None):
"""
Returns all the metrics filters associated with the specified
log group. The list returned in the response is ASCII-sorted
by filter name.
By default, this operation returns up to 50 metric filters. If
there are more metric filters to list, the response would
contain a `nextToken` value in the response body. You can also
limit the number of metric filters returned in the response by
specifying the `limit` parameter in the request.
:type log_group_name: string
:param log_group_name:
:type filter_name_prefix: string
:param filter_name_prefix: The name of the metric filter.
:type next_token: string
:param next_token: A string token used for pagination that points to
the next page of results. It must be a value obtained from the
response of the previous `DescribeMetricFilters` request.
:type limit: integer
:param limit: The maximum number of items returned in the response. If
you don't specify a value, the request would return up to 50 items.
"""
params = {'logGroupName': log_group_name, }
if filter_name_prefix is not None:
params['filterNamePrefix'] = filter_name_prefix
if next_token is not None:
params['nextToken'] = next_token
if limit is not None:
params['limit'] = limit
return self.make_request(action='DescribeMetricFilters',
body=json.dumps(params))
def get_log_events(self, log_group_name, log_stream_name,
start_time=None, end_time=None, next_token=None,
limit=None, start_from_head=None):
"""
Retrieves log events from the specified log stream. You can
provide an optional time range to filter the results on the
event `timestamp`.
        By default, this operation returns as many log events as can
fit in a response size of 1MB, up to 10,000 log events. The
response will always include a `nextForwardToken` and a
`nextBackwardToken` in the response body. You can use any of
these tokens in subsequent `GetLogEvents` requests to paginate
through events in either forward or backward direction. You
can also limit the number of log events returned in the
response by specifying the `limit` parameter in the request.
:type log_group_name: string
:param log_group_name:
:type log_stream_name: string
:param log_stream_name:
:type start_time: long
        :param start_time: A point in time expressed as the number of milliseconds
since Jan 1, 1970 00:00:00 UTC.
:type end_time: long
        :param end_time: A point in time expressed as the number of milliseconds
since Jan 1, 1970 00:00:00 UTC.
:type next_token: string
:param next_token: A string token used for pagination that points to
the next page of results. It must be a value obtained from the
`nextForwardToken` or `nextBackwardToken` fields in the response of
the previous `GetLogEvents` request.
:type limit: integer
:param limit: The maximum number of log events returned in the
            response. If you don't specify a value, the request would return as
            many log events as can fit in a response size of 1MB, up to 10,000
log events.
:type start_from_head: boolean
:param start_from_head:
"""
params = {
'logGroupName': log_group_name,
'logStreamName': log_stream_name,
}
if start_time is not None:
params['startTime'] = start_time
if end_time is not None:
params['endTime'] = end_time
if next_token is not None:
params['nextToken'] = next_token
if limit is not None:
params['limit'] = limit
if start_from_head is not None:
params['startFromHead'] = start_from_head
return self.make_request(action='GetLogEvents',
body=json.dumps(params))
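    # Hedged pagination sketch (added annotation): per the docstring above,
    # the response carries 'nextForwardToken'/'nextBackwardToken', so a caller
    # can walk forward by feeding the forward token back in. Treat the exact
    # response keys as an assumption; handle() and the names are illustrative:
    #
    #   resp = conn.get_log_events('my-app', 'instance-1', start_from_head=True)
    #   while resp.get('events'):
    #       handle(resp['events'])
    #       resp = conn.get_log_events('my-app', 'instance-1',
    #                                  next_token=resp['nextForwardToken'])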
def put_log_events(self, log_group_name, log_stream_name, log_events,
sequence_token=None):
"""
Uploads a batch of log events to the specified log stream.
Every PutLogEvents request must include the `sequenceToken`
obtained from the response of the previous request. An upload
in a newly created log stream does not require a
`sequenceToken`.
The batch of events must satisfy the following constraints:
+ The maximum batch size is 32,768 bytes, and this size is
calculated as the sum of all event messages in UTF-8, plus 26
bytes for each log event.
+ None of the log events in the batch can be more than 2 hours
in the future.
+ None of the log events in the batch can be older than 14
days or the retention period of the log group.
        + The log events in the batch must be in chronological order,
        sorted by their `timestamp`.
+ The maximum number of log events in a batch is 1,000.
:type log_group_name: string
:param log_group_name:
:type log_stream_name: string
:param log_stream_name:
:type log_events: list
:param log_events: A list of events belonging to a log stream.
:type sequence_token: string
:param sequence_token: A string token that must be obtained from the
response of the previous `PutLogEvents` request.
"""
params = {
'logGroupName': log_group_name,
'logStreamName': log_stream_name,
'logEvents': log_events,
}
if sequence_token is not None:
params['sequenceToken'] = sequence_token
return self.make_request(action='PutLogEvents',
body=json.dumps(params))
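    # Hedged sketch of a log_events payload (added annotation): each entry is
    # assumed to be a dict with a millisecond 'timestamp' and a 'message'
    # string, matching the PutLogEvents wire format; the values are made up:
    #
    #   events = [{'timestamp': 1400000000000, 'message': 'first line'},
    #             {'timestamp': 1400000000500, 'message': 'second line'}]
    #   resp = conn.put_log_events('my-app', 'instance-1', events)
    #   token = resp.get('nextSequenceToken')  # feed into the next call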
def put_metric_filter(self, log_group_name, filter_name, filter_pattern,
metric_transformations):
"""
Creates or updates a metric filter and associates it with the
specified log group. Metric filters allow you to configure
rules to extract metric data from log events ingested through
`PutLogEvents` requests.
:type log_group_name: string
:param log_group_name:
:type filter_name: string
:param filter_name: The name of the metric filter.
:type filter_pattern: string
:param filter_pattern:
:type metric_transformations: list
:param metric_transformations:
"""
params = {
'logGroupName': log_group_name,
'filterName': filter_name,
'filterPattern': filter_pattern,
'metricTransformations': metric_transformations,
}
return self.make_request(action='PutMetricFilter',
body=json.dumps(params))
def put_retention_policy(self, log_group_name, retention_in_days):
"""
:type log_group_name: string
:param log_group_name:
:type retention_in_days: integer
:param retention_in_days: Specifies the number of days you want to
retain log events in the specified log group. Possible values are:
1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 547, 730.
"""
params = {
'logGroupName': log_group_name,
'retentionInDays': retention_in_days,
}
return self.make_request(action='PutRetentionPolicy',
body=json.dumps(params))
def set_retention(self, log_group_name, retention_in_days):
"""
Sets the retention of the specified log group. Log groups are
created with a default retention of 14 days. The retention
        attribute allows you to configure the number of days you want
to retain log events in the specified log group.
:type log_group_name: string
:param log_group_name:
:type retention_in_days: integer
:param retention_in_days: Specifies the number of days you want to
retain log events in the specified log group. Possible values are:
1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 547, 730.
"""
params = {
'logGroupName': log_group_name,
'retentionInDays': retention_in_days,
}
return self.make_request(action='SetRetention',
body=json.dumps(params))
def test_metric_filter(self, filter_pattern, log_event_messages):
"""
Tests the filter pattern of a metric filter against a sample
of log event messages. You can use this operation to validate
the correctness of a metric filter pattern.
:type filter_pattern: string
:param filter_pattern:
:type log_event_messages: list
:param log_event_messages:
"""
params = {
'filterPattern': filter_pattern,
'logEventMessages': log_event_messages,
}
return self.make_request(action='TestMetricFilter',
body=json.dumps(params))
def make_request(self, action, body):
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
'Host': self.region.endpoint,
'Content-Type': 'application/x-amz-json-1.1',
'Content-Length': str(len(body)),
}
http_request = self.build_base_http_request(
method='POST', path='/', auth_path='/', params={},
headers=headers, data=body)
response = self._mexe(http_request, sender=None,
override_num_retries=10)
response_body = response.read().decode('utf-8')
boto.log.debug(response_body)
if response.status == 200:
if response_body:
return json.loads(response_body)
else:
json_body = json.loads(response_body)
fault_name = json_body.get('__type', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
| mit |
MCFlowMace/Wordom | src/setup.py | 1 | 1423 | #! /usr/bin/env python
# System imports
from distutils.core import *
from distutils import sysconfig
# Third-party modules - we depend on numpy
import numpy
# in order to check whether lapack are present ...
import numpy.distutils.system_info as sysinfo
# Obtain the numpy include directory. This works across numpy versions.
try:
numpy_include = numpy.get_include()
except AttributeError:
numpy_include = numpy.get_numpy_include()
# wordom extension module
if len(sysinfo.get_info('lapack')) == 0:
_wordom = Extension("_wordom",
["wordom.i","fileio.c","tools.c","qcprot.c", "xdrfile.c", "xdrfile_xtc.c"],
)
else:
_wordom = Extension("_wordom",
["wordom.i","fileio.c","tools.c","qcprot.c", "xdrfile.c", "xdrfile_xtc.c"],
include_dirs = [numpy_include],
extra_compile_args = ["-D LAPACK"],
libraries = [ 'lapack', 'blas' ]
)
# NumyTypemapTests setup
setup( name = "wordom",
description = "wordom is a molecular structure and data manipulation program/library",
author = "Michele Seeber & colleagues",
url = "http://wordom.sf.net",
author_email= "[email protected]",
license = "GPL",
version = "0.23",
ext_modules = [_wordom],
py_modules = ['wordom']
)
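# Typical build commands (illustrative annotation, not part of the original
# file); both assume SWIG and the numpy headers are available:
#
#   python setup.py build_ext --inplace   # build the _wordom extension in place
#   python setup.py install               # or install wordom system-wide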
| gpl-3.0 |
flask-admin/flask-admin | flask_admin/model/ajax.py | 53 | 1076 | DEFAULT_PAGE_SIZE = 10
class AjaxModelLoader(object):
"""
Ajax related model loader. Override this to implement custom loading behavior.
"""
def __init__(self, name, options):
"""
Constructor.
:param name:
Field name
"""
self.name = name
self.options = options
def format(self, model):
"""
Return (id, name) tuple from the model.
"""
raise NotImplementedError()
def get_one(self, pk):
"""
Find model by its primary key.
:param pk:
Primary key value
"""
raise NotImplementedError()
def get_list(self, query, offset=0, limit=DEFAULT_PAGE_SIZE):
"""
Return models that match `query`.
:param query:
Query string
:param offset:
Offset
:param limit:
Limit
"""
raise NotImplementedError()
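# Minimal, hypothetical subclass sketch (added annotation, not part of the
# original module) showing the contract the three abstract methods are meant
# to fulfil; the in-memory ITEMS store is purely illustrative:
#
#   ITEMS = {1: u'First', 2: u'Second'}
#
#   class DictModelLoader(AjaxModelLoader):
#       def format(self, model):
#           # model is already an (id, name) pair in this toy example
#           return model
#
#       def get_one(self, pk):
#           key = int(pk)
#           return (key, ITEMS[key]) if key in ITEMS else None
#
#       def get_list(self, query, offset=0, limit=DEFAULT_PAGE_SIZE):
#           matches = [(k, v) for k, v in ITEMS.items() if query in v]
#           return matches[offset:offset + limit]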
| bsd-3-clause |
psdh/servo | components/script/dom/bindings/codegen/parser/tests/test_builtins.py | 276 | 1798 | import WebIDL
def WebIDLTest(parser, harness):
parser.parse("""
interface TestBuiltins {
attribute boolean b;
attribute byte s8;
attribute octet u8;
attribute short s16;
attribute unsigned short u16;
attribute long s32;
attribute unsigned long u32;
attribute long long s64;
attribute unsigned long long u64;
attribute DOMTimeStamp ts;
};
""")
results = parser.finish()
harness.ok(True, "TestBuiltins interface parsed without error.")
harness.check(len(results), 1, "Should be one production")
harness.ok(isinstance(results[0], WebIDL.IDLInterface),
"Should be an IDLInterface")
iface = results[0]
harness.check(iface.identifier.QName(), "::TestBuiltins", "Interface has the right QName")
harness.check(iface.identifier.name, "TestBuiltins", "Interface has the right name")
harness.check(iface.parent, None, "Interface has no parent")
members = iface.members
harness.check(len(members), 10, "Should be one production")
names = ["b", "s8", "u8", "s16", "u16", "s32", "u32", "s64", "u64", "ts"]
types = ["Boolean", "Byte", "Octet", "Short", "UnsignedShort", "Long", "UnsignedLong", "LongLong", "UnsignedLongLong", "UnsignedLongLong"]
for i in range(10):
attr = members[i]
harness.ok(isinstance(attr, WebIDL.IDLAttribute), "Should be an IDLAttribute")
harness.check(attr.identifier.QName(), "::TestBuiltins::" + names[i], "Attr has correct QName")
harness.check(attr.identifier.name, names[i], "Attr has correct name")
harness.check(str(attr.type), types[i], "Attr type is the correct name")
harness.ok(attr.type.isPrimitive(), "Should be a primitive type")
| mpl-2.0 |
benob/icsisumm | icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk/classify/__init__.py | 9 | 3871 | # Natural Language Toolkit: Classifiers
#
# Copyright (C) 2001-2008 University of Pennsylvania
# Author: Edward Loper <[email protected]>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Classes and interfaces for labeling tokens with category labels (or
X{class labels}). Typically, labels are represented with strings
(such as C{'health'} or C{'sports'}). Classifiers can be used to
perform a wide range of classification tasks. For example,
classifiers can be used...
- to classify documents by topic.
- to classify ambiguous words by which word sense is intended.
- to classify acoustic signals by which phoneme they represent.
- to classify sentences by their author.
Features
--------
In order to decide which category label is appropriate for a given
token, classifiers examine one or more 'features' of the token. These
X{features} are typically chosen by hand, and indicate which aspects
of the token are relevant to the classification decision. For
example, a document classifier might use a separate feature for each
word, recording how often that word occurred in the document.
Featuresets
-----------
The features describing a token are encoded using a X{featureset},
which is a dictionary that maps from X{feature names} to X{feature
values}. Feature names are unique strings that indicate what aspect
of the token is encoded by the feature. Examples include
C{'prevword'}, for a feature whose value is the previous word; and
C{'contains-word(library)'} for a feature that is true when a document
contains the word C{'library'}. Feature values are typically
booleans, numbers, or strings, depending on which feature they
describe.
Featuresets are typically constructed using a X{feature
extraction function}, which takes a token as its input, and returns a
featuresets describing that token. This feature extraction
function is applied to each token before it is fed to the classifier:
>>> # Define a feature extraction function.
>>> def document_features(document):
... return dict([('contains-word(%s)'%w,True) for w in document])
    >>> # Classify each Gutenberg document.
>>> for file in gutenberg.files():
... doc = gutenberg.tokenized(file)
... print doc_name, classifier.classify(document_features(doc))
Training Classifiers
--------------------
Most classifiers are built by training them on a list of hand-labeled
examples, known as the X{training set}. Training sets are represented
as lists of C{(featuredict, label)} tuples.
"""
from api import *
from util import *
from naivebayes import *
from decisiontree import *
from weka import *
from nltk.internals import deprecated, Deprecated
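# Compact training sketch (added annotation; the feature dicts are
# illustrative). NaiveBayesClassifier is re-exported via the star imports
# above:
#
#   train_set = [({'contains-word(ball)': True}, 'sports'),
#                ({'contains-word(vote)': True}, 'politics')]
#   classifier = NaiveBayesClassifier.train(train_set)
#   classifier.classify({'contains-word(ball)': True})  # -> 'sports' (illustrative)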
__all__ = [
# Classifier Interfaces
'ClassifierI', 'MultiClassifierI',
# Classifiers
'NaiveBayesClassifier', 'DecisionTreeClassifier', 'WekaClassifier',
# Utility functions. Note that accuracy() is intentionally
# omitted -- it should be accessed as nltk.classify.accuracy();
# similarly for log_likelihood() and attested_labels().
'config_weka',
# Demos -- not included.
]
try:
import numpy
from maxent import *
__all__ += ['ConditionalExponentialClassifier', 'train_maxent_classifier',]
except ImportError:
pass
######################################################################
#{ Deprecated
######################################################################
from nltk.internals import Deprecated
class ClassifyI(ClassifierI, Deprecated):
"""Use nltk.ClassifierI instead."""
@deprecated("Use nltk.classify.accuracy() instead.")
def classifier_accuracy(classifier, gold):
return accuracy(classifier, gold)
@deprecated("Use nltk.classify.log_likelihood() instead.")
def classifier_log_likelihood(classifier, gold):
return log_likelihood(classifier, gold)
| gpl-3.0 |
LoHChina/nova | nova/tests/functional/v3/test_migrate_server.py | 27 | 3462 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from nova.conductor import manager as conductor_manager
from nova import db
from nova.tests.functional.v3 import test_servers
from nova import utils
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.extensions')
class MigrateServerSamplesJsonTest(test_servers.ServersSampleBase):
extension_name = "os-migrate-server"
ctype = 'json'
extra_extensions_to_load = ["os-access-ips"]
_api_version = 'v2'
def _get_flags(self):
f = super(MigrateServerSamplesJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.admin_actions.'
'Admin_actions')
return f
def setUp(self):
"""setUp Method for MigrateServer api samples extension
        This method creates the server that will be used in each test
"""
super(MigrateServerSamplesJsonTest, self).setUp()
self.uuid = self._post_server()
@mock.patch('nova.conductor.manager.ComputeTaskManager._cold_migrate')
def test_post_migrate(self, mock_cold_migrate):
# Get api samples to migrate server request.
response = self._do_post('servers/%s/action' % self.uuid,
'migrate-server', {})
self.assertEqual(202, response.status_code)
def test_post_live_migrate_server(self):
# Get api samples to server live migrate request.
def fake_live_migrate(_self, context, instance, scheduler_hint,
block_migration, disk_over_commit):
self.assertEqual(self.uuid, instance["uuid"])
host = scheduler_hint["host"]
self.assertEqual(self.compute.host, host)
self.stubs.Set(conductor_manager.ComputeTaskManager,
'_live_migrate',
fake_live_migrate)
def fake_get_compute(context, host):
service = dict(host=host,
binary='nova-compute',
topic='compute',
report_count=1,
updated_at='foo',
hypervisor_type='bar',
hypervisor_version=utils.convert_version_to_int(
'1.0'),
disabled=False)
return {'compute_node': [service]}
self.stubs.Set(db, "service_get_by_compute_host", fake_get_compute)
response = self._do_post('servers/%s/action' % self.uuid,
'live-migrate-server',
{'hostname': self.compute.host})
self.assertEqual(202, response.status_code)
| apache-2.0 |
montefra/dodocs | dodocs/__init__.py | 1 | 1068 | """Main function
Copyright (c) 2015 Francesco Montesano
MIT Licence
"""
import os
import sys
from dodocs.cmdline import parse
import dodocs.logger as dlog
__version__ = "0.0.1"
def main(argv=None):
"""
Main code
Parameters
----------
argv : list of strings, optional
command line arguments
"""
args = parse(argv=argv)
dlog.setLogger(args)
# make sure to reset the subcommand name
log = dlog.getLogger()
if "func" in args:
args.func(args)
log.debug("Finished")
return 0
else:
# defaults profile to list
if args.subparser_name == 'profile' and args.profile_cmd is None:
main(sys.argv[1:] + ["list"])
else:
# in the other cases suggest to run -h
msg = ("Please provide a valid command.\n"
"Type\n " + os.path.split(sys.argv[0])[1])
if args.subparser_name is not None:
msg += " " + args.subparser_name
msg += ' -h'
log.error(msg)
return 1
| mit |
mozilla/kitsune | kitsune/wiki/permissions.py | 1 | 4844 | import logging
from django.conf import settings
log = logging.getLogger("k.wiki")
# Why is this a mixin if it can only be used for the Document model?
# Good question! My only good reason is to keep the permission related
# code organized and contained in one place.
class DocumentPermissionMixin(object):
"""Adds of permission checking methods to the Document model."""
def allows(self, user, action):
"""Check if the user has the permission on the document."""
# If this is kicking up a KeyError it's probably because you typoed!
return getattr(self, "_allows_%s" % action)(user)
def _allows_create_revision(self, user):
"""Can the user create a revision for the document?"""
# For now (ever?), creating revisions isn't restricted at all.
return True
def _allows_edit(self, user):
"""Can the user edit the document?"""
# Document editing isn't restricted until it has an approved
# revision.
if not self.current_revision:
return True
# Locale leaders and reviewers can edit in their locale.
locale = self.locale
if _is_leader(locale, user) or _is_reviewer(locale, user):
return True
# And finally, fallback to the actual django permission.
return user.has_perm("wiki.change_document")
def _allows_delete(self, user):
"""Can the user delete the document?"""
# Locale leaders can delete documents in their locale.
locale = self.locale
if _is_leader(locale, user):
return True
# Fallback to the django permission.
return user.has_perm("wiki.delete_document")
def _allows_archive(self, user):
"""Can the user archive the document?"""
# Just use the django permission.
return user.has_perm("wiki.archive_document")
def _allows_edit_keywords(self, user):
"""Can the user edit the document's keywords?"""
# If the document is in the default locale, just use the
# django permission.
# Editing keywords isn't restricted in other locales.
return self.locale != settings.WIKI_DEFAULT_LANGUAGE or user.has_perm("wiki.edit_keywords")
def _allows_edit_needs_change(self, user):
"""Can the user edit the needs change fields for the document?"""
# If the document is in the default locale, just use the
# django permission.
# Needs change isn't used for other locales (yet?).
return self.locale == settings.WIKI_DEFAULT_LANGUAGE and user.has_perm(
"wiki.edit_needs_change"
)
    def _allows_mark_ready_for_l10n(self, user):
        """Can the user mark the document as ready for localization?"""
# If the document is localizable and the user has the django
# permission, then the user can mark as ready for l10n.
return self.is_localizable and user.has_perm("wiki.mark_ready_for_l10n")
def _allows_review_revision(self, user):
"""Can the user review a revision for the document?"""
# Locale leaders and reviewers can review revisions in their
# locale.
locale = self.locale
if _is_leader(locale, user) or _is_reviewer(locale, user):
return True
# Fallback to the django permission.
return user.has_perm("wiki.review_revision")
def _allows_delete_revision(self, user):
"""Can the user delete a document's revisions?"""
# Locale leaders and reviewers can delete revisions in their
# locale.
locale = self.locale
if _is_leader(locale, user) or _is_reviewer(locale, user):
return True
# Fallback to the django permission.
return user.has_perm("wiki.delete_revision")
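# Hedged usage note (added annotation): callers are expected to gate actions
# through Document.allows(), which dispatches to the _allows_<action> helpers
# above, e.g.:
#
#   if not doc.allows(request.user, 'edit'):
#       raise PermissionDenied  # PermissionDenied stands in for the caller's
#                               # own error handling; it is illustrative only.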
def _is_leader(locale, user):
"""Checks if the user is a leader for the given locale.
    Returns False if the locale doesn't exist. This should only happen
if we forgot to insert a new locale when enabling it or during testing.
"""
from kitsune.wiki.models import Locale
try:
locale_team = Locale.objects.get(locale=locale)
except Locale.DoesNotExist:
log.warning("Locale not created for %s" % locale)
return False
return user in locale_team.leaders.all()
def _is_reviewer(locale, user):
"""Checks if the user is a reviewer for the given locale.
    Returns False if the locale doesn't exist. This should only happen
if we forgot to insert a new locale when enabling it or during testing.
"""
from kitsune.wiki.models import Locale
try:
locale_team = Locale.objects.get(locale=locale)
except Locale.DoesNotExist:
log.warning("Locale not created for %s" % locale)
return False
return user in locale_team.reviewers.all()
| bsd-3-clause |
dwaynebailey/translate | translate/lang/zh_tw.py | 3 | 1116 | # -*- coding: utf-8 -*-
#
# Copyright 2013 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""This module represents the Chinese language (traditional).
.. seealso:: http://en.wikipedia.org/wiki/Chinese_language
"""
from __future__ import unicode_literals
from translate.lang.zh import zh
class zh_tw(zh):
specialchars = "←→↔×÷©…—‘’“”「」『』【】《》"
ignoretests = {
'all': ["acronyms", "simplecaps", "startcaps"],
}
| gpl-2.0 |
shermanng10/superathletebuilder | env/lib/python2.7/site-packages/wheel/install.py | 472 | 18070 | """
Operations on existing wheel files, including basic installation.
"""
# XXX see patched pip to install
import sys
import warnings
import os.path
import re
import zipfile
import hashlib
import csv
import shutil
try:
_big_number = sys.maxsize
except NameError:
_big_number = sys.maxint
from wheel.decorator import reify
from wheel.util import (urlsafe_b64encode, from_json, urlsafe_b64decode,
native, binary, HashingFile)
from wheel import signatures
from wheel.pkginfo import read_pkg_info_bytes
from wheel.util import open_for_csv
from .pep425tags import get_supported
from .paths import get_install_paths
# The next major version after this version of the 'wheel' tool:
VERSION_TOO_HIGH = (1, 0)
# Non-greedy matching of an optional build number may be too clever (more
# invalid wheel filenames will match). Separate regex for .dist-info?
WHEEL_INFO_RE = re.compile(
r"""^(?P<namever>(?P<name>.+?)(-(?P<ver>\d.+?))?)
((-(?P<build>\d.*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)
\.whl|\.dist-info)$""",
re.VERBOSE).match
def parse_version(version):
"""Use parse_version from pkg_resources or distutils as available."""
global parse_version
try:
from pkg_resources import parse_version
except ImportError:
from distutils.version import LooseVersion as parse_version
return parse_version(version)
class BadWheelFile(ValueError):
pass
class WheelFile(object):
"""Parse wheel-specific attributes from a wheel (.whl) file and offer
basic installation and verification support.
WheelFile can be used to simply parse a wheel filename by avoiding the
methods that require the actual file contents."""
WHEEL_INFO = "WHEEL"
RECORD = "RECORD"
def __init__(self,
filename,
fp=None,
append=False,
context=get_supported):
"""
:param fp: A seekable file-like object or None to open(filename).
:param append: Open archive in append mode.
:param context: Function returning list of supported tags. Wheels
must have the same context to be sortable.
"""
self.filename = filename
self.fp = fp
self.append = append
self.context = context
basename = os.path.basename(filename)
self.parsed_filename = WHEEL_INFO_RE(basename)
if not basename.endswith('.whl') or self.parsed_filename is None:
raise BadWheelFile("Bad filename '%s'" % filename)
def __repr__(self):
return self.filename
@property
def distinfo_name(self):
return "%s.dist-info" % self.parsed_filename.group('namever')
@property
def datadir_name(self):
return "%s.data" % self.parsed_filename.group('namever')
@property
def record_name(self):
return "%s/%s" % (self.distinfo_name, self.RECORD)
@property
def wheelinfo_name(self):
return "%s/%s" % (self.distinfo_name, self.WHEEL_INFO)
@property
def tags(self):
"""A wheel file is compatible with the Cartesian product of the
period-delimited tags in its filename.
To choose a wheel file among several candidates having the same
distribution version 'ver', an installer ranks each triple of
(pyver, abi, plat) that its Python installation can run, sorting
the wheels by the best-ranked tag it supports and then by their
arity which is just len(list(compatibility_tags)).
"""
tags = self.parsed_filename.groupdict()
for pyver in tags['pyver'].split('.'):
for abi in tags['abi'].split('.'):
for plat in tags['plat'].split('.'):
yield (pyver, abi, plat)
compatibility_tags = tags
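    # Illustrative expansion (added annotation, not part of the original
    # source):
    #
    #   list(WheelFile('mypkg-1.0-py2.py3-none-any.whl').tags)
    #   # -> [('py2', 'none', 'any'), ('py3', 'none', 'any')]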
@property
def arity(self):
"""The number of compatibility tags the wheel declares."""
return len(list(self.compatibility_tags))
@property
def rank(self):
"""
Lowest index of any of this wheel's tags in self.context(), and the
arity e.g. (0, 1)
"""
return self.compatibility_rank(self.context())
@property
def compatible(self):
return self.rank[0] != _big_number # bad API!
# deprecated:
def compatibility_rank(self, supported):
"""Rank the wheel against the supported tags. Smaller ranks are more
compatible!
:param supported: A list of compatibility tags that the current
            Python implementation can run.
"""
preferences = []
for tag in self.compatibility_tags:
try:
preferences.append(supported.index(tag))
# Tag not present
except ValueError:
pass
if len(preferences):
return (min(preferences), self.arity)
return (_big_number, 0)
# deprecated
def supports_current_python(self, x):
assert self.context == x, 'context mismatch'
return self.compatible
# Comparability.
# Wheels are equal if they refer to the same file.
# If two wheels are not equal, compare based on (in this order):
# 1. Name
# 2. Version
# 3. Compatibility rank
# 4. Filename (as a tiebreaker)
@property
def _sort_key(self):
return (self.parsed_filename.group('name'),
parse_version(self.parsed_filename.group('ver')),
tuple(-x for x in self.rank),
self.filename)
def __eq__(self, other):
return self.filename == other.filename
def __ne__(self, other):
return self.filename != other.filename
def __lt__(self, other):
if self.context != other.context:
raise TypeError("{0}.context != {1}.context".format(self, other))
return self._sort_key < other._sort_key
# XXX prune
sn = self.parsed_filename.group('name')
on = other.parsed_filename.group('name')
if sn != on:
return sn < on
sv = parse_version(self.parsed_filename.group('ver'))
ov = parse_version(other.parsed_filename.group('ver'))
if sv != ov:
return sv < ov
# Compatibility
if self.context != other.context:
raise TypeError("{0}.context != {1}.context".format(self, other))
sc = self.rank
oc = other.rank
if sc != None and oc != None and sc != oc:
# Smaller compatibility ranks are "better" than larger ones,
# so we have to reverse the sense of the comparison here!
return sc > oc
elif sc == None and oc != None:
return False
return self.filename < other.filename
def __gt__(self, other):
return other < self
def __le__(self, other):
return self == other or self < other
def __ge__(self, other):
return self == other or other < self
#
# Methods using the file's contents:
#
@reify
def zipfile(self):
mode = "r"
if self.append:
mode = "a"
vzf = VerifyingZipFile(self.fp if self.fp else self.filename, mode)
if not self.append:
self.verify(vzf)
return vzf
@reify
def parsed_wheel_info(self):
"""Parse wheel metadata (the .data/WHEEL file)"""
return read_pkg_info_bytes(self.zipfile.read(self.wheelinfo_name))
def check_version(self):
version = self.parsed_wheel_info['Wheel-Version']
if tuple(map(int, version.split('.'))) >= VERSION_TOO_HIGH:
raise ValueError("Wheel version is too high")
@reify
def install_paths(self):
"""
Consult distutils to get the install paths for our dist. A dict with
('purelib', 'platlib', 'headers', 'scripts', 'data').
We use the name from our filename as the dist name, which means headers
could be installed in the wrong place if the filesystem-escaped name
is different than the Name. Who cares?
"""
name = self.parsed_filename.group('name')
return get_install_paths(name)
def install(self, force=False, overrides={}):
"""
Install the wheel into site-packages.
"""
# Utility to get the target directory for a particular key
def get_path(key):
return overrides.get(key) or self.install_paths[key]
# The base target location is either purelib or platlib
if self.parsed_wheel_info['Root-Is-Purelib'] == 'true':
root = get_path('purelib')
else:
root = get_path('platlib')
# Parse all the names in the archive
name_trans = {}
for info in self.zipfile.infolist():
name = info.filename
# Zip files can contain entries representing directories.
# These end in a '/'.
# We ignore these, as we create directories on demand.
if name.endswith('/'):
continue
# Pathnames in a zipfile namelist are always /-separated.
# In theory, paths could start with ./ or have other oddities
# but this won't happen in practical cases of well-formed wheels.
# We'll cover the simple case of an initial './' as it's both easy
# to do and more common than most other oddities.
if name.startswith('./'):
name = name[2:]
# Split off the base directory to identify files that are to be
# installed in non-root locations
basedir, sep, filename = name.partition('/')
if sep and basedir == self.datadir_name:
# Data file. Target destination is elsewhere
key, sep, filename = filename.partition('/')
if not sep:
raise ValueError("Invalid filename in wheel: {0}".format(name))
target = get_path(key)
else:
# Normal file. Target destination is root
key = ''
target = root
filename = name
# Map the actual filename from the zipfile to its intended target
# directory and the pathname relative to that directory.
dest = os.path.normpath(os.path.join(target, filename))
name_trans[info] = (key, target, filename, dest)
# We're now ready to start processing the actual install. The process
# is as follows:
# 1. Prechecks - is the wheel valid, is its declared architecture
# OK, etc. [[Responsibility of the caller]]
# 2. Overwrite check - do any of the files to be installed already
# exist?
# 3. Actual install - put the files in their target locations.
# 4. Update RECORD - write a suitably modified RECORD file to
# reflect the actual installed paths.
if not force:
for info, v in name_trans.items():
k = info.filename
key, target, filename, dest = v
if os.path.exists(dest):
raise ValueError("Wheel file {0} would overwrite {1}. Use force if this is intended".format(k, dest))
# Get the name of our executable, for use when replacing script
# wrapper hashbang lines.
# We encode it using getfilesystemencoding, as that is "the name of
# the encoding used to convert Unicode filenames into system file
# names".
exename = sys.executable.encode(sys.getfilesystemencoding())
record_data = []
record_name = self.distinfo_name + '/RECORD'
for info, (key, target, filename, dest) in name_trans.items():
name = info.filename
source = self.zipfile.open(info)
# Skip the RECORD file
if name == record_name:
continue
ddir = os.path.dirname(dest)
if not os.path.isdir(ddir):
os.makedirs(ddir)
destination = HashingFile(open(dest, 'wb'))
if key == 'scripts':
hashbang = source.readline()
if hashbang.startswith(b'#!python'):
hashbang = b'#!' + exename + binary(os.linesep)
destination.write(hashbang)
shutil.copyfileobj(source, destination)
reldest = os.path.relpath(dest, root)
            reldest = reldest.replace(os.sep, '/')
record_data.append((reldest, destination.digest(), destination.length))
destination.close()
source.close()
# preserve attributes (especially +x bit for scripts)
attrs = info.external_attr >> 16
if attrs: # tends to be 0 if Windows.
os.chmod(dest, info.external_attr >> 16)
record_name = os.path.join(root, self.record_name)
writer = csv.writer(open_for_csv(record_name, 'w+'))
for reldest, digest, length in sorted(record_data):
writer.writerow((reldest, digest, length))
writer.writerow((self.record_name, '', ''))
def verify(self, zipfile=None):
"""Configure the VerifyingZipFile `zipfile` by verifying its signature
and setting expected hashes for every hash in RECORD.
Caller must complete the verification process by completely reading
every file in the archive (e.g. with extractall)."""
sig = None
if zipfile is None:
zipfile = self.zipfile
zipfile.strict = True
record_name = '/'.join((self.distinfo_name, 'RECORD'))
sig_name = '/'.join((self.distinfo_name, 'RECORD.jws'))
# tolerate s/mime signatures:
smime_sig_name = '/'.join((self.distinfo_name, 'RECORD.p7s'))
zipfile.set_expected_hash(record_name, None)
zipfile.set_expected_hash(sig_name, None)
zipfile.set_expected_hash(smime_sig_name, None)
record = zipfile.read(record_name)
record_digest = urlsafe_b64encode(hashlib.sha256(record).digest())
try:
sig = from_json(native(zipfile.read(sig_name)))
except KeyError: # no signature
pass
if sig:
headers, payload = signatures.verify(sig)
if payload['hash'] != "sha256=" + native(record_digest):
msg = "RECORD.sig claimed RECORD hash {0} != computed hash {1}."
raise BadWheelFile(msg.format(payload['hash'],
native(record_digest)))
reader = csv.reader((native(r) for r in record.splitlines()))
for row in reader:
filename = row[0]
hash = row[1]
if not hash:
if filename not in (record_name, sig_name):
sys.stderr.write("%s has no hash!\n" % filename)
continue
algo, data = row[1].split('=', 1)
assert algo == "sha256", "Unsupported hash algorithm"
zipfile.set_expected_hash(filename, urlsafe_b64decode(binary(data)))
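    # Hedged verification sketch (added annotation): accessing .zipfile runs
    # verify() and primes the expected hashes; reading every member, e.g. via
    # extractall(), completes the per-file hash checks. The paths are made up:
    #
    #   wf = WheelFile('mypkg-1.0-py2.py3-none-any.whl')
    #   wf.zipfile.extractall('/tmp/unpacked')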
class VerifyingZipFile(zipfile.ZipFile):
"""ZipFile that can assert that each of its extracted contents matches
    an expected sha256 hash. Note that each file must be completely read in
order for its hash to be checked."""
def __init__(self, file, mode="r",
compression=zipfile.ZIP_STORED,
allowZip64=False):
zipfile.ZipFile.__init__(self, file, mode, compression, allowZip64)
self.strict = False
self._expected_hashes = {}
self._hash_algorithm = hashlib.sha256
def set_expected_hash(self, name, hash):
"""
:param name: name of zip entry
:param hash: bytes of hash (or None for "don't care")
"""
self._expected_hashes[name] = hash
def open(self, name_or_info, mode="r", pwd=None):
"""Return file-like object for 'name'."""
# A non-monkey-patched version would contain most of zipfile.py
ef = zipfile.ZipFile.open(self, name_or_info, mode, pwd)
if isinstance(name_or_info, zipfile.ZipInfo):
name = name_or_info.filename
else:
name = name_or_info
if (name in self._expected_hashes
and self._expected_hashes[name] != None):
expected_hash = self._expected_hashes[name]
try:
_update_crc_orig = ef._update_crc
except AttributeError:
warnings.warn('Need ZipExtFile._update_crc to implement '
'file hash verification (in Python >= 2.7)')
return ef
running_hash = self._hash_algorithm()
if hasattr(ef, '_eof'): # py33
def _update_crc(data):
_update_crc_orig(data)
running_hash.update(data)
if ef._eof and running_hash.digest() != expected_hash:
raise BadWheelFile("Bad hash for file %r" % ef.name)
else:
def _update_crc(data, eof=None):
_update_crc_orig(data, eof=eof)
running_hash.update(data)
if eof and running_hash.digest() != expected_hash:
raise BadWheelFile("Bad hash for file %r" % ef.name)
ef._update_crc = _update_crc
elif self.strict and name not in self._expected_hashes:
raise BadWheelFile("No expected hash for file %r" % ef.name)
return ef
def pop(self):
"""Truncate the last file off this zipfile.
Assumes infolist() is in the same order as the files (true for
ordinary zip files created by Python)"""
if not self.fp:
raise RuntimeError(
"Attempt to pop from ZIP archive that was already closed")
last = self.infolist().pop()
del self.NameToInfo[last.filename]
self.fp.seek(last.header_offset, os.SEEK_SET)
self.fp.truncate()
self._didModify = True
| mit |
aussendorf/bareos-fd-python-plugins | plugin/BareosFdPluginBaseclass.py | 1 | 5778 | #This file is now part of the main Bareos repo. Do not use this version, use the package bareos-filedaemon-python-plugin instead
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Baseclass for Bareos python plugins
# Functions taken and adapted from bareos-fd.py
# (c) Bareos GmbH & Co. KG, Maik Aussendorf
# AGPL v.3
from bareosfd import *
from bareos_fd_consts import *
from io import open
from os import O_WRONLY, O_CREAT
class BareosFdPluginBaseclass:
''' Bareos python plugin base class '''
def __init__(self, context, plugindef):
DebugMessage(context, 100, "Constructor called in module " + __name__ + "\n");
events = [];
events.append(bEventType['bEventJobEnd']);
events.append(bEventType['bEventEndBackupJob']);
events.append(bEventType['bEventEndFileSet']);
events.append(bEventType['bEventHandleBackupFile']);
RegisterEvents(context, events);
# get some static Bareos values
self.fdname = GetValue(context, bVariable['bVarFDName']);
self.jobId = GetValue(context, bVariable['bVarJobId']);
self.client = GetValue(context, bVariable['bVarClient']);
self.level = GetValue(context, bVariable['bVarLevel']);
self.jobName = GetValue(context, bVariable['bVarJobName']);
self.workingdir = GetValue(context, bVariable['bVarWorkingDir']);
DebugMessage(context, 100, "FDName = " + self.fdname + " - BareosFdPluginBaseclass\n");
DebugMessage(context, 100, "WorkingDir = " + self.workingdir + " jobId: " + str(self.jobId) + "\n");
def parse_plugin_definition(self,context, plugindef):
DebugMessage(context, 100, "plugin def parser called with " + plugindef + "\n");
# Parse plugin options into a dict
self.options = dict();
plugin_options = plugindef.split(":");
for current_option in plugin_options:
key,sep,val = current_option.partition("=");
DebugMessage(context, 100, "key:val: " + key + ':' + val + "\n");
if val == '':
continue;
else:
self.options[key] = val;
        # You should override this method in your plugin and do option checking here; return bRCs['bRC_Error'] if the options are not valid.
        # Better yet, call the superclass parse_plugin_definition in your own class and sanity-check self.options afterwards.
return bRCs['bRC_OK'];
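    # Illustrative parse result (added annotation; the option names are made
    # up). A plugindef string such as
    #
    #   "python:module_path=/usr/lib/bareos/plugins:module_name=bareos-fd-myplugin"
    #
    # leaves self.options == {'module_path': '/usr/lib/bareos/plugins',
    # 'module_name': 'bareos-fd-myplugin'}; the bare "python" token has no
    # value, so the loop above skips it.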
def plugin_io(self, context, IOP):
DebugMessage(context, 100, "plugin_io called with " + str(IOP) + "\n");
FNAME = IOP.fname;
if IOP.func == bIOPS['IO_OPEN']:
try:
if IOP.flags & (O_CREAT | O_WRONLY):
self.file = open(FNAME, 'wb');
else:
self.file = open(FNAME, 'rb');
except:
IOP.status = -1;
return bRCs['bRC_Error'];
return bRCs['bRC_OK'];
elif IOP.func == bIOPS['IO_CLOSE']:
self.file.close();
return bRCs['bRC_OK'];
elif IOP.func == bIOPS['IO_SEEK']:
return bRCs['bRC_OK'];
elif IOP.func == bIOPS['IO_READ']:
IOP.buf = bytearray(IOP.count);
IOP.status = self.file.readinto(IOP.buf);
IOP.io_errno = 0
return bRCs['bRC_OK'];
elif IOP.func == bIOPS['IO_WRITE']:
IOP.status = self.file.write(IOP.buf);
IOP.io_errno = 0
return bRCs['bRC_OK'];
def handle_plugin_event(self, context, event):
if event == bEventType['bEventJobEnd']:
DebugMessage(context, 100, "handle_plugin_event called with bEventJobEnd\n");
elif event == bEventType['bEventEndBackupJob']:
DebugMessage(context, 100, "handle_plugin_event called with bEventEndBackupJob\n");
elif event == bEventType['bEventEndFileSet']:
DebugMessage(context, 100, "handle_plugin_event called with bEventEndFileSet\n");
else:
DebugMessage(context, 100, "handle_plugin_event called with event" + str(event) + "\n");
return bRCs['bRC_OK'];
def start_backup_file(self,context, savepkt):
DebugMessage(context, 100, "start_backup called\n");
# Base method, we do not add anything, overload this method with your implementation to add files to backup fileset
return bRCs['bRC_Skip'];
def end_backup_file(self, context):
DebugMessage(context, 100, "end_backup_file() entry point in Python called\n")
return bRCs['bRC_OK'];
def start_restore_file(self, context, cmd):
DebugMessage(context, 100, "start_restore_file() entry point in Python called with" + str(cmd) + "\n")
return bRCs['bRC_OK'];
def end_restore_file(self,context):
DebugMessage(context, 100, "end_restore_file() entry point in Python called\n")
return bRCs['bRC_OK'];
def restore_object_data(self, context, ROP):
DebugMessage(context, 100, "restore_object_data called with " + str(ROP) + "\n");
return bRCs['bRC_OK'];
def create_file(self,context, restorepkt):
DebugMessage(context, 100, "create_file() entry point in Python called with" + str(restorepkt) + "\n")
restorepkt.create_status = bCFs['CF_EXTRACT'];
return bRCs['bRC_OK'];
def check_file(self,context, fname):
DebugMessage(context, 100, "check_file() entry point in Python called with" + str(fname) + "\n")
return bRCs['bRC_OK'];
def handle_backup_file(self,context, savepkt):
DebugMessage(context, 100, "handle_backup_file called with " + str(savepkt) + "\n");
return bRCs['bRC_OK'];
# vim: ts=4 tabstop=4 expandtab shiftwidth=4 softtabstop=4
| agpl-3.0 |
bdh1011/wau | venv/lib/python2.7/site-packages/nbformat/v4/nbjson.py | 11 | 1921 | """Read and write notebooks in JSON format."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import copy
import json
from ipython_genutils import py3compat
from .nbbase import from_dict
from .rwbase import (
NotebookReader, NotebookWriter, rejoin_lines, split_lines, strip_transient
)
class BytesEncoder(json.JSONEncoder):
"""A JSON encoder that accepts b64 (and other *ascii*) bytestrings."""
def default(self, obj):
if isinstance(obj, bytes):
return obj.decode('ascii')
return json.JSONEncoder.default(self, obj)
class JSONReader(NotebookReader):
def reads(self, s, **kwargs):
"""Read a JSON string into a Notebook object"""
nb = json.loads(s, **kwargs)
nb = self.to_notebook(nb, **kwargs)
return nb
def to_notebook(self, d, **kwargs):
"""Convert a disk-format notebook dict to in-memory NotebookNode
handles multi-line values as strings, scrubbing of transient values, etc.
"""
nb = from_dict(d)
nb = rejoin_lines(nb)
nb = strip_transient(nb)
return nb
class JSONWriter(NotebookWriter):
def writes(self, nb, **kwargs):
"""Serialize a NotebookNode object as a JSON string"""
kwargs['cls'] = BytesEncoder
kwargs['indent'] = 1
kwargs['sort_keys'] = True
kwargs['separators'] = (',',': ')
kwargs.setdefault('ensure_ascii', False)
# don't modify in-memory dict
nb = copy.deepcopy(nb)
if kwargs.pop('split_lines', True):
nb = split_lines(nb)
nb = strip_transient(nb)
return py3compat.cast_unicode_py2(json.dumps(nb, **kwargs), 'utf-8')
_reader = JSONReader()
_writer = JSONWriter()
reads = _reader.reads
read = _reader.read
to_notebook = _reader.to_notebook
write = _writer.write
writes = _writer.writes
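# Illustrative sketch (not from the upstream module): a round-trip through the
# module-level reads()/writes() helpers defined above. The dict below is a
# hypothetical, heavily trimmed v4 notebook skeleton; real notebooks carry
# cells and richer metadata.
if __name__ == '__main__':
    example = {'cells': [], 'metadata': {}, 'nbformat': 4, 'nbformat_minor': 0}
    as_json = writes(from_dict(example))   # NotebookNode -> JSON text
    roundtrip = reads(as_json)             # JSON text -> NotebookNode
    assert roundtrip['nbformat'] == 4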
| mit |
bcornwellmott/frappe | frappe/commands/docs.py | 7 | 2238 | from __future__ import unicode_literals, absolute_import
import click
import os
import frappe
from frappe.commands import pass_context
@click.command('write-docs')
@pass_context
@click.argument('app')
@click.option('--target', default=None)
@click.option('--local', default=False, is_flag=True, help='Run app locally')
def write_docs(context, app, target=None, local=False):
"Setup docs in target folder of target app"
from frappe.utils.setup_docs import setup_docs
if not target:
target = os.path.abspath(os.path.join("..", "docs", app))
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
make = setup_docs(app)
make.make_docs(target, local)
finally:
frappe.destroy()
@click.command('build-docs')
@pass_context
@click.argument('app')
@click.option('--docs-version', default='current')
@click.option('--target', default=None)
@click.option('--local', default=False, is_flag=True, help='Run app locally')
@click.option('--watch', default=False, is_flag=True, help='Watch for changes and rewrite')
def build_docs(context, app, docs_version="current", target=None, local=False, watch=False):
"Setup docs in target folder of target app"
from frappe.utils import watch as start_watch
if not target:
target = os.path.abspath(os.path.join("..", "docs", app))
for site in context.sites:
_build_docs_once(site, app, docs_version, target, local)
if watch:
def trigger_make(source_path, event_type):
if "/templates/autodoc/" in source_path:
_build_docs_once(site, app, docs_version, target, local)
elif ("/docs.css" in source_path
or "/docs/" in source_path
or "docs.py" in source_path):
_build_docs_once(site, app, docs_version, target, local, only_content_updated=True)
apps_path = frappe.get_app_path(app, "..", "..")
start_watch(apps_path, handler=trigger_make)
def _build_docs_once(site, app, docs_version, target, local, only_content_updated=False):
from frappe.utils.setup_docs import setup_docs
try:
frappe.init(site=site)
frappe.connect()
make = setup_docs(app)
if not only_content_updated:
make.build(docs_version)
make.make_docs(target, local)
finally:
frappe.destroy()
commands = [
build_docs,
write_docs,
]
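# Usage note (hypothetical, not from the upstream module): these click
# commands are registered through frappe's command loader, so an invocation
# via the bench wrapper could look like:
#
#   bench --site mysite write-docs myapp --target /tmp/myapp-docs --local
#   bench --site mysite build-docs myapp --docs-version current --watch
#
# The option names mirror the decorators above; treating bench as the entry
# point is an assumption about how `commands` is consumed.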
| mit |
uclouvain/osis_louvain | manage.py | 1 | 1269 | #!/usr/bin/env python
import os
import sys
import dotenv
if __name__ == "__main__":
if 'test' in sys.argv:
os.environ.setdefault('TESTING', 'True')
dotenv.read_dotenv()
SETTINGS_FILE = os.environ.get('DJANGO_SETTINGS_MODULE', 'backoffice.settings.local')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", SETTINGS_FILE)
from django.core.management import execute_from_command_line
try:
execute_from_command_line(sys.argv)
except KeyError as ke:
print("Error loading application.")
print("The following environment var is not defined : {}".format(str(ke)))
print("Check the following possible causes :")
print(" - You don't have a .env file. You can copy .env.example to .env to use default")
print(" - Mandatory variables are not defined in your .env file.")
sys.exit("SettingsKeyError")
except ImportError as ie:
print("Error loading application : {}".format(str(ie)))
print("Check the following possible causes :")
print(" - The DJANGO_SETTINGS_MODULE defined in your .env doesn't exist")
print(" - No DJANGO_SETTINGS_MODULE is defined and the default 'backoffice.settings.local' doesn't exist ")
sys.exit("DjangoSettingsError")
| agpl-3.0 |
pforret/python-for-android | python-modules/twisted/twisted/web/rewrite.py | 57 | 1862 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
from twisted.web import resource
class RewriterResource(resource.Resource):
def __init__(self, orig, *rewriteRules):
resource.Resource.__init__(self)
self.resource = orig
self.rewriteRules = list(rewriteRules)
def _rewrite(self, request):
for rewriteRule in self.rewriteRules:
rewriteRule(request)
def getChild(self, path, request):
request.postpath.insert(0, path)
request.prepath.pop()
self._rewrite(request)
path = request.postpath.pop(0)
request.prepath.append(path)
return self.resource.getChildWithDefault(path, request)
def render(self, request):
self._rewrite(request)
return self.resource.render(request)
def tildeToUsers(request):
if request.postpath and request.postpath[0][:1]=='~':
request.postpath[:1] = ['users', request.postpath[0][1:]]
request.path = '/'+'/'.join(request.prepath+request.postpath)
def alias(aliasPath, sourcePath):
"""
I am not a very good aliaser. But I'm the best I can be. If I'm
aliasing to a Resource that generates links, and it uses any parts
of request.prepath to do so, the links will not be relative to the
    aliased path, but rather to the aliased-to path. That also means I
    can't alias static.File directory listings all that nicely. However,
    I can still be useful, as many resources will play nice.
"""
sourcePath = sourcePath.split('/')
aliasPath = aliasPath.split('/')
def rewriter(request):
if request.postpath[:len(aliasPath)] == aliasPath:
after = request.postpath[len(aliasPath):]
request.postpath = sourcePath + after
request.path = '/'+'/'.join(request.prepath+request.postpath)
return rewriter
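# Illustrative sketch (not from the upstream module): wiring both rewrite
# rules around an existing resource. The static.File root and the alias paths
# are placeholders; any IResource can serve as the wrapped resource.
from twisted.web import static
exampleRoot = RewriterResource(static.File("/var/www"),
                               tildeToUsers,
                               alias("old-docs", "docs/archive"))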
| apache-2.0 |
vgrachev8/youtube-dl | youtube_dl/extractor/radiofrance.py | 2 | 2024 | # coding: utf-8
import re
from .common import InfoExtractor
class RadioFranceIE(InfoExtractor):
_VALID_URL = r'^https?://maison\.radiofrance\.fr/radiovisions/(?P<id>[^?#]+)'
IE_NAME = u'radiofrance'
_TEST = {
u'url': u'http://maison.radiofrance.fr/radiovisions/one-one',
u'file': u'one-one.ogg',
u'md5': u'bdbb28ace95ed0e04faab32ba3160daf',
u'info_dict': {
u"title": u"One to one",
u"description": u"Plutôt que d'imaginer la radio de demain comme technologie ou comme création de contenu, je veux montrer que quelles que soient ses évolutions, j'ai l'intime conviction que la radio continuera d'être un grand média de proximité pour les auditeurs.",
u"uploader": u"Thomas Hercouët",
},
}
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
video_id = m.group('id')
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(r'<h1>(.*?)</h1>', webpage, u'title')
description = self._html_search_regex(
r'<div class="bloc_page_wrapper"><div class="text">(.*?)</div>',
webpage, u'description', fatal=False)
uploader = self._html_search_regex(
r'<div class="credit"> © (.*?)</div>',
webpage, u'uploader', fatal=False)
formats_str = self._html_search_regex(
r'class="jp-jplayer[^"]*" data-source="([^"]+)">',
webpage, u'audio URLs')
formats = [
{
'format_id': fm[0],
'url': fm[1],
'vcodec': 'none',
}
for fm in
re.findall(r"([a-z0-9]+)\s*:\s*'([^']+)'", formats_str)
]
# No sorting, we don't know any more about these formats
return {
'id': video_id,
'title': title,
'formats': formats,
'description': description,
'uploader': uploader,
}
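# Note (added for clarity): the data-source attribute parsed above is expected
# to contain entries such as the following hypothetical value:
#
#   mp3: 'http://example.com/son.mp3', ogg: 'http://example.com/son.ogg'
#
# which re.findall(r"([a-z0-9]+)\s*:\s*'([^']+)'", ...) turns into
# [('mp3', 'http://example.com/son.mp3'), ('ogg', 'http://example.com/son.ogg')],
# i.e. one (format_id, url) pair per listed format.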
| unlicense |
walksun/robotframework-selenium2library | test/unit/locators/test_elementfinder.py | 35 | 14209 | import unittest
import os
from Selenium2Library.locators import ElementFinder
from mockito import *
class ElementFinderTests(unittest.TestCase):
def test_find_with_invalid_prefix(self):
finder = ElementFinder()
browser = mock()
        try:
            finder.find(browser, "something=test1")
            self.fail("ValueError expected for an unsupported locator prefix")
        except ValueError as e:
            self.assertEqual(str(e), "Element locator with prefix 'something' is not supported")
def test_find_with_null_browser(self):
finder = ElementFinder()
self.assertRaises(AssertionError,
finder.find, None, "id=test1")
def test_find_with_null_locator(self):
finder = ElementFinder()
browser = mock()
self.assertRaises(AssertionError,
finder.find, browser, None)
def test_find_with_empty_locator(self):
finder = ElementFinder()
browser = mock()
self.assertRaises(AssertionError,
finder.find, browser, "")
def test_find_with_no_tag(self):
finder = ElementFinder()
browser = mock()
finder.find(browser, "test1")
verify(browser).find_elements_by_xpath("//*[(@id='test1' or @name='test1')]")
def test_find_with_tag(self):
finder = ElementFinder()
browser = mock()
finder.find(browser, "test1", tag='div')
verify(browser).find_elements_by_xpath("//div[(@id='test1' or @name='test1')]")
def test_find_with_locator_with_apos(self):
finder = ElementFinder()
browser = mock()
finder.find(browser, "test '1'")
verify(browser).find_elements_by_xpath("//*[(@id=\"test '1'\" or @name=\"test '1'\")]")
def test_find_with_locator_with_quote(self):
finder = ElementFinder()
browser = mock()
finder.find(browser, "test \"1\"")
verify(browser).find_elements_by_xpath("//*[(@id='test \"1\"' or @name='test \"1\"')]")
def test_find_with_locator_with_quote_and_apos(self):
finder = ElementFinder()
browser = mock()
finder.find(browser, "test \"1\" and '2'")
verify(browser).find_elements_by_xpath(
"//*[(@id=concat('test \"1\" and ', \"'\", '2', \"'\", '') or @name=concat('test \"1\" and ', \"'\", '2', \"'\", ''))]")
def test_find_with_a(self):
finder = ElementFinder()
browser = mock()
when(browser).get_current_url().thenReturn("http://localhost/mypage.html")
finder.find(browser, "test1", tag='a')
verify(browser).find_elements_by_xpath(
"//a[(@id='test1' or @name='test1' or @href='test1' or normalize-space(descendant-or-self::text())='test1' or @href='http://localhost/test1')]")
def test_find_with_link_synonym(self):
finder = ElementFinder()
browser = mock()
when(browser).get_current_url().thenReturn("http://localhost/mypage.html")
finder.find(browser, "test1", tag='link')
verify(browser).find_elements_by_xpath(
"//a[(@id='test1' or @name='test1' or @href='test1' or normalize-space(descendant-or-self::text())='test1' or @href='http://localhost/test1')]")
def test_find_with_img(self):
finder = ElementFinder()
browser = mock()
when(browser).get_current_url().thenReturn("http://localhost/mypage.html")
finder.find(browser, "test1", tag='img')
verify(browser).find_elements_by_xpath(
"//img[(@id='test1' or @name='test1' or @src='test1' or @alt='test1' or @src='http://localhost/test1')]")
def test_find_with_image_synonym(self):
finder = ElementFinder()
browser = mock()
when(browser).get_current_url().thenReturn("http://localhost/mypage.html")
finder.find(browser, "test1", tag='image')
verify(browser).find_elements_by_xpath(
"//img[(@id='test1' or @name='test1' or @src='test1' or @alt='test1' or @src='http://localhost/test1')]")
def test_find_with_input(self):
finder = ElementFinder()
browser = mock()
when(browser).get_current_url().thenReturn("http://localhost/mypage.html")
finder.find(browser, "test1", tag='input')
verify(browser).find_elements_by_xpath(
"//input[(@id='test1' or @name='test1' or @value='test1' or @src='test1' or @src='http://localhost/test1')]")
def test_find_with_radio_button_synonym(self):
finder = ElementFinder()
browser = mock()
when(browser).get_current_url().thenReturn("http://localhost/mypage.html")
finder.find(browser, "test1", tag='radio button')
verify(browser).find_elements_by_xpath(
"//input[@type='radio' and (@id='test1' or @name='test1' or @value='test1' or @src='test1' or @src='http://localhost/test1')]")
def test_find_with_checkbox_synonym(self):
finder = ElementFinder()
browser = mock()
when(browser).get_current_url().thenReturn("http://localhost/mypage.html")
finder.find(browser, "test1", tag='checkbox')
verify(browser).find_elements_by_xpath(
"//input[@type='checkbox' and (@id='test1' or @name='test1' or @value='test1' or @src='test1' or @src='http://localhost/test1')]")
def test_find_with_file_upload_synonym(self):
finder = ElementFinder()
browser = mock()
when(browser).get_current_url().thenReturn("http://localhost/mypage.html")
finder.find(browser, "test1", tag='file upload')
verify(browser).find_elements_by_xpath(
"//input[@type='file' and (@id='test1' or @name='test1' or @value='test1' or @src='test1' or @src='http://localhost/test1')]")
def test_find_with_text_field_synonym(self):
finder = ElementFinder()
browser = mock()
when(browser).get_current_url().thenReturn("http://localhost/mypage.html")
finder.find(browser, "test1", tag='text field')
verify(browser).find_elements_by_xpath(
"//input[@type='text' and (@id='test1' or @name='test1' or @value='test1' or @src='test1' or @src='http://localhost/test1')]")
def test_find_with_button(self):
finder = ElementFinder()
browser = mock()
finder.find(browser, "test1", tag='button')
verify(browser).find_elements_by_xpath(
"//button[(@id='test1' or @name='test1' or @value='test1' or normalize-space(descendant-or-self::text())='test1')]")
def test_find_with_select(self):
finder = ElementFinder()
browser = mock()
finder.find(browser, "test1", tag='select')
verify(browser).find_elements_by_xpath(
"//select[(@id='test1' or @name='test1')]")
def test_find_with_list_synonym(self):
finder = ElementFinder()
browser = mock()
finder.find(browser, "test1", tag='list')
verify(browser).find_elements_by_xpath(
"//select[(@id='test1' or @name='test1')]")
def test_find_with_implicit_xpath(self):
finder = ElementFinder()
browser = mock()
elements = self._make_mock_elements('div', 'a', 'span', 'a')
when(browser).find_elements_by_xpath("//*[(@test='1')]").thenReturn(elements)
result = finder.find(browser, "//*[(@test='1')]")
self.assertEqual(result, elements)
result = finder.find(browser, "//*[(@test='1')]", tag='a')
self.assertEqual(result, [elements[1], elements[3]])
def test_find_by_identifier(self):
finder = ElementFinder()
browser = mock()
id_elements = self._make_mock_elements('div', 'a')
name_elements = self._make_mock_elements('span', 'a')
when(browser).find_elements_by_id("test1").thenReturn(list(id_elements)).thenReturn(list(id_elements))
when(browser).find_elements_by_name("test1").thenReturn(list(name_elements)).thenReturn(list(name_elements))
all_elements = list(id_elements)
all_elements.extend(name_elements)
result = finder.find(browser, "identifier=test1")
self.assertEqual(result, all_elements)
result = finder.find(browser, "identifier=test1", tag='a')
self.assertEqual(result, [id_elements[1], name_elements[1]])
def test_find_by_id(self):
finder = ElementFinder()
browser = mock()
elements = self._make_mock_elements('div', 'a', 'span', 'a')
when(browser).find_elements_by_id("test1").thenReturn(elements)
result = finder.find(browser, "id=test1")
self.assertEqual(result, elements)
result = finder.find(browser, "id=test1", tag='a')
self.assertEqual(result, [elements[1], elements[3]])
def test_find_by_name(self):
finder = ElementFinder()
browser = mock()
elements = self._make_mock_elements('div', 'a', 'span', 'a')
when(browser).find_elements_by_name("test1").thenReturn(elements)
result = finder.find(browser, "name=test1")
self.assertEqual(result, elements)
result = finder.find(browser, "name=test1", tag='a')
self.assertEqual(result, [elements[1], elements[3]])
def test_find_by_xpath(self):
finder = ElementFinder()
browser = mock()
elements = self._make_mock_elements('div', 'a', 'span', 'a')
when(browser).find_elements_by_xpath("//*[(@test='1')]").thenReturn(elements)
result = finder.find(browser, "xpath=//*[(@test='1')]")
self.assertEqual(result, elements)
result = finder.find(browser, "xpath=//*[(@test='1')]", tag='a')
self.assertEqual(result, [elements[1], elements[3]])
def test_find_by_dom(self):
finder = ElementFinder()
browser = mock()
elements = self._make_mock_elements('div', 'a', 'span', 'a')
when(browser).execute_script("return document.getElementsByTagName('a');").thenReturn(
[elements[1], elements[3]])
result = finder.find(browser, "dom=document.getElementsByTagName('a')")
self.assertEqual(result, [elements[1], elements[3]])
def test_find_by_link_text(self):
finder = ElementFinder()
browser = mock()
elements = self._make_mock_elements('div', 'a', 'span', 'a')
when(browser).find_elements_by_link_text("my link").thenReturn(elements)
result = finder.find(browser, "link=my link")
self.assertEqual(result, elements)
result = finder.find(browser, "link=my link", tag='a')
self.assertEqual(result, [elements[1], elements[3]])
def test_find_by_css_selector(self):
finder = ElementFinder()
browser = mock()
elements = self._make_mock_elements('div', 'a', 'span', 'a')
when(browser).find_elements_by_css_selector("#test1").thenReturn(elements)
result = finder.find(browser, "css=#test1")
self.assertEqual(result, elements)
result = finder.find(browser, "css=#test1", tag='a')
self.assertEqual(result, [elements[1], elements[3]])
def test_find_by_tag_name(self):
finder = ElementFinder()
browser = mock()
elements = self._make_mock_elements('div', 'a', 'span', 'a')
when(browser).find_elements_by_tag_name("div").thenReturn(elements)
result = finder.find(browser, "tag=div")
self.assertEqual(result, elements)
result = finder.find(browser, "tag=div", tag='a')
self.assertEqual(result, [elements[1], elements[3]])
def test_find_with_sloppy_prefix(self):
finder = ElementFinder()
browser = mock()
elements = self._make_mock_elements('div', 'a', 'span', 'a')
when(browser).find_elements_by_id("test1").thenReturn(elements)
result = finder.find(browser, "ID=test1")
self.assertEqual(result, elements)
result = finder.find(browser, "iD=test1")
self.assertEqual(result, elements)
result = finder.find(browser, "id=test1")
self.assertEqual(result, elements)
result = finder.find(browser, " id =test1")
self.assertEqual(result, elements)
def test_find_with_sloppy_criteria(self):
finder = ElementFinder()
browser = mock()
elements = self._make_mock_elements('div', 'a', 'span', 'a')
when(browser).find_elements_by_id("test1").thenReturn(elements)
result = finder.find(browser, "id= test1 ")
self.assertEqual(result, elements)
def test_find_by_id_with_synonym_and_constraints(self):
finder = ElementFinder()
browser = mock()
elements = self._make_mock_elements('div', 'input', 'span', 'input', 'a', 'input', 'div', 'input')
elements[1].set_attribute('type', 'radio')
elements[3].set_attribute('type', 'checkbox')
elements[5].set_attribute('type', 'text')
elements[7].set_attribute('type', 'file')
when(browser).find_elements_by_id("test1").thenReturn(elements)
result = finder.find(browser, "id=test1")
self.assertEqual(result, elements)
result = finder.find(browser, "id=test1", tag='input')
self.assertEqual(result, [elements[1], elements[3], elements[5], elements[7]])
result = finder.find(browser, "id=test1", tag='radio button')
self.assertEqual(result, [elements[1]])
result = finder.find(browser, "id=test1", tag='checkbox')
self.assertEqual(result, [elements[3]])
result = finder.find(browser, "id=test1", tag='text field')
self.assertEqual(result, [elements[5]])
result = finder.find(browser, "id=test1", tag='file upload')
self.assertEqual(result, [elements[7]])
def _make_mock_elements(self, *tags):
elements = []
for tag in tags:
element = self._make_mock_element(tag)
elements.append(element)
return elements
def _make_mock_element(self, tag):
element = mock()
element.tag_name = tag
element.attributes = {}
def set_attribute(name, value):
element.attributes[name] = value
element.set_attribute = set_attribute
def get_attribute(name):
return element.attributes[name]
element.get_attribute = get_attribute
return element
| apache-2.0 |
yuyichao/pyscical | pyscical/utils.py | 1 | 1930 | # Copyright (C) 2012~2014 by Yichao Yu
# [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
def cffi_ptr(obj, _ffi, writable=False, retain=False):
if isinstance(obj, bytes):
if writable:
# bytes is not writable
raise TypeError('expected an object with a writable '
'buffer interface.')
if retain:
buf = _ffi.new('char[]', obj)
return (buf, len(obj), buf)
return (obj, len(obj), obj)
elif isinstance(obj, np.ndarray):
# numpy array
return (_ffi.cast('void*', obj.__array_interface__['data'][0]),
obj.nbytes, obj)
elif isinstance(obj, np.generic):
if writable or retain:
raise TypeError('expected an object with a writable '
'buffer interface.')
# numpy scalar
#
# * obj.__array_interface__ exists in CPython although requires
# holding a reference to the dynamically created
# __array_interface__ object
#
# * does not exist (yet?) in numpypy.
s_array = obj[()]
return (_ffi.cast('void*', s_array.__array_interface__['data'][0]),
s_array.nbytes, s_array)
raise TypeError("Only numpy arrays and bytes can be converted")
| gpl-3.0 |
potzenheimer/meetshaus | src/meetshaus.sitetheme/meetshaus/sitetheme/tests.py | 1 | 1419 | import unittest
#from zope.testing import doctestunit
#from zope.component import testing
from Testing import ZopeTestCase as ztc
from Products.Five import fiveconfigure
from Products.PloneTestCase import PloneTestCase as ptc
from Products.PloneTestCase.layer import PloneSite
ptc.setupPloneSite()
import meetshaus.sitetheme
class TestCase(ptc.PloneTestCase):
class layer(PloneSite):
@classmethod
def setUp(cls):
fiveconfigure.debug_mode = True
ztc.installPackage(meetshaus.sitetheme)
fiveconfigure.debug_mode = False
@classmethod
def tearDown(cls):
pass
def test_suite():
return unittest.TestSuite([
# Unit tests
#doctestunit.DocFileSuite(
# 'README.txt', package='meetshaus.sitetheme',
# setUp=testing.setUp, tearDown=testing.tearDown),
#doctestunit.DocTestSuite(
# module='meetshaus.sitetheme.mymodule',
# setUp=testing.setUp, tearDown=testing.tearDown),
# Integration tests that use PloneTestCase
#ztc.ZopeDocFileSuite(
# 'README.txt', package='meetshaus.sitetheme',
# test_class=TestCase),
#ztc.FunctionalDocFileSuite(
# 'browser.txt', package='meetshaus.sitetheme',
# test_class=TestCase),
])
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| mit |
yglazner/jy-dev | jy_dev/docopt.py | 11 | 19946 | """Pythonic command-line interface parser that will make you smile.
* http://docopt.org
* Repository and issue-tracker: https://github.com/docopt/docopt
* Licensed under terms of MIT license (see LICENSE-MIT)
* Copyright (c) 2013 Vladimir Keleshev, [email protected]
"""
import sys
import re
__all__ = ['docopt']
__version__ = '0.6.1'
class DocoptLanguageError(Exception):
"""Error in construction of usage-message by developer."""
class DocoptExit(SystemExit):
"""Exit in case user invoked program with incorrect arguments."""
usage = ''
def __init__(self, message=''):
SystemExit.__init__(self, (message + '\n' + self.usage).strip())
class Pattern(object):
def __eq__(self, other):
return repr(self) == repr(other)
def __hash__(self):
return hash(repr(self))
def fix(self):
self.fix_identities()
self.fix_repeating_arguments()
return self
def fix_identities(self, uniq=None):
"""Make pattern-tree tips point to same object if they are equal."""
if not hasattr(self, 'children'):
return self
uniq = list(set(self.flat())) if uniq is None else uniq
for i, c in enumerate(self.children):
if not hasattr(c, 'children'):
assert c in uniq
self.children[i] = uniq[uniq.index(c)]
else:
c.fix_identities(uniq)
def fix_repeating_arguments(self):
"""Fix elements that should accumulate/increment values."""
either = [list(c.children) for c in self.either.children]
for case in either:
for e in [c for c in case if case.count(c) > 1]:
if type(e) is Argument or type(e) is Option and e.argcount:
if e.value is None:
e.value = []
elif type(e.value) is not list:
e.value = e.value.split()
if type(e) is Command or type(e) is Option and e.argcount == 0:
e.value = 0
return self
@property
def either(self):
"""Transform pattern into an equivalent, with only top-level Either."""
# Currently the pattern will not be equivalent, but more "narrow",
# although good enough to reason about list arguments.
ret = []
groups = [[self]]
while groups:
children = groups.pop(0)
types = [type(c) for c in children]
if Either in types:
either = [c for c in children if type(c) is Either][0]
children.pop(children.index(either))
for c in either.children:
groups.append([c] + children)
elif Required in types:
required = [c for c in children if type(c) is Required][0]
children.pop(children.index(required))
groups.append(list(required.children) + children)
elif Optional in types:
optional = [c for c in children if type(c) is Optional][0]
children.pop(children.index(optional))
groups.append(list(optional.children) + children)
elif AnyOptions in types:
optional = [c for c in children if type(c) is AnyOptions][0]
children.pop(children.index(optional))
groups.append(list(optional.children) + children)
elif OneOrMore in types:
oneormore = [c for c in children if type(c) is OneOrMore][0]
children.pop(children.index(oneormore))
groups.append(list(oneormore.children) * 2 + children)
else:
ret.append(children)
return Either(*[Required(*e) for e in ret])
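# Example of the "narrowing" mentioned in the comments above (added for
# clarity):
#   Required(Optional(Option('-a'))).either
# evaluates to
#   Either(Required(Option('-a', None, 0, False)))
# i.e. the Optional wrapper is dropped. That is good enough for counting
# repeated arguments, but the result is not an equivalent pattern.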
class ChildPattern(Pattern):
def __init__(self, name, value=None):
self.name = name
self.value = value
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__, self.name, self.value)
def flat(self, *types):
return [self] if not types or type(self) in types else []
def match(self, left, collected=None):
collected = [] if collected is None else collected
pos, match = self.single_match(left)
if match is None:
return False, left, collected
left_ = left[:pos] + left[pos + 1:]
same_name = [a for a in collected if a.name == self.name]
if type(self.value) in (int, list):
if type(self.value) is int:
increment = 1
else:
increment = ([match.value] if type(match.value) is str
else match.value)
if not same_name:
match.value = increment
return True, left_, collected + [match]
same_name[0].value += increment
return True, left_, collected
return True, left_, collected + [match]
class ParentPattern(Pattern):
def __init__(self, *children):
self.children = list(children)
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__,
', '.join(repr(a) for a in self.children))
def flat(self, *types):
if type(self) in types:
return [self]
return sum([c.flat(*types) for c in self.children], [])
class Argument(ChildPattern):
def single_match(self, left):
for n, p in enumerate(left):
if type(p) is Argument:
return n, Argument(self.name, p.value)
return None, None
@classmethod
def parse(class_, source):
name = re.findall('(<\S*?>)', source)[0]
value = re.findall('\[default: (.*)\]', source, flags=re.I)
return class_(name, value[0] if value else None)
class Command(Argument):
def __init__(self, name, value=False):
self.name = name
self.value = value
def single_match(self, left):
for n, p in enumerate(left):
if type(p) is Argument:
if p.value == self.name:
return n, Command(self.name, True)
else:
break
return None, None
class Option(ChildPattern):
def __init__(self, short=None, long=None, argcount=0, value=False):
assert argcount in (0, 1)
self.short, self.long = short, long
self.argcount, self.value = argcount, value
self.value = None if value is False and argcount else value
@classmethod
def parse(class_, option_description):
short, long, argcount, value = None, None, 0, False
        options, _, description = option_description.strip().partition('  ')
options = options.replace(',', ' ').replace('=', ' ')
for s in options.split():
if s.startswith('--'):
long = s
elif s.startswith('-'):
short = s
else:
argcount = 1
if argcount:
matched = re.findall('\[default: (.*)\]', description, flags=re.I)
value = matched[0] if matched else None
return class_(short, long, argcount, value)
def single_match(self, left):
for n, p in enumerate(left):
if self.name == p.name:
return n, p
return None, None
@property
def name(self):
return self.long or self.short
def __repr__(self):
return 'Option(%r, %r, %r, %r)' % (self.short, self.long,
self.argcount, self.value)
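# Worked examples of Option.parse() on option-description lines (added for
# clarity; note the two-space separator between the option names and their
# description):
#   Option.parse('-h, --help')                         == Option('-h', '--help', 0, False)
#   Option.parse('--speed=<kn>  Speed [default: 10]')  == Option(None, '--speed', 1, '10')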
class Required(ParentPattern):
def match(self, left, collected=None):
collected = [] if collected is None else collected
l = left
c = collected
for p in self.children:
matched, l, c = p.match(l, c)
if not matched:
return False, left, collected
return True, l, c
class Optional(ParentPattern):
def match(self, left, collected=None):
collected = [] if collected is None else collected
for p in self.children:
m, left, collected = p.match(left, collected)
return True, left, collected
class AnyOptions(Optional):
"""Marker/placeholder for [options] shortcut."""
class OneOrMore(ParentPattern):
def match(self, left, collected=None):
assert len(self.children) == 1
collected = [] if collected is None else collected
l = left
c = collected
l_ = None
matched = True
times = 0
while matched:
# could it be that something didn't match but changed l or c?
matched, l, c = self.children[0].match(l, c)
times += 1 if matched else 0
if l_ == l:
break
l_ = l
if times >= 1:
return True, l, c
return False, left, collected
class Either(ParentPattern):
def match(self, left, collected=None):
collected = [] if collected is None else collected
outcomes = []
for p in self.children:
matched, _, _ = outcome = p.match(left, collected)
if matched:
outcomes.append(outcome)
if outcomes:
return min(outcomes, key=lambda outcome: len(outcome[1]))
return False, left, collected
class TokenStream(list):
def __init__(self, source, error):
self += source.split() if hasattr(source, 'split') else source
self.error = error
def move(self):
return self.pop(0) if len(self) else None
def current(self):
return self[0] if len(self) else None
def parse_long(tokens, options):
"""long ::= '--' chars [ ( ' ' | '=' ) chars ] ;"""
long, eq, value = tokens.move().partition('=')
assert long.startswith('--')
value = None if eq == value == '' else value
similar = [o for o in options if o.long == long]
if tokens.error is DocoptExit and similar == []: # if no exact match
similar = [o for o in options if o.long and o.long.startswith(long)]
if len(similar) > 1: # might be simply specified ambiguously 2+ times?
raise tokens.error('%s is not a unique prefix: %s?' %
(long, ', '.join(o.long for o in similar)))
elif len(similar) < 1:
argcount = 1 if eq == '=' else 0
o = Option(None, long, argcount)
options.append(o)
if tokens.error is DocoptExit:
o = Option(None, long, argcount, value if argcount else True)
else:
o = Option(similar[0].short, similar[0].long,
similar[0].argcount, similar[0].value)
if o.argcount == 0:
if value is not None:
raise tokens.error('%s must not have an argument' % o.long)
else:
if value is None:
if tokens.current() is None:
raise tokens.error('%s requires argument' % o.long)
value = tokens.move()
if tokens.error is DocoptExit:
o.value = value if value is not None else True
return [o]
def parse_shorts(tokens, options):
"""shorts ::= '-' ( chars )* [ [ ' ' ] chars ] ;"""
token = tokens.move()
assert token.startswith('-') and not token.startswith('--')
left = token.lstrip('-')
parsed = []
while left != '':
short, left = '-' + left[0], left[1:]
similar = [o for o in options if o.short == short]
if len(similar) > 1:
raise tokens.error('%s is specified ambiguously %d times' %
(short, len(similar)))
elif len(similar) < 1:
o = Option(short, None, 0)
options.append(o)
if tokens.error is DocoptExit:
o = Option(short, None, 0, True)
        else:  # why is copying necessary here?
o = Option(short, similar[0].long,
similar[0].argcount, similar[0].value)
value = None
if o.argcount != 0:
if left == '':
if tokens.current() is None:
raise tokens.error('%s requires argument' % short)
value = tokens.move()
else:
value = left
left = ''
if tokens.error is DocoptExit:
o.value = value if value is not None else True
parsed.append(o)
return parsed
def parse_pattern(source, options):
tokens = TokenStream(re.sub(r'([\[\]\(\)\|]|\.\.\.)', r' \1 ', source),
DocoptLanguageError)
result = parse_expr(tokens, options)
if tokens.current() is not None:
raise tokens.error('unexpected ending: %r' % ' '.join(tokens))
return Required(*result)
def parse_expr(tokens, options):
"""expr ::= seq ( '|' seq )* ;"""
seq = parse_seq(tokens, options)
if tokens.current() != '|':
return seq
result = [Required(*seq)] if len(seq) > 1 else seq
while tokens.current() == '|':
tokens.move()
seq = parse_seq(tokens, options)
result += [Required(*seq)] if len(seq) > 1 else seq
return [Either(*result)] if len(result) > 1 else result
def parse_seq(tokens, options):
"""seq ::= ( atom [ '...' ] )* ;"""
result = []
while tokens.current() not in [None, ']', ')', '|']:
atom = parse_atom(tokens, options)
if tokens.current() == '...':
atom = [OneOrMore(*atom)]
tokens.move()
result += atom
return result
def parse_atom(tokens, options):
"""atom ::= '(' expr ')' | '[' expr ']' | 'options'
| long | shorts | argument | command ;
"""
token = tokens.current()
result = []
if token in '([':
tokens.move()
matching, pattern = {'(': [')', Required], '[': [']', Optional]}[token]
result = pattern(*parse_expr(tokens, options))
if tokens.move() != matching:
raise tokens.error("unmatched '%s'" % token)
return [result]
elif token == 'options':
tokens.move()
return [AnyOptions()]
elif token.startswith('--') and token != '--':
return parse_long(tokens, options)
elif token.startswith('-') and token not in ('-', '--'):
return parse_shorts(tokens, options)
elif token.startswith('<') and token.endswith('>') or token.isupper():
return [Argument(tokens.move())]
else:
return [Command(tokens.move())]
def parse_argv(tokens, options, options_first=False):
"""Parse command-line argument vector.
If options_first:
argv ::= [ long | shorts ]* [ argument ]* [ '--' [ argument ]* ] ;
else:
argv ::= [ long | shorts | argument ]* [ '--' [ argument ]* ] ;
"""
parsed = []
while tokens.current() is not None:
if tokens.current() == '--':
return parsed + [Argument(None, v) for v in tokens]
elif tokens.current().startswith('--'):
parsed += parse_long(tokens, options)
elif tokens.current().startswith('-') and tokens.current() != '-':
parsed += parse_shorts(tokens, options)
elif options_first:
return parsed + [Argument(None, v) for v in tokens]
else:
parsed.append(Argument(None, tokens.move()))
return parsed
def parse_defaults(doc):
# in python < 2.7 you can't pass flags=re.MULTILINE
split = re.split('\n *(<\S+?>|-\S+?)', doc)[1:]
split = [s1 + s2 for s1, s2 in zip(split[::2], split[1::2])]
options = [Option.parse(s) for s in split if s.startswith('-')]
#arguments = [Argument.parse(s) for s in split if s.startswith('<')]
#return options, arguments
return options
def printable_usage(doc):
# in python < 2.7 you can't pass flags=re.IGNORECASE
usage_split = re.split(r'([Uu][Ss][Aa][Gg][Ee]:)', doc)
if len(usage_split) < 3:
raise DocoptLanguageError('"usage:" (case-insensitive) not found.')
if len(usage_split) > 3:
raise DocoptLanguageError('More than one "usage:" (case-insensitive).')
return re.split(r'\n\s*\n', ''.join(usage_split[1:]))[0].strip()
def formal_usage(printable_usage):
pu = printable_usage.split()[1:] # split and drop "usage:"
return '( ' + ' '.join(') | (' if s == pu[0] else s for s in pu[1:]) + ' )'
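# Worked examples of the normalisation performed by formal_usage() (added for
# clarity):
#   formal_usage('Usage: prog go --up')           == '( go --up )'
#   formal_usage('usage: prog a\n       prog b')  == '( a ) | ( b )'
# i.e. repeated occurrences of the program name turn additional usage lines
# into alternatives.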
def extras(help, version, options, doc):
if help and any((o.name in ('-h', '--help')) and o.value for o in options):
print(doc.strip("\n"))
sys.exit()
if version and any(o.name == '--version' and o.value for o in options):
print(version)
sys.exit()
class Dict(dict):
def __repr__(self):
return '{%s}' % ',\n '.join('%r: %r' % i for i in sorted(self.items()))
def docopt(doc, argv=None, help=True, version=None, options_first=False):
"""Parse `argv` based on command-line interface described in `doc`.
`docopt` creates your command-line interface based on its
description that you pass as `doc`. Such description can contain
--options, <positional-argument>, commands, which could be
[optional], (required), (mutually | exclusive) or repeated...
Parameters
----------
doc : str
Description of your command-line interface.
argv : list of str, optional
Argument vector to be parsed. sys.argv[1:] is used if not
provided.
help : bool (default: True)
Set to False to disable automatic help on -h or --help
options.
version : any object
If passed, the object will be printed if --version is in
`argv`.
options_first : bool (default: False)
        Set to True to require options to precede positional arguments,
        i.e. to forbid intermixing options and positional arguments.
Returns
-------
args : dict
A dictionary, where keys are names of command-line elements
such as e.g. "--verbose" and "<path>", and values are the
parsed values of those elements.
Example
-------
>>> from docopt import docopt
>>> doc = '''
Usage:
my_program tcp <host> <port> [--timeout=<seconds>]
my_program serial <port> [--baud=<n>] [--timeout=<seconds>]
my_program (-h | --help | --version)
Options:
-h, --help Show this screen and exit.
--baud=<n> Baudrate [default: 9600]
'''
>>> argv = ['tcp', '127.0.0.1', '80', '--timeout', '30']
>>> docopt(doc, argv)
{'--baud': '9600',
'--help': False,
'--timeout': '30',
'--version': False,
'<host>': '127.0.0.1',
'<port>': '80',
'serial': False,
'tcp': True}
See also
--------
* For video introduction see http://docopt.org
* Full documentation is available in README.rst as well as online
at https://github.com/docopt/docopt#readme
"""
if argv is None:
argv = sys.argv[1:]
DocoptExit.usage = printable_usage(doc)
options = parse_defaults(doc)
pattern = parse_pattern(formal_usage(DocoptExit.usage), options)
# [default] syntax for argument is disabled
#for a in pattern.flat(Argument):
# same_name = [d for d in arguments if d.name == a.name]
# if same_name:
# a.value = same_name[0].value
argv = parse_argv(TokenStream(argv, DocoptExit), list(options),
options_first)
pattern_options = set(pattern.flat(Option))
for ao in pattern.flat(AnyOptions):
doc_options = parse_defaults(doc)
ao.children = list(set(doc_options) - pattern_options)
#if any_options:
# ao.children += [Option(o.short, o.long, o.argcount)
# for o in argv if type(o) is Option]
extras(help, version, argv, doc)
matched, left, collected = pattern.fix().match(argv)
if matched and left == []: # better error message if left?
return Dict((a.name, a.value) for a in (pattern.flat() + collected))
raise DocoptExit()
| mit |
mikedh/trimesh | examples/voxel.py | 2 | 4350 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import inspect
import trimesh
from trimesh.exchange.binvox import voxelize_mesh
from trimesh import voxel as v
dir_current = os.path.dirname(
os.path.abspath(
inspect.getfile(
inspect.currentframe())))
# the absolute path for our reference models
dir_models = os.path.abspath(
os.path.join(dir_current, '..', 'models'))
def show(chair_mesh, chair_voxels, colors=(1, 1, 1, 0.3)):
scene = chair_mesh.scene()
scene.add_geometry(chair_voxels.as_boxes(colors=colors))
scene.show()
if __name__ == '__main__':
base_name = 'chair_model'
chair_mesh = trimesh.load(os.path.join(dir_models, '%s.obj' % base_name))
if isinstance(chair_mesh, trimesh.scene.Scene):
chair_mesh = trimesh.util.concatenate([
trimesh.Trimesh(mesh.vertices, mesh.faces)
for mesh in chair_mesh.geometry.values()])
binvox_path = os.path.join(dir_models, '%s.binvox' % base_name)
chair_voxels = trimesh.load(binvox_path)
chair_voxels = v.VoxelGrid(chair_voxels.encoding.dense, chair_voxels.transform)
print('white: voxelized chair (binvox, exact)')
show(chair_mesh, voxelize_mesh(chair_mesh, exact=True), colors=(1, 1, 1, 0.3))
print('red: binvox-loaded chair')
show(chair_mesh, chair_voxels, colors=(1, 0, 0, 0.3))
voxelized_chair_mesh = chair_mesh.voxelized(np.max(chair_mesh.extents) / 32)
print('green: voxelized chair (default).')
show(chair_mesh, voxelized_chair_mesh, colors=(0, 1, 0, 0.3))
shape = (50, 17, 63)
revox = chair_voxels.revoxelized(shape)
print('cyan: revoxelized.')
show(chair_mesh, revox, colors=(0, 1, 1, 0.3))
values = chair_voxels.encoding.dense.copy()
values[:values.shape[0] // 2] = 0
stripped = v.VoxelGrid(values, chair_voxels.transform.copy()).strip()
print('yellow: stripped halved voxel grid. Transform is updated appropriately')
show(chair_mesh, stripped, colors=(1, 1, 0, 0.3))
transform = np.eye(4)
transform[:3] += np.random.normal(size=(3, 4)) * 0.2
transformed_chair_mesh = chair_mesh.copy().apply_transform(transform)
print('original transform volume: %s'
% str(chair_voxels.element_volume))
chair_voxels.apply_transform(transform)
print('warped transform volume: %s' % str(chair_voxels.element_volume))
print('blue: transformed voxels. Transformation is lazy, and each voxel is '
'no longer a cube.')
show(transformed_chair_mesh, chair_voxels, colors=(0, 0, 1, 0.3))
voxelized = chair_mesh.voxelized(pitch=0.02, method='subdivide').fill()
print('green: subdivided')
show(chair_mesh, voxelized, colors=(0, 1, 0, 0.3))
voxelized = chair_mesh.voxelized(pitch=0.02, method='ray')
print('red: ray. Poor performance on thin structures')
show(chair_mesh, voxelized, colors=(1, 0, 0, 0.3))
voxelized = chair_mesh.voxelized(pitch=0.02, method='binvox')
print('red: binvox (default). Poor performance on thin structures')
show(chair_mesh, voxelized, colors=(1, 0, 0, 0.3))
voxelized = chair_mesh.voxelized(pitch=0.02, method='binvox', wireframe=True)
print('green: binvox (wireframe). Still doesn\'t capture all thin structures')
show(chair_mesh, voxelized, colors=(0, 1, 0, 0.3))
voxelized = chair_mesh.voxelized(pitch=0.02, method='binvox', exact=True)
print('blue: binvox (exact). Does a good job')
show(chair_mesh, voxelized, colors=(0, 0, 1, 0.3))
voxelized = chair_mesh.voxelized(
pitch=0.02,
method='binvox',
exact=True,
downsample_factor=2,
downsample_threshold=1)
print('red: binvox (exact downsampled) surface')
show(chair_mesh, voxelized, colors=(1, 0, 0, 0.3))
chair_voxels = chair_mesh.voxelized(pitch=0.02, method='binvox', exact=True)
voxelized = chair_voxels.copy().fill(method='base')
print('blue: binvox (exact) filled (base). Gets a bit overly excited')
show(chair_mesh, voxelized, colors=(0, 0, 1, 0.3))
voxelized = chair_voxels.copy().fill(method='orthographic')
print('green: binvox (exact) filled (orthographic). '
'Doesn\'t do much as should be expected')
show(chair_mesh, voxelized, colors=(0, 1, 0, 0.3))
| mit |
zyga/ubuntu-make | tests/large/test_ide.py | 6 | 18449 |
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Canonical
#
# Authors:
# Didier Roche
# Tin Tvrtković
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Tests for the IDE category"""
import logging
import platform
import subprocess
import os
import pexpect
from tests.large import LargeFrameworkTests
from tests.tools import UMAKE
logger = logging.getLogger(__name__)
class EclipseIDETests(LargeFrameworkTests):
"""The Eclipse distribution from the IDE collection."""
TIMEOUT_INSTALL_PROGRESS = 120
TIMEOUT_START = 60
TIMEOUT_STOP = 60
def setUp(self):
super().setUp()
self.installed_path = os.path.expanduser("~/tools/ide/eclipse")
self.desktop_filename = "eclipse.desktop"
@property
def arch_option(self):
"""we return the expected arch call on command line"""
return platform.machine()
def test_default_eclipse_ide_install(self):
"""Install eclipse from scratch test case"""
self.child = pexpect.spawnu(self.command('{} ide eclipse'.format(UMAKE)))
self.expect_and_no_warn("Choose installation path: {}".format(self.installed_path))
self.child.sendline("")
self.expect_and_no_warn("Installation done", timeout=self.TIMEOUT_INSTALL_PROGRESS)
self.wait_and_no_warn()
# we have an installed launcher, added to the launcher and an icon file
self.assertTrue(self.launcher_exists_and_is_pinned(self.desktop_filename))
self.assert_exec_exists()
self.assert_icon_exists()
# launch it, send SIGTERM and check that it exits fine
proc = subprocess.Popen(self.command_as_list(self.exec_path), stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
# on 64 bits, there is a java subprocess, we kill that one with SIGKILL (eclipse isn't reliable on SIGTERM)
if self.arch_option == "x86_64":
self.check_and_kill_process(["java", self.arch_option, self.installed_path],
wait_before=self.TIMEOUT_START, send_sigkill=True)
else:
self.check_and_kill_process([self.exec_path],
wait_before=self.TIMEOUT_START, send_sigkill=True)
proc.wait(self.TIMEOUT_STOP)
# ensure that it's detected as installed:
self.child = pexpect.spawnu(self.command('{} ide eclipse'.format(UMAKE)))
self.expect_and_no_warn("Eclipse is already installed.*\[.*\] ")
self.child.sendline()
self.wait_and_no_warn()
class IdeaIDETests(LargeFrameworkTests):
"""IntelliJ Idea from the IDE collection."""
TIMEOUT_INSTALL_PROGRESS = 120
TIMEOUT_START = 60
TIMEOUT_STOP = 60
def setUp(self):
super().setUp()
self.installed_path = os.path.expanduser("~/tools/ide/idea")
self.desktop_filename = 'jetbrains-idea.desktop'
def test_default_install(self):
"""Install from scratch test case"""
self.child = pexpect.spawnu(self.command('{} ide idea'.format(UMAKE)))
self.expect_and_no_warn("Choose installation path: {}".format(self.installed_path))
self.child.sendline("")
self.expect_and_no_warn("Installation done", timeout=self.TIMEOUT_INSTALL_PROGRESS)
self.wait_and_no_warn()
# we have an installed launcher, added to the launcher and an icon file
self.assertTrue(self.launcher_exists_and_is_pinned(self.desktop_filename))
self.assert_exec_exists()
self.assert_icon_exists()
# launch it, send SIGTERM and check that it exits fine
proc = subprocess.Popen(self.command_as_list(self.exec_path), stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
self.check_and_kill_process(["java", self.installed_path], wait_before=self.TIMEOUT_START)
proc.wait(self.TIMEOUT_STOP)
# ensure that it's detected as installed:
self.child = pexpect.spawnu(self.command('{} ide idea'.format(UMAKE)))
self.expect_and_no_warn("Idea is already installed.*\[.*\] ")
self.child.sendline()
self.wait_and_no_warn()
class IdeaUltimateIDETests(LargeFrameworkTests):
"""IntelliJ Idea Ultimate from the IDE collection."""
TIMEOUT_INSTALL_PROGRESS = 120
TIMEOUT_START = 60
TIMEOUT_STOP = 60
def setUp(self):
super().setUp()
self.installed_path = os.path.expanduser("~/tools/ide/idea-ultimate")
self.desktop_filename = 'jetbrains-idea.desktop'
def test_default_install(self):
"""Install from scratch test case"""
self.child = pexpect.spawnu(self.command('{} ide idea-ultimate'.format(UMAKE)))
self.expect_and_no_warn("Choose installation path: {}".format(self.installed_path))
self.child.sendline("")
self.expect_and_no_warn("Installation done", timeout=self.TIMEOUT_INSTALL_PROGRESS)
self.wait_and_no_warn()
logger.info("Installed, running...")
# we have an installed launcher, added to the launcher and an icon file
self.assertTrue(self.launcher_exists_and_is_pinned(self.desktop_filename))
self.assert_exec_exists()
self.assert_icon_exists()
# launch it, send SIGTERM and check that it exits fine
proc = subprocess.Popen(self.command_as_list(self.exec_path), stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
self.check_and_kill_process(["java", self.installed_path], wait_before=self.TIMEOUT_START)
proc.wait(self.TIMEOUT_STOP)
# ensure that it's detected as installed:
self.child = pexpect.spawnu(self.command('{} ide idea-ultimate'.format(UMAKE)))
self.expect_and_no_warn("Idea Ultimate is already installed.*\[.*\] ")
self.child.sendline()
self.wait_and_no_warn()
class PyCharmIDETests(LargeFrameworkTests):
"""PyCharm from the IDE collection."""
TIMEOUT_INSTALL_PROGRESS = 120
TIMEOUT_START = 60
TIMEOUT_STOP = 60
def setUp(self):
super().setUp()
self.installed_path = os.path.expanduser("~/tools/ide/pycharm")
self.desktop_filename = 'jetbrains-pycharm.desktop'
def test_default_install(self):
"""Install from scratch test case"""
self.child = pexpect.spawnu(self.command('{} ide pycharm'.format(UMAKE)))
self.expect_and_no_warn("Choose installation path: {}".format(self.installed_path))
self.child.sendline("")
self.expect_and_no_warn("Installation done", timeout=self.TIMEOUT_INSTALL_PROGRESS)
self.wait_and_no_warn()
logger.info("Installed, running...")
# we have an installed launcher, added to the launcher and an icon file
self.assertTrue(self.launcher_exists_and_is_pinned(self.desktop_filename))
self.assert_exec_exists()
self.assert_icon_exists()
# launch it, send SIGTERM and check that it exits fine
proc = subprocess.Popen(self.command_as_list(self.exec_path), stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
self.check_and_kill_process(["java", self.installed_path], wait_before=self.TIMEOUT_START)
proc.wait(self.TIMEOUT_STOP)
# ensure that it's detected as installed:
self.child = pexpect.spawnu(self.command('{} ide pycharm'.format(UMAKE)))
self.expect_and_no_warn("PyCharm is already installed.*\[.*\] ")
self.child.sendline()
self.wait_and_no_warn()
class PyCharmEducationalIDETests(LargeFrameworkTests):
"""PyCharm Educational from the IDE collection."""
TIMEOUT_INSTALL_PROGRESS = 120
TIMEOUT_START = 60
TIMEOUT_STOP = 60
def setUp(self):
super().setUp()
self.installed_path = os.path.expanduser("~/tools/ide/pycharm-educational")
self.desktop_filename = 'jetbrains-pycharm.desktop'
def test_default_install(self):
"""Install from scratch test case"""
self.child = pexpect.spawnu(self.command('{} ide pycharm-educational'.format(UMAKE)))
self.expect_and_no_warn("Choose installation path: {}".format(self.installed_path))
self.child.sendline("")
self.expect_and_no_warn("Installation done", timeout=self.TIMEOUT_INSTALL_PROGRESS)
self.wait_and_no_warn()
logger.info("Installed, running...")
# we have an installed launcher, added to the launcher and an icon file
self.assertTrue(self.launcher_exists_and_is_pinned(self.desktop_filename))
self.assert_exec_exists()
self.assert_icon_exists()
# launch it, send SIGTERM and check that it exits fine
proc = subprocess.Popen(self.command_as_list(self.exec_path), stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
self.check_and_kill_process(["java", self.installed_path], wait_before=self.TIMEOUT_START)
proc.wait(self.TIMEOUT_STOP)
# ensure that it's detected as installed:
self.child = pexpect.spawnu(self.command('{} ide pycharm-educational'.format(UMAKE)))
self.expect_and_no_warn("PyCharm Educational is already installed.*\[.*\] ")
self.child.sendline()
self.wait_and_no_warn()
class PyCharmProfessionalIDETests(LargeFrameworkTests):
"""PyCharm Professional from the IDE collection."""
TIMEOUT_INSTALL_PROGRESS = 120
TIMEOUT_START = 60
TIMEOUT_STOP = 60
def setUp(self):
super().setUp()
self.installed_path = os.path.expanduser("~/tools/ide/pycharm-professional")
self.desktop_filename = 'jetbrains-pycharm.desktop'
def test_default_install(self):
"""Install from scratch test case"""
self.child = pexpect.spawnu(self.command('{} ide pycharm-professional'.format(UMAKE)))
self.expect_and_no_warn("Choose installation path: {}".format(self.installed_path))
self.child.sendline("")
self.expect_and_no_warn("Installation done", timeout=self.TIMEOUT_INSTALL_PROGRESS)
self.wait_and_no_warn()
logger.info("Installed, running...")
# we have an installed launcher, added to the launcher and an icon file
self.assertTrue(self.launcher_exists_and_is_pinned(self.desktop_filename))
self.assert_exec_exists()
self.assert_icon_exists()
# launch it, send SIGTERM and check that it exits fine
proc = subprocess.Popen(self.command_as_list(self.exec_path), stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
self.check_and_kill_process(["java", self.installed_path], wait_before=self.TIMEOUT_START)
proc.wait(self.TIMEOUT_STOP)
# ensure that it's detected as installed:
self.child = pexpect.spawnu(self.command('{} ide pycharm-professional'.format(UMAKE)))
self.expect_and_no_warn("PyCharm Professional is already installed.*\[.*\] ")
self.child.sendline()
self.wait_and_no_warn()
class RubyMineIDETests(LargeFrameworkTests):
"""RubyMine from the IDE collection."""
TIMEOUT_INSTALL_PROGRESS = 120
TIMEOUT_START = 60
TIMEOUT_STOP = 60
def setUp(self):
super().setUp()
self.installed_path = os.path.expanduser("~/tools/ide/rubymine")
self.desktop_filename = 'jetbrains-rubymine.desktop'
def test_default_install(self):
"""Install from scratch test case"""
self.child = pexpect.spawnu(self.command('{} ide rubymine'.format(UMAKE)))
self.expect_and_no_warn("Choose installation path: {}".format(self.installed_path))
self.child.sendline("")
self.expect_and_no_warn("Installation done", timeout=self.TIMEOUT_INSTALL_PROGRESS)
self.wait_and_no_warn()
logger.info("Installed, running...")
# we have an installed launcher, added to the launcher and an icon file
self.assertTrue(self.launcher_exists_and_is_pinned(self.desktop_filename))
self.assert_exec_exists()
self.assert_icon_exists()
# launch it, send SIGTERM and check that it exits fine
proc = subprocess.Popen(self.command_as_list(self.exec_path), stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
self.check_and_kill_process(["java", self.installed_path], wait_before=self.TIMEOUT_START)
proc.wait(self.TIMEOUT_STOP)
# ensure that it's detected as installed:
self.child = pexpect.spawnu(self.command('{} ide rubymine'.format(UMAKE)))
self.expect_and_no_warn("RubyMine is already installed.*\[.*\] ")
self.child.sendline()
self.wait_and_no_warn()
class WebStormIDETests(LargeFrameworkTests):
"""WebStorm from the IDE collection."""
TIMEOUT_INSTALL_PROGRESS = 120
TIMEOUT_START = 60
TIMEOUT_STOP = 60
def setUp(self):
super().setUp()
self.installed_path = os.path.expanduser("~/tools/ide/webstorm")
self.desktop_filename = 'jetbrains-webstorm.desktop'
def test_default_install(self):
"""Install from scratch test case"""
self.child = pexpect.spawnu(self.command('{} ide webstorm'.format(UMAKE)))
self.expect_and_no_warn("Choose installation path: {}".format(self.installed_path))
self.child.sendline("")
self.expect_and_no_warn("Installation done", timeout=self.TIMEOUT_INSTALL_PROGRESS)
self.wait_and_no_warn()
logger.info("Installed, running...")
# we have an installed launcher, added to the launcher and an icon file
self.assertTrue(self.launcher_exists_and_is_pinned(self.desktop_filename))
self.assert_exec_exists()
self.assert_icon_exists()
# launch it, send SIGTERM and check that it exits fine
proc = subprocess.Popen(self.command_as_list(self.exec_path), stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
self.check_and_kill_process(["java", self.installed_path], wait_before=self.TIMEOUT_START)
proc.wait(self.TIMEOUT_STOP)
# ensure that it's detected as installed:
self.child = pexpect.spawnu(self.command('{} ide webstorm'.format(UMAKE)))
self.expect_and_no_warn("WebStorm is already installed.*\[.*\] ")
self.child.sendline()
self.wait_and_no_warn()
class PhpStormIDETests(LargeFrameworkTests):
"""PhpStorm from the IDE collection."""
TIMEOUT_INSTALL_PROGRESS = 120
TIMEOUT_START = 60
TIMEOUT_STOP = 60
def setUp(self):
super().setUp()
self.installed_path = os.path.expanduser("~/tools/ide/phpstorm")
self.desktop_filename = 'jetbrains-phpstorm.desktop'
def test_default_install(self):
"""Install from scratch test case"""
self.child = pexpect.spawnu(self.command('{} ide phpstorm'.format(UMAKE)))
self.expect_and_no_warn("Choose installation path: {}".format(self.installed_path))
self.child.sendline("")
self.expect_and_no_warn("Installation done", timeout=self.TIMEOUT_INSTALL_PROGRESS)
self.wait_and_no_warn()
logger.info("Installed, running...")
# we have an installed launcher, added to the launcher and an icon file
self.assertTrue(self.launcher_exists_and_is_pinned(self.desktop_filename))
self.assert_exec_exists()
self.assert_icon_exists()
# launch it, send SIGTERM and check that it exits fine
proc = subprocess.Popen(self.command_as_list(self.exec_path), stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
self.check_and_kill_process(["java", self.installed_path], wait_before=self.TIMEOUT_START)
proc.wait(self.TIMEOUT_STOP)
# ensure that it's detected as installed:
self.child = pexpect.spawnu(self.command('{} ide phpstorm'.format(UMAKE)))
self.expect_and_no_warn("PhpStorm is already installed.*\[.*\] ")
self.child.sendline()
self.wait_and_no_warn()
class ArduinoIDETests(LargeFrameworkTests):
"""The Arduino Software distribution from the IDE collection."""
TIMEOUT_INSTALL_PROGRESS = 120
TIMEOUT_START = 60
TIMEOUT_STOP = 60
def setUp(self):
super().setUp()
self.installed_path = os.path.expanduser("~/tools/ide/arduino")
self.desktop_filename = "arduino.desktop"
@property
def arch_option(self):
"""we return the expected arch call on command line"""
return platform.machine()
def test_default_install(self):
"""Install the distribution from scratch test case"""
self.child = pexpect.spawnu(self.command('{} ide arduino'.format(UMAKE)))
self.expect_and_no_warn("Choose installation path: {}".format(self.installed_path))
self.child.sendline("")
self.expect_and_no_warn("Installation done", timeout=self.TIMEOUT_INSTALL_PROGRESS)
self.wait_and_no_warn()
        # we have an installed launcher entry (pinned to the launcher), an exec and an icon file
self.assertTrue(self.launcher_exists_and_is_pinned(self.desktop_filename))
self.assert_exec_exists()
self.assert_icon_exists()
self.assertTrue(self.is_in_group("dialout"))
# launch it, send SIGTERM and check that it exits fine
proc = subprocess.Popen(self.command_as_list(self.exec_path), stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
self.check_and_kill_process(["java", "processing.app.Base"], wait_before=self.TIMEOUT_START)
proc.wait(self.TIMEOUT_STOP)
# ensure that it's detected as installed:
self.child = pexpect.spawnu(self.command('{} ide arduino'.format(UMAKE)))
self.expect_and_no_warn("Arduino is already installed.*\[.*\] ")
self.child.sendline()
self.wait_and_no_warn()
| gpl-3.0 |
cloudnautique/cloud-cattle | code/agent/src/agents/pyagent/cattle/plugins/core/event_handlers.py | 11 | 2000 | import os
import subprocess
from cattle import utils
from cattle import Config
from cattle.type_manager import types
from cattle.progress import Progress
def _should_handle(handler, event):
name = event.name.split(';', 1)[0]
if name not in handler.events() or event.replyTo is None:
return False
return True
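# Illustrative sketch (not part of the original agent code): only the base event
# name, i.e. the part before the first ';', is matched against a handler's
# events() list, and events without a replyTo are always skipped, e.g.
#     handler = PingHandler()
#     _should_handle(handler, event)  # True for event.name 'ping' or 'ping;extra'
#                                     # False for 'config.update' or replyTo=None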
class PingHandler:
def __init__(self):
pass
def events(self):
return ['ping']
def execute(self, event):
if not _should_handle(self, event):
return
resp = utils.reply(event)
if Config.do_ping():
for type in types():
if hasattr(type, 'on_ping'):
type.on_ping(event, resp)
return resp
class ConfigUpdateHandler:
def __init__(self):
pass
def events(self):
return ['config.update']
def execute(self, event):
if not _should_handle(self, event):
return
if len(event.data.items) == 0:
return utils.reply(event)
item_names = []
for item in event.data.items:
# For development, don't let the server kill your agent
if item.name != 'pyagent' or Config.config_update_pyagent():
item_names.append(item.name)
home = Config.home()
env = dict(os.environ)
env['CATTLE_ACCESS_KEY'] = Config.access_key()
env['CATTLE_SECRET_KEY'] = Config.secret_key()
env['CATTLE_CONFIG_URL'] = Config.config_url()
env['CATTLE_HOME'] = home
args = [Config.config_sh()] + item_names
try:
output = utils.get_command_output(args, cwd=home, env=env)
return utils.reply(event, {
'exitCode': 0,
'output': output
})
except subprocess.CalledProcessError as e:
Progress(event).update('Update Failed', data={
'exitCode': e.returncode,
'output': e.output
})
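# Illustrative sketch (not part of the original file; the item name below is a
# hypothetical example): for a config.update event the handler runs
# Config.config_sh() with the requested item names as arguments and the CATTLE_*
# variables exported, then replies with {'exitCode': 0, 'output': ...} on success:
#     args ~ [Config.config_sh(), 'some-config-item']
#     env  ~ {'CATTLE_ACCESS_KEY': ..., 'CATTLE_SECRET_KEY': ...,
#             'CATTLE_CONFIG_URL': ..., 'CATTLE_HOME': Config.home()}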
| apache-2.0 |
capchu/TextRPGOnline | rpgonline/env/lib/python2.7/site-packages/pip/vendor/html5lib/treebuilders/dom.py | 249 | 11328 | from __future__ import absolute_import, division, unicode_literals
from xml.dom import minidom, Node, XML_NAMESPACE, XMLNS_NAMESPACE
import weakref
from . import _base
from .. import constants
from ..constants import namespaces
from ..utils import moduleFactoryFactory
def getDomBuilder(DomImplementation):
Dom = DomImplementation
class AttrList(object):
def __init__(self, element):
self.element = element
def __iter__(self):
return list(self.element.attributes.items()).__iter__()
def __setitem__(self, name, value):
self.element.setAttribute(name, value)
def __len__(self):
return len(list(self.element.attributes.items()))
def items(self):
return [(item[0], item[1]) for item in
list(self.element.attributes.items())]
def keys(self):
return list(self.element.attributes.keys())
def __getitem__(self, name):
return self.element.getAttribute(name)
def __contains__(self, name):
if isinstance(name, tuple):
raise NotImplementedError
else:
return self.element.hasAttribute(name)
class NodeBuilder(_base.Node):
def __init__(self, element):
_base.Node.__init__(self, element.nodeName)
self.element = element
namespace = property(lambda self: hasattr(self.element, "namespaceURI")
and self.element.namespaceURI or None)
def appendChild(self, node):
node.parent = self
self.element.appendChild(node.element)
def insertText(self, data, insertBefore=None):
text = self.element.ownerDocument.createTextNode(data)
if insertBefore:
self.element.insertBefore(text, insertBefore.element)
else:
self.element.appendChild(text)
def insertBefore(self, node, refNode):
self.element.insertBefore(node.element, refNode.element)
node.parent = self
def removeChild(self, node):
if node.element.parentNode == self.element:
self.element.removeChild(node.element)
node.parent = None
def reparentChildren(self, newParent):
while self.element.hasChildNodes():
child = self.element.firstChild
self.element.removeChild(child)
newParent.element.appendChild(child)
self.childNodes = []
def getAttributes(self):
return AttrList(self.element)
def setAttributes(self, attributes):
if attributes:
for name, value in list(attributes.items()):
if isinstance(name, tuple):
if name[0] is not None:
qualifiedName = (name[0] + ":" + name[1])
else:
qualifiedName = name[1]
self.element.setAttributeNS(name[2], qualifiedName,
value)
else:
self.element.setAttribute(
name, value)
attributes = property(getAttributes, setAttributes)
def cloneNode(self):
return NodeBuilder(self.element.cloneNode(False))
def hasContent(self):
return self.element.hasChildNodes()
def getNameTuple(self):
if self.namespace is None:
return namespaces["html"], self.name
else:
return self.namespace, self.name
nameTuple = property(getNameTuple)
class TreeBuilder(_base.TreeBuilder):
def documentClass(self):
self.dom = Dom.getDOMImplementation().createDocument(None, None, None)
return weakref.proxy(self)
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
domimpl = Dom.getDOMImplementation()
doctype = domimpl.createDocumentType(name, publicId, systemId)
self.document.appendChild(NodeBuilder(doctype))
if Dom == minidom:
doctype.ownerDocument = self.dom
def elementClass(self, name, namespace=None):
if namespace is None and self.defaultNamespace is None:
node = self.dom.createElement(name)
else:
node = self.dom.createElementNS(namespace, name)
return NodeBuilder(node)
def commentClass(self, data):
return NodeBuilder(self.dom.createComment(data))
def fragmentClass(self):
return NodeBuilder(self.dom.createDocumentFragment())
def appendChild(self, node):
self.dom.appendChild(node.element)
def testSerializer(self, element):
return testSerializer(element)
def getDocument(self):
return self.dom
def getFragment(self):
return _base.TreeBuilder.getFragment(self).element
def insertText(self, data, parent=None):
data = data
if parent != self:
_base.TreeBuilder.insertText(self, data, parent)
else:
# HACK: allow text nodes as children of the document node
if hasattr(self.dom, '_child_node_types'):
if not Node.TEXT_NODE in self.dom._child_node_types:
self.dom._child_node_types = list(self.dom._child_node_types)
self.dom._child_node_types.append(Node.TEXT_NODE)
self.dom.appendChild(self.dom.createTextNode(data))
implementation = DomImplementation
name = None
def testSerializer(element):
element.normalize()
rv = []
def serializeElement(element, indent=0):
if element.nodeType == Node.DOCUMENT_TYPE_NODE:
if element.name:
if element.publicId or element.systemId:
publicId = element.publicId or ""
systemId = element.systemId or ""
rv.append("""|%s<!DOCTYPE %s "%s" "%s">""" %
(' ' * indent, element.name, publicId, systemId))
else:
rv.append("|%s<!DOCTYPE %s>" % (' ' * indent, element.name))
else:
rv.append("|%s<!DOCTYPE >" % (' ' * indent,))
elif element.nodeType == Node.DOCUMENT_NODE:
rv.append("#document")
elif element.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
rv.append("#document-fragment")
elif element.nodeType == Node.COMMENT_NODE:
rv.append("|%s<!-- %s -->" % (' ' * indent, element.nodeValue))
elif element.nodeType == Node.TEXT_NODE:
rv.append("|%s\"%s\"" % (' ' * indent, element.nodeValue))
else:
if (hasattr(element, "namespaceURI") and
element.namespaceURI is not None):
name = "%s %s" % (constants.prefixes[element.namespaceURI],
element.nodeName)
else:
name = element.nodeName
rv.append("|%s<%s>" % (' ' * indent, name))
if element.hasAttributes():
attributes = []
for i in range(len(element.attributes)):
attr = element.attributes.item(i)
name = attr.nodeName
value = attr.value
ns = attr.namespaceURI
if ns:
name = "%s %s" % (constants.prefixes[ns], attr.localName)
else:
name = attr.nodeName
attributes.append((name, value))
for name, value in sorted(attributes):
rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
indent += 2
for child in element.childNodes:
serializeElement(child, indent)
serializeElement(element, 0)
return "\n".join(rv)
def dom2sax(node, handler, nsmap={'xml': XML_NAMESPACE}):
if node.nodeType == Node.ELEMENT_NODE:
if not nsmap:
handler.startElement(node.nodeName, node.attributes)
for child in node.childNodes:
dom2sax(child, handler, nsmap)
handler.endElement(node.nodeName)
else:
attributes = dict(node.attributes.itemsNS())
# gather namespace declarations
prefixes = []
for attrname in list(node.attributes.keys()):
attr = node.getAttributeNode(attrname)
if (attr.namespaceURI == XMLNS_NAMESPACE or
(attr.namespaceURI is None and attr.nodeName.startswith('xmlns'))):
prefix = (attr.nodeName != 'xmlns' and attr.nodeName or None)
handler.startPrefixMapping(prefix, attr.nodeValue)
prefixes.append(prefix)
nsmap = nsmap.copy()
nsmap[prefix] = attr.nodeValue
del attributes[(attr.namespaceURI, attr.nodeName)]
# apply namespace declarations
for attrname in list(node.attributes.keys()):
attr = node.getAttributeNode(attrname)
if attr.namespaceURI is None and ':' in attr.nodeName:
prefix = attr.nodeName.split(':')[0]
if prefix in nsmap:
del attributes[(attr.namespaceURI, attr.nodeName)]
attributes[(nsmap[prefix], attr.nodeName)] = attr.nodeValue
# SAX events
ns = node.namespaceURI or nsmap.get(None, None)
handler.startElementNS((ns, node.nodeName), node.nodeName, attributes)
for child in node.childNodes:
dom2sax(child, handler, nsmap)
handler.endElementNS((ns, node.nodeName), node.nodeName)
for prefix in prefixes:
handler.endPrefixMapping(prefix)
elif node.nodeType in [Node.TEXT_NODE, Node.CDATA_SECTION_NODE]:
handler.characters(node.nodeValue)
elif node.nodeType == Node.DOCUMENT_NODE:
handler.startDocument()
for child in node.childNodes:
dom2sax(child, handler, nsmap)
handler.endDocument()
elif node.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
for child in node.childNodes:
dom2sax(child, handler, nsmap)
else:
# ATTRIBUTE_NODE
# ENTITY_NODE
# PROCESSING_INSTRUCTION_NODE
# COMMENT_NODE
# DOCUMENT_TYPE_NODE
# NOTATION_NODE
pass
return locals()
# The actual means to get a module!
getDomModule = moduleFactoryFactory(getDomBuilder)
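# Illustrative usage sketch (not in the original file; treat the parser wiring as
# an assumption based on html5lib's public treebuilder API):
#     from xml.dom import minidom
#     dom_module = getDomModule(minidom)  # module-like object produced above
#     # html5lib.HTMLParser(tree=dom_module.TreeBuilder) then builds a minidom
#     # Document from parsed markup.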
| gpl-3.0 |
mwstobo/marshmallow | tests/test_exceptions.py | 7 | 2753 | # -*- coding: utf-8 -*-
import pytest
from marshmallow.exceptions import ValidationError, MarshallingError, UnmarshallingError
from marshmallow import fields, Schema
class TestValidationError:
def test_stores_message_in_list(self):
err = ValidationError('foo')
assert err.messages == ['foo']
def test_can_pass_list_of_messages(self):
err = ValidationError(['foo', 'bar'])
assert err.messages == ['foo', 'bar']
def test_stores_dictionaries(self):
messages = {'user': {'email': ['email is invalid']}}
err = ValidationError(messages)
assert err.messages == messages
def test_can_store_field_names(self):
err = ValidationError('invalid email', field_names='email')
assert err.field_names == ['email']
err = ValidationError('invalid email', field_names=['email'])
assert err.field_names == ['email']
def test_str(self):
err = ValidationError('invalid email')
assert str(err) == 'invalid email'
err2 = ValidationError('invalid email', 'email')
assert str(err2) == 'invalid email'
class TestMarshallingError:
def test_deprecated(self):
pytest.deprecated_call(MarshallingError, 'foo')
def test_can_store_field_and_field_name(self):
field_name = 'foo'
field = fields.Str()
err = MarshallingError('something went wrong', fields=[field],
field_names=[field_name])
assert err.fields == [field]
assert err.field_names == [field_name]
def test_can_be_raised_by_custom_field(self):
class MyField(fields.Field):
def _serialize(self, val, attr, obj):
raise MarshallingError('oops')
class MySchema(Schema):
foo = MyField()
s = MySchema()
result = s.dump({'foo': 42})
assert 'foo' in result.errors
assert result.errors['foo'] == ['oops']
class TestUnmarshallingError:
def test_deprecated(self):
pytest.deprecated_call(UnmarshallingError, 'foo')
def test_can_store_field_and_field_name(self):
field_name = 'foo'
field = fields.Str()
err = UnmarshallingError('something went wrong', fields=[field],
field_names=[field_name])
assert err.fields == [field]
assert err.field_names == [field_name]
def test_can_be_raised_by_validator(self):
def validator(val):
raise UnmarshallingError('oops')
class MySchema(Schema):
foo = fields.Field(validate=[validator])
s = MySchema()
result = s.load({'foo': 42})
assert 'foo' in result.errors
assert result.errors['foo'] == ['oops']
| mit |
juanka1331/VAN-applied-to-Nifti-images | final_scripts/tests_over_3dmask_generator.py | 1 | 1589 | import sys
import os
from lib.data_loader import utils_mask3d
sys.path.append(os.path.dirname(os.getcwd()))
from lib.utils import output_utils
from lib.data_loader import mri_atlas
from lib.data_loader import pet_atlas
from lib.data_loader import PET_stack_NORAD
from lib.data_loader import MRI_stack_NORAD
from lib.utils.os_aux import create_directories
import settings
region = 75
#images = "MRI"
images = "PET"
path_folder3D = os.path.join(settings.path_to_project, "folder3D")
path_folder_masks3d = os.path.join(path_folder3D, "masks3D")
path_mask = os.path.join(
path_folder_masks3d, "{1}_region:{0}".format(region, images))
create_directories([path_folder3D, path_folder_masks3d])
atlas = None
reshape_kind = None
colour_kind = None
stack_dict = None
if images == "MRI":
stack_dict = MRI_stack_NORAD.get_gm_stack()
reshape_kind = "A"
colour_kind = "Greys"
atlas = mri_atlas.load_atlas_mri()
elif images == "PET":
stack_dict = PET_stack_NORAD.get_full_stack()
reshape_kind = "F"
colour_kind = "jet"
total_size = stack_dict['total_size']
imgsize = stack_dict['imgsize']
voxels_index = stack_dict['voxel_index']
map_region_voxels = atlas[region] # index refered to nbground voxels
no_bg_region_voxels_index = voxels_index[map_region_voxels]
mask3d = utils_mask3d.generate_region_3dmaskatlas(
no_bg_region_voxels_index=no_bg_region_voxels_index,
reshape_kind=reshape_kind,
imgsize=imgsize,
totalsize=total_size)
output_utils.from_3d_image_to_nifti_file(
path_to_save=path_mask,
image3d=mask3d)
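# Illustrative follow-up (assumption, not part of the original script): the saved
# volume is a binary mask of atlas region 75 in the stack geometry, so a quick
# sanity check could look like
#     import nibabel as nib
#     mask = nib.load(path_mask + ".nii").get_data()  # ".nii" extension is an assumption
#     print(mask.sum(), len(map_region_voxels))       # expected to match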
| gpl-2.0 |
boberfly/gaffer | python/GafferUI/Button.py | 7 | 6312 | ##########################################################################
#
# Copyright (c) 2011, John Haddon. All rights reserved.
# Copyright (c) 2011-2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import six
import Gaffer
import GafferUI
from Qt import QtGui
from Qt import QtWidgets
from Qt import QtCore
class Button( GafferUI.Widget ) :
__palette = None
def __init__( self, text="", image=None, hasFrame=True, highlightOnOver=True, **kw ) :
GafferUI.Widget.__init__( self, QtWidgets.QPushButton(), **kw )
self.__highlightForHover = False
self._qtWidget().setAttribute( QtCore.Qt.WA_LayoutUsesWidgetRect )
# allow return and enter keys to click button
self._qtWidget().setAutoDefault( True )
self.setText( text )
self.setImage( image )
self.setHasFrame( hasFrame )
# using a WeakMethod to avoid circular references which would otherwise
# never be broken.
self._qtWidget().clicked.connect( Gaffer.WeakMethod( self.__clicked ) )
self.__clickedSignal = GafferUI.WidgetSignal()
# buttons appear to totally ignore the etch-disabled-text stylesheet option,
# and we really don't like the etching. the only effective way of disabling it
# seems to be to apply this palette which makes the etched text transparent.
if Button.__palette is None :
Button.__palette = QtGui.QPalette( QtWidgets.QApplication.instance().palette( self._qtWidget() ) )
Button.__palette.setColor( QtGui.QPalette.Disabled, QtGui.QPalette.Light, QtGui.QColor( 0, 0, 0, 0 ) )
self._qtWidget().setPalette( Button.__palette )
if highlightOnOver :
self.enterSignal().connect( Gaffer.WeakMethod( self.__enter ), scoped = False )
self.leaveSignal().connect( Gaffer.WeakMethod( self.__leave ), scoped = False )
def setHighlighted( self, highlighted ) :
GafferUI.Widget.setHighlighted( self, highlighted )
self.__updateIcon()
def setText( self, text ) :
assert( isinstance( text, six.string_types ) )
self._qtWidget().setText( text )
def getText( self ) :
return self._qtWidget().text()
def setImage( self, imageOrImageFileName ) :
assert( isinstance( imageOrImageFileName, ( six.string_types, GafferUI.Image, type( None ) ) ) )
if isinstance( imageOrImageFileName, six.string_types ) :
self.__image = GafferUI.Image( imageOrImageFileName )
else :
self.__image = imageOrImageFileName
self.__updateIcon()
def getImage( self ) :
return self.__image
def setHasFrame( self, hasFrame ) :
self._qtWidget().setProperty( "gafferWithFrame", hasFrame )
self._qtWidget().setSizePolicy(
QtWidgets.QSizePolicy.Minimum if hasFrame else QtWidgets.QSizePolicy.Fixed,
QtWidgets.QSizePolicy.Fixed
)
self._repolish()
def getHasFrame( self ) :
return self._qtWidget().property( "gafferWithFrame" )
def setEnabled( self, enabled ) :
# Once we're disabled, mouse leave events will be skipped, and we'll
# remain in a highlighted state once re-enabled.
if not enabled and self.__highlightForHover :
self.__highlightForHover = False
self.__updateIcon()
GafferUI.Widget.setEnabled( self, enabled )
def clickedSignal( self ) :
return self.__clickedSignal
def __clicked( self, *unusedArgs ) : # currently PyQt passes a "checked" argument and PySide doesn't
# workaround problem whereby not all text fields will have committed their contents
# into plugs when the button is pressed - this occurs particularly in the OpDialogue, and causes
# the op to run without the values the user sees in the ui. normally editingFinished is emitted by
# the text widget itself on a loss of focus, but unfortunately clicking on a button doesn't cause that
# focus loss. so we helpfully emit the signal ourselves here.
focusWidget = GafferUI.Widget._owner( QtWidgets.QApplication.focusWidget() )
if focusWidget is not None and hasattr( focusWidget, "editingFinishedSignal" ) :
focusWidget.editingFinishedSignal()( focusWidget )
self.clickedSignal()( self )
def __updateIcon( self ) :
if self.__image is None :
self._qtWidget().setIcon( QtGui.QIcon() )
return
# Qt's built-in disabled state generation doesn't work well with dark schemes
# There is no built-in support for QtGui.QIcon.Active in the default
# painter, which is why we have to juggle it here.
icon = self.__image._qtIcon( highlighted = self.getHighlighted() or self.__highlightForHover )
self._qtWidget().setIcon( icon )
self._qtWidget().setIconSize( self.__image._qtPixmap().size() )
def __enter( self, widget ) :
self.__highlightForHover = True
self.__updateIcon()
def __leave( self, widget ) :
self.__highlightForHover = False
self.__updateIcon()
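# Illustrative usage sketch (not part of the original file; the exact wiring is an
# assumption based on the signal API used above):
#     button = GafferUI.Button( "Click me" )
#     button.clickedSignal().connect( lambda widget : print( "clicked" ), scoped = False )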
| bsd-3-clause |
xxd3vin/spp-sdk | opt/Python27/Lib/site-packages/numpy/ma/core.py | 22 | 226541 | """
numpy.ma : a package to handle missing or invalid values.
This package was initially written for numarray by Paul F. Dubois
at Lawrence Livermore National Laboratory.
In 2006, the package was completely rewritten by Pierre Gerard-Marchant
(University of Georgia) to make the MaskedArray class a subclass of ndarray,
and to improve support of structured arrays.
Copyright 1999, 2000, 2001 Regents of the University of California.
Released for unlimited redistribution.
* Adapted for numpy_core 2005 by Travis Oliphant and (mainly) Paul Dubois.
* Subclassing of the base `ndarray` 2006 by Pierre Gerard-Marchant
(pgmdevlist_AT_gmail_DOT_com)
* Improvements suggested by Reggie Dugard (reggie_AT_merfinllc_DOT_com)
.. moduleauthor:: Pierre Gerard-Marchant
"""
# pylint: disable-msg=E1002
__author__ = "Pierre GF Gerard-Marchant"
__docformat__ = "restructuredtext en"
__all__ = ['MAError', 'MaskError', 'MaskType', 'MaskedArray',
'bool_',
'abs', 'absolute', 'add', 'all', 'allclose', 'allequal', 'alltrue',
'amax', 'amin', 'anom', 'anomalies', 'any', 'arange',
'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2',
'arctanh', 'argmax', 'argmin', 'argsort', 'around',
'array', 'asarray', 'asanyarray',
'bitwise_and', 'bitwise_or', 'bitwise_xor',
'ceil', 'choose', 'clip', 'common_fill_value', 'compress',
'compressed', 'concatenate', 'conjugate', 'copy', 'cos', 'cosh',
'count', 'cumprod', 'cumsum',
'default_fill_value', 'diag', 'diagonal', 'diff', 'divide', 'dump',
'dumps',
'empty', 'empty_like', 'equal', 'exp', 'expand_dims',
'fabs', 'flatten_mask', 'fmod', 'filled', 'floor', 'floor_divide',
'fix_invalid', 'flatten_structured_array', 'frombuffer', 'fromflex',
'fromfunction',
'getdata', 'getmask', 'getmaskarray', 'greater', 'greater_equal',
'harden_mask', 'hypot',
'identity', 'ids', 'indices', 'inner', 'innerproduct',
'isMA', 'isMaskedArray', 'is_mask', 'is_masked', 'isarray',
'left_shift', 'less', 'less_equal', 'load', 'loads', 'log', 'log2',
'log10', 'logical_and', 'logical_not', 'logical_or', 'logical_xor',
'make_mask', 'make_mask_descr', 'make_mask_none', 'mask_or',
'masked', 'masked_array', 'masked_equal', 'masked_greater',
'masked_greater_equal', 'masked_inside', 'masked_invalid',
'masked_less', 'masked_less_equal', 'masked_not_equal',
'masked_object', 'masked_outside', 'masked_print_option',
'masked_singleton', 'masked_values', 'masked_where', 'max', 'maximum',
'maximum_fill_value', 'mean', 'min', 'minimum', 'minimum_fill_value',
'mod', 'multiply', 'mvoid',
'negative', 'nomask', 'nonzero', 'not_equal',
'ones', 'outer', 'outerproduct',
'power', 'prod', 'product', 'ptp', 'put', 'putmask',
'rank', 'ravel', 'remainder', 'repeat', 'reshape', 'resize',
'right_shift', 'round_', 'round',
'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'sometrue',
'sort', 'soften_mask', 'sqrt', 'squeeze', 'std', 'subtract', 'sum',
'swapaxes',
'take', 'tan', 'tanh', 'trace', 'transpose', 'true_divide',
'var', 'where',
'zeros']
import cPickle
import numpy as np
from numpy import ndarray, amax, amin, iscomplexobj, bool_
from numpy import array as narray
import numpy.core.umath as umath
import numpy.core.numerictypes as ntypes
from numpy.compat import getargspec, formatargspec
from numpy import expand_dims as n_expand_dims
import warnings
import sys
if sys.version_info[0] >= 3:
from functools import reduce
MaskType = np.bool_
nomask = MaskType(0)
def doc_note(initialdoc, note):
"""
Adds a Notes section to an existing docstring.
"""
if initialdoc is None:
return
if note is None:
return initialdoc
newdoc = """
%s
Notes
-----
%s
"""
return newdoc % (initialdoc, note)
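# Illustrative note (not part of the original module): doc_note simply splices a
# reST "Notes" block under the original text, e.g.
#     doc_note("Return the sum.", "Masked entries are ignored.")
# yields a docstring containing "Return the sum." followed by
#     Notes
#     -----
#     Masked entries are ignored.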
def get_object_signature(obj):
"""
Get the signature from obj
"""
try:
sig = formatargspec(*getargspec(obj))
except TypeError, errmsg:
sig = ''
# msg = "Unable to retrieve the signature of %s '%s'\n"\
# "(Initial error message: %s)"
# warnings.warn(msg % (type(obj),
# getattr(obj, '__name__', '???'),
# errmsg))
return sig
#####--------------------------------------------------------------------------
#---- --- Exceptions ---
#####--------------------------------------------------------------------------
class MAError(Exception):
"""Class for masked array related errors."""
pass
class MaskError(MAError):
"Class for mask related errors."
pass
#####--------------------------------------------------------------------------
#---- --- Filling options ---
#####--------------------------------------------------------------------------
# b: boolean - c: complex - f: floats - i: integer - O: object - S: string
default_filler = {'b': True,
'c' : 1.e20 + 0.0j,
'f' : 1.e20,
'i' : 999999,
'O' : '?',
'S' : 'N/A',
'u' : 999999,
'V' : '???',
'U' : 'N/A',
}
max_filler = ntypes._minvals
max_filler.update([(k, -np.inf) for k in [np.float32, np.float64]])
min_filler = ntypes._maxvals
min_filler.update([(k, +np.inf) for k in [np.float32, np.float64]])
if 'float128' in ntypes.typeDict:
max_filler.update([(np.float128, -np.inf)])
min_filler.update([(np.float128, +np.inf)])
def default_fill_value(obj):
"""
Return the default fill value for the argument object.
The default filling value depends on the datatype of the input
array or the type of the input scalar:
======== ========
datatype default
======== ========
bool True
int 999999
float 1.e20
complex 1.e20+0j
object '?'
string 'N/A'
======== ========
Parameters
----------
obj : ndarray, dtype or scalar
The array data-type or scalar for which the default fill value
is returned.
Returns
-------
fill_value : scalar
The default fill value.
Examples
--------
>>> np.ma.default_fill_value(1)
999999
>>> np.ma.default_fill_value(np.array([1.1, 2., np.pi]))
1e+20
>>> np.ma.default_fill_value(np.dtype(complex))
(1e+20+0j)
"""
if hasattr(obj, 'dtype'):
defval = _check_fill_value(None, obj.dtype)
elif isinstance(obj, np.dtype):
if obj.subdtype:
defval = default_filler.get(obj.subdtype[0].kind, '?')
else:
defval = default_filler.get(obj.kind, '?')
elif isinstance(obj, float):
defval = default_filler['f']
elif isinstance(obj, int) or isinstance(obj, long):
defval = default_filler['i']
elif isinstance(obj, str):
defval = default_filler['S']
elif isinstance(obj, unicode):
defval = default_filler['U']
elif isinstance(obj, complex):
defval = default_filler['c']
else:
defval = default_filler['O']
return defval
def _recursive_extremum_fill_value(ndtype, extremum):
names = ndtype.names
if names:
deflist = []
for name in names:
fval = _recursive_extremum_fill_value(ndtype[name], extremum)
deflist.append(fval)
return tuple(deflist)
return extremum[ndtype]
def minimum_fill_value(obj):
"""
Return the maximum value that can be represented by the dtype of an object.
This function is useful for calculating a fill value suitable for
taking the minimum of an array with a given dtype.
Parameters
----------
obj : ndarray or dtype
        An object that can be queried for its numeric type.
Returns
-------
val : scalar
The maximum representable value.
Raises
------
TypeError
If `obj` isn't a suitable numeric type.
See Also
--------
maximum_fill_value : The inverse function.
set_fill_value : Set the filling value of a masked array.
MaskedArray.fill_value : Return current fill value.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.int8()
>>> ma.minimum_fill_value(a)
127
>>> a = np.int32()
>>> ma.minimum_fill_value(a)
2147483647
An array of numeric data can also be passed.
>>> a = np.array([1, 2, 3], dtype=np.int8)
>>> ma.minimum_fill_value(a)
127
>>> a = np.array([1, 2, 3], dtype=np.float32)
>>> ma.minimum_fill_value(a)
inf
"""
errmsg = "Unsuitable type for calculating minimum."
if hasattr(obj, 'dtype'):
return _recursive_extremum_fill_value(obj.dtype, min_filler)
elif isinstance(obj, float):
return min_filler[ntypes.typeDict['float_']]
elif isinstance(obj, int):
return min_filler[ntypes.typeDict['int_']]
elif isinstance(obj, long):
return min_filler[ntypes.typeDict['uint']]
elif isinstance(obj, np.dtype):
return min_filler[obj]
else:
raise TypeError(errmsg)
def maximum_fill_value(obj):
"""
Return the minimum value that can be represented by the dtype of an object.
This function is useful for calculating a fill value suitable for
taking the maximum of an array with a given dtype.
Parameters
----------
obj : {ndarray, dtype}
        An object that can be queried for its numeric type.
Returns
-------
val : scalar
The minimum representable value.
Raises
------
TypeError
If `obj` isn't a suitable numeric type.
See Also
--------
minimum_fill_value : The inverse function.
set_fill_value : Set the filling value of a masked array.
MaskedArray.fill_value : Return current fill value.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.int8()
>>> ma.maximum_fill_value(a)
-128
>>> a = np.int32()
>>> ma.maximum_fill_value(a)
-2147483648
An array of numeric data can also be passed.
>>> a = np.array([1, 2, 3], dtype=np.int8)
>>> ma.maximum_fill_value(a)
-128
>>> a = np.array([1, 2, 3], dtype=np.float32)
>>> ma.maximum_fill_value(a)
-inf
"""
errmsg = "Unsuitable type for calculating maximum."
if hasattr(obj, 'dtype'):
return _recursive_extremum_fill_value(obj.dtype, max_filler)
elif isinstance(obj, float):
return max_filler[ntypes.typeDict['float_']]
elif isinstance(obj, int):
return max_filler[ntypes.typeDict['int_']]
elif isinstance(obj, long):
return max_filler[ntypes.typeDict['uint']]
elif isinstance(obj, np.dtype):
return max_filler[obj]
else:
raise TypeError(errmsg)
def _recursive_set_default_fill_value(dtypedescr):
deflist = []
for currentdescr in dtypedescr:
currenttype = currentdescr[1]
if isinstance(currenttype, list):
deflist.append(tuple(_recursive_set_default_fill_value(currenttype)))
else:
deflist.append(default_fill_value(np.dtype(currenttype)))
return tuple(deflist)
def _recursive_set_fill_value(fillvalue, dtypedescr):
fillvalue = np.resize(fillvalue, len(dtypedescr))
output_value = []
for (fval, descr) in zip(fillvalue, dtypedescr):
cdtype = descr[1]
if isinstance(cdtype, list):
output_value.append(tuple(_recursive_set_fill_value(fval, cdtype)))
else:
output_value.append(np.array(fval, dtype=cdtype).item())
return tuple(output_value)
def _check_fill_value(fill_value, ndtype):
"""
Private function validating the given `fill_value` for the given dtype.
If fill_value is None, it is set to the default corresponding to the dtype
if this latter is standard (no fields). If the datatype is flexible (named
fields), fill_value is set to a tuple whose elements are the default fill
values corresponding to each field.
If fill_value is not None, its value is forced to the given dtype.
"""
ndtype = np.dtype(ndtype)
fields = ndtype.fields
if fill_value is None:
if fields:
descr = ndtype.descr
fill_value = np.array(_recursive_set_default_fill_value(descr),
dtype=ndtype,)
else:
fill_value = default_fill_value(ndtype)
elif fields:
fdtype = [(_[0], _[1]) for _ in ndtype.descr]
if isinstance(fill_value, (ndarray, np.void)):
try:
fill_value = np.array(fill_value, copy=False, dtype=fdtype)
except ValueError:
err_msg = "Unable to transform %s to dtype %s"
raise ValueError(err_msg % (fill_value, fdtype))
else:
descr = ndtype.descr
fill_value = np.asarray(fill_value, dtype=object)
fill_value = np.array(_recursive_set_fill_value(fill_value, descr),
dtype=ndtype)
else:
if isinstance(fill_value, basestring) and (ndtype.char not in 'SV'):
fill_value = default_fill_value(ndtype)
else:
# In case we want to convert 1e+20 to int...
try:
fill_value = np.array(fill_value, copy=False, dtype=ndtype)#.item()
except OverflowError:
fill_value = default_fill_value(ndtype)
return np.array(fill_value)
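# Illustrative note (not part of the original module): the validated fill value
# always comes back as an ndarray of the requested dtype, e.g.
#     _check_fill_value(None, np.dtype(int))               # array(999999)
#     _check_fill_value(None, [('a', int), ('b', float)])  # array((999999, 1e+20), dtype=...)
#     _check_fill_value(3.0, np.dtype(int))                 # array(3)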
def set_fill_value(a, fill_value):
"""
Set the filling value of a, if a is a masked array.
This function changes the fill value of the masked array `a` in place.
If `a` is not a masked array, the function returns silently, without
doing anything.
Parameters
----------
a : array_like
Input array.
fill_value : dtype
Filling value. A consistency test is performed to make sure
the value is compatible with the dtype of `a`.
Returns
-------
None
Nothing returned by this function.
See Also
--------
maximum_fill_value : Return the default fill value for a dtype.
MaskedArray.fill_value : Return current fill value.
MaskedArray.set_fill_value : Equivalent method.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(5)
>>> a
array([0, 1, 2, 3, 4])
>>> a = ma.masked_where(a < 3, a)
>>> a
masked_array(data = [-- -- -- 3 4],
mask = [ True True True False False],
fill_value=999999)
>>> ma.set_fill_value(a, -999)
>>> a
masked_array(data = [-- -- -- 3 4],
mask = [ True True True False False],
fill_value=-999)
Nothing happens if `a` is not a masked array.
>>> a = range(5)
>>> a
[0, 1, 2, 3, 4]
>>> ma.set_fill_value(a, 100)
>>> a
[0, 1, 2, 3, 4]
>>> a = np.arange(5)
>>> a
array([0, 1, 2, 3, 4])
>>> ma.set_fill_value(a, 100)
>>> a
array([0, 1, 2, 3, 4])
"""
if isinstance(a, MaskedArray):
a.set_fill_value(fill_value)
return
def get_fill_value(a):
"""
Return the filling value of a, if any. Otherwise, returns the
default filling value for that type.
"""
if isinstance(a, MaskedArray):
result = a.fill_value
else:
result = default_fill_value(a)
return result
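# Illustrative note (not part of the original module): get_fill_value falls back to
# the dtype default when `a` is not a MaskedArray, e.g.
#     get_fill_value(np.ma.array([1, 2], fill_value=-1))  # -1
#     get_fill_value(np.arange(3))                          # 999999, the default for integers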
def common_fill_value(a, b):
"""
Return the common filling value of two masked arrays, if any.
If ``a.fill_value == b.fill_value``, return the fill value,
otherwise return None.
Parameters
----------
a, b : MaskedArray
The masked arrays for which to compare fill values.
Returns
-------
fill_value : scalar or None
The common fill value, or None.
Examples
--------
>>> x = np.ma.array([0, 1.], fill_value=3)
>>> y = np.ma.array([0, 1.], fill_value=3)
>>> np.ma.common_fill_value(x, y)
3.0
"""
t1 = get_fill_value(a)
t2 = get_fill_value(b)
if t1 == t2:
return t1
return None
#####--------------------------------------------------------------------------
def filled(a, fill_value=None):
"""
Return input as an array with masked data replaced by a fill value.
If `a` is not a `MaskedArray`, `a` itself is returned.
If `a` is a `MaskedArray` and `fill_value` is None, `fill_value` is set to
``a.fill_value``.
Parameters
----------
a : MaskedArray or array_like
An input object.
fill_value : scalar, optional
Filling value. Default is None.
Returns
-------
a : ndarray
The filled array.
See Also
--------
compressed
Examples
--------
>>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0],
... [1, 0, 0],
... [0, 0, 0]])
>>> x.filled()
array([[999999, 1, 2],
[999999, 4, 5],
[ 6, 7, 8]])
"""
if hasattr(a, 'filled'):
return a.filled(fill_value)
elif isinstance(a, ndarray):
# Should we check for contiguity ? and a.flags['CONTIGUOUS']:
return a
elif isinstance(a, dict):
return np.array(a, 'O')
else:
return np.array(a)
#####--------------------------------------------------------------------------
def get_masked_subclass(*arrays):
"""
Return the youngest subclass of MaskedArray from a list of (masked) arrays.
In case of siblings, the first listed takes over.
"""
if len(arrays) == 1:
arr = arrays[0]
if isinstance(arr, MaskedArray):
rcls = type(arr)
else:
rcls = MaskedArray
else:
arrcls = [type(a) for a in arrays]
rcls = arrcls[0]
if not issubclass(rcls, MaskedArray):
rcls = MaskedArray
for cls in arrcls[1:]:
if issubclass(cls, rcls):
rcls = cls
# Don't return MaskedConstant as result: revert to MaskedArray
if rcls.__name__ == 'MaskedConstant':
return MaskedArray
return rcls
#####--------------------------------------------------------------------------
def getdata(a, subok=True):
"""
Return the data of a masked array as an ndarray.
Return the data of `a` (if any) as an ndarray if `a` is a ``MaskedArray``,
else return `a` as a ndarray or subclass (depending on `subok`) if not.
Parameters
----------
a : array_like
Input ``MaskedArray``, alternatively a ndarray or a subclass thereof.
subok : bool
Whether to force the output to be a `pure` ndarray (False) or to
return a subclass of ndarray if appropriate (True, default).
See Also
--------
getmask : Return the mask of a masked array, or nomask.
getmaskarray : Return the mask of a masked array, or full array of False.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.masked_equal([[1,2],[3,4]], 2)
>>> a
masked_array(data =
[[1 --]
[3 4]],
mask =
[[False True]
[False False]],
fill_value=999999)
>>> ma.getdata(a)
array([[1, 2],
[3, 4]])
Equivalently use the ``MaskedArray`` `data` attribute.
>>> a.data
array([[1, 2],
[3, 4]])
"""
try:
data = a._data
except AttributeError:
data = np.array(a, copy=False, subok=subok)
if not subok:
return data.view(ndarray)
return data
get_data = getdata
def fix_invalid(a, mask=nomask, copy=True, fill_value=None):
"""
Return input with invalid data masked and replaced by a fill value.
Invalid data means values of `nan`, `inf`, etc.
Parameters
----------
a : array_like
Input array, a (subclass of) ndarray.
copy : bool, optional
Whether to use a copy of `a` (True) or to fix `a` in place (False).
Default is True.
fill_value : scalar, optional
Value used for fixing invalid data. Default is None, in which case
the ``a.fill_value`` is used.
Returns
-------
b : MaskedArray
The input array with invalid entries fixed.
Notes
-----
A copy is performed by default.
Examples
--------
>>> x = np.ma.array([1., -1, np.nan, np.inf], mask=[1] + [0]*3)
>>> x
masked_array(data = [-- -1.0 nan inf],
mask = [ True False False False],
fill_value = 1e+20)
>>> np.ma.fix_invalid(x)
masked_array(data = [-- -1.0 -- --],
mask = [ True False True True],
fill_value = 1e+20)
>>> fixed = np.ma.fix_invalid(x)
>>> fixed.data
array([ 1.00000000e+00, -1.00000000e+00, 1.00000000e+20,
1.00000000e+20])
>>> x.data
array([ 1., -1., NaN, Inf])
"""
a = masked_array(a, copy=copy, mask=mask, subok=True)
#invalid = (numpy.isnan(a._data) | numpy.isinf(a._data))
invalid = np.logical_not(np.isfinite(a._data))
if not invalid.any():
return a
a._mask |= invalid
if fill_value is None:
fill_value = a.fill_value
a._data[invalid] = fill_value
return a
#####--------------------------------------------------------------------------
#---- --- Ufuncs ---
#####--------------------------------------------------------------------------
ufunc_domain = {}
ufunc_fills = {}
class _DomainCheckInterval:
"""
Define a valid interval, so that :
``domain_check_interval(a,b)(x) == True`` where
``x < a`` or ``x > b``.
"""
def __init__(self, a, b):
"domain_check_interval(a,b)(x) = true where x < a or y > b"
if (a > b):
(a, b) = (b, a)
self.a = a
self.b = b
def __call__ (self, x):
"Execute the call behavior."
return umath.logical_or(umath.greater (x, self.b),
umath.less(x, self.a))
class _DomainTan:
"""Define a valid interval for the `tan` function, so that:
``domain_tan(eps) = True`` where ``abs(cos(x)) < eps``
"""
def __init__(self, eps):
"domain_tan(eps) = true where abs(cos(x)) < eps)"
self.eps = eps
def __call__ (self, x):
"Executes the call behavior."
return umath.less(umath.absolute(umath.cos(x)), self.eps)
class _DomainSafeDivide:
"""Define a domain for safe division."""
def __init__ (self, tolerance=None):
self.tolerance = tolerance
def __call__ (self, a, b):
# Delay the selection of the tolerance to here in order to reduce numpy
# import times. The calculation of these parameters is a substantial
# component of numpy's import time.
if self.tolerance is None:
self.tolerance = np.finfo(float).tiny
return umath.absolute(a) * self.tolerance >= umath.absolute(b)
class _DomainGreater:
"""DomainGreater(v)(x) is True where x <= v."""
def __init__(self, critical_value):
"DomainGreater(v)(x) = true where x <= v"
self.critical_value = critical_value
def __call__ (self, x):
"Executes the call behavior."
return umath.less_equal(x, self.critical_value)
class _DomainGreaterEqual:
"""DomainGreaterEqual(v)(x) is True where x < v."""
def __init__(self, critical_value):
"DomainGreaterEqual(v)(x) = true where x < v"
self.critical_value = critical_value
def __call__ (self, x):
"Executes the call behavior."
return umath.less(x, self.critical_value)
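# Illustrative note (not in the original source): these small domain objects are
# what _MaskedUnaryOperation and its relatives use to decide which inputs must be
# masked; e.g. sqrt is built further down as
#     sqrt = _MaskedUnaryOperation(umath.sqrt, 0.0, _DomainGreaterEqual(0.0))
# so sqrt of a masked array masks negative entries instead of producing nan.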
#..............................................................................
class _MaskedUnaryOperation:
"""
Defines masked version of unary operations, where invalid values are
pre-masked.
Parameters
----------
mufunc : callable
The function for which to define a masked version. Made available
as ``_MaskedUnaryOperation.f``.
fill : scalar, optional
Filling value, default is 0.
domain : class instance
Domain for the function. Should be one of the ``_Domain*``
classes. Default is None.
"""
def __init__ (self, mufunc, fill=0, domain=None):
""" _MaskedUnaryOperation(aufunc, fill=0, domain=None)
aufunc(fill) must be defined
self(x) returns aufunc(x)
with masked values where domain(x) is true or getmask(x) is true.
"""
self.f = mufunc
self.fill = fill
self.domain = domain
self.__doc__ = getattr(mufunc, "__doc__", str(mufunc))
self.__name__ = getattr(mufunc, "__name__", str(mufunc))
ufunc_domain[mufunc] = domain
ufunc_fills[mufunc] = fill
#
def __call__ (self, a, *args, **kwargs):
"Execute the call behavior."
d = getdata(a)
# Case 1.1. : Domained function
if self.domain is not None:
# Save the error status
err_status_ini = np.geterr()
try:
np.seterr(divide='ignore', invalid='ignore')
result = self.f(d, *args, **kwargs)
finally:
np.seterr(**err_status_ini)
# Make a mask
m = ~umath.isfinite(result)
m |= self.domain(d)
m |= getmask(a)
# Case 1.2. : Function without a domain
else:
# Get the result and the mask
result = self.f(d, *args, **kwargs)
m = getmask(a)
        # Case 2.1. : The result is a scalar
if not result.ndim:
if m:
return masked
return result
# Case 2.2. The result is an array
# We need to fill the invalid data back w/ the input
# Now, that's plain silly: in C, we would just skip the element and keep
# the original, but we do have to do it that way in Python
if m is not nomask:
# In case result has a lower dtype than the inputs (as in equal)
try:
np.putmask(result, m, d)
except TypeError:
pass
# Transform to
if isinstance(a, MaskedArray):
subtype = type(a)
else:
subtype = MaskedArray
result = result.view(subtype)
result._mask = m
result._update_from(a)
return result
#
def __str__ (self):
return "Masked version of %s. [Invalid values are masked]" % str(self.f)
class _MaskedBinaryOperation:
"""
Define masked version of binary operations, where invalid
values are pre-masked.
Parameters
----------
mbfunc : function
The function for which to define a masked version. Made available
as ``_MaskedBinaryOperation.f``.
domain : class instance
Default domain for the function. Should be one of the ``_Domain*``
classes. Default is None.
fillx : scalar, optional
Filling value for the first argument, default is 0.
filly : scalar, optional
Filling value for the second argument, default is 0.
"""
def __init__ (self, mbfunc, fillx=0, filly=0):
"""abfunc(fillx, filly) must be defined.
abfunc(x, filly) = x for all x to enable reduce.
"""
self.f = mbfunc
self.fillx = fillx
self.filly = filly
self.__doc__ = getattr(mbfunc, "__doc__", str(mbfunc))
self.__name__ = getattr(mbfunc, "__name__", str(mbfunc))
ufunc_domain[mbfunc] = None
ufunc_fills[mbfunc] = (fillx, filly)
def __call__ (self, a, b, *args, **kwargs):
"Execute the call behavior."
# Get the data, as ndarray
(da, db) = (getdata(a, subok=False), getdata(b, subok=False))
# Get the mask
(ma, mb) = (getmask(a), getmask(b))
if ma is nomask:
if mb is nomask:
m = nomask
else:
m = umath.logical_or(getmaskarray(a), mb)
elif mb is nomask:
m = umath.logical_or(ma, getmaskarray(b))
else:
m = umath.logical_or(ma, mb)
# Get the result
err_status_ini = np.geterr()
try:
np.seterr(divide='ignore', invalid='ignore')
result = self.f(da, db, *args, **kwargs)
finally:
np.seterr(**err_status_ini)
# Case 1. : scalar
if not result.ndim:
if m:
return masked
return result
# Case 2. : array
# Revert result to da where masked
if m.any():
np.putmask(result, m, 0)
# This only makes sense if the operation preserved the dtype
if result.dtype == da.dtype:
result += m * da
# Transforms to a (subclass of) MaskedArray
result = result.view(get_masked_subclass(a, b))
result._mask = m
# Update the optional info from the inputs
if isinstance(b, MaskedArray):
if isinstance(a, MaskedArray):
result._update_from(a)
else:
result._update_from(b)
elif isinstance(a, MaskedArray):
result._update_from(a)
return result
def reduce(self, target, axis=0, dtype=None):
"""Reduce `target` along the given `axis`."""
if isinstance(target, MaskedArray):
tclass = type(target)
else:
tclass = MaskedArray
m = getmask(target)
t = filled(target, self.filly)
if t.shape == ():
t = t.reshape(1)
if m is not nomask:
m = make_mask(m, copy=1)
m.shape = (1,)
if m is nomask:
return self.f.reduce(t, axis).view(tclass)
t = t.view(tclass)
t._mask = m
tr = self.f.reduce(getdata(t), axis, dtype=dtype or t.dtype)
mr = umath.logical_and.reduce(m, axis)
tr = tr.view(tclass)
if mr.ndim > 0:
tr._mask = mr
return tr
elif mr:
return masked
return tr
def outer (self, a, b):
"""Return the function applied to the outer product of a and b.
"""
ma = getmask(a)
mb = getmask(b)
if ma is nomask and mb is nomask:
m = nomask
else:
ma = getmaskarray(a)
mb = getmaskarray(b)
m = umath.logical_or.outer(ma, mb)
if (not m.ndim) and m:
return masked
(da, db) = (getdata(a), getdata(b))
d = self.f.outer(da, db)
if m is not nomask:
np.putmask(d, m, da)
if d.shape:
d = d.view(get_masked_subclass(a, b))
d._mask = m
return d
def accumulate (self, target, axis=0):
"""Accumulate `target` along `axis` after filling with y fill
value.
"""
if isinstance(target, MaskedArray):
tclass = type(target)
else:
tclass = MaskedArray
t = filled(target, self.filly)
return self.f.accumulate(t, axis).view(tclass)
def __str__ (self):
return "Masked version of " + str(self.f)
class _DomainedBinaryOperation:
"""
Define binary operations that have a domain, like divide.
They have no reduce, outer or accumulate.
Parameters
----------
mbfunc : function
The function for which to define a masked version. Made available
as ``_DomainedBinaryOperation.f``.
domain : class instance
Default domain for the function. Should be one of the ``_Domain*``
classes.
fillx : scalar, optional
Filling value for the first argument, default is 0.
filly : scalar, optional
Filling value for the second argument, default is 0.
"""
def __init__ (self, dbfunc, domain, fillx=0, filly=0):
"""abfunc(fillx, filly) must be defined.
abfunc(x, filly) = x for all x to enable reduce.
"""
self.f = dbfunc
self.domain = domain
self.fillx = fillx
self.filly = filly
self.__doc__ = getattr(dbfunc, "__doc__", str(dbfunc))
self.__name__ = getattr(dbfunc, "__name__", str(dbfunc))
ufunc_domain[dbfunc] = domain
ufunc_fills[dbfunc] = (fillx, filly)
def __call__(self, a, b, *args, **kwargs):
"Execute the call behavior."
# Get the data and the mask
(da, db) = (getdata(a, subok=False), getdata(b, subok=False))
(ma, mb) = (getmask(a), getmask(b))
# Get the result
err_status_ini = np.geterr()
try:
np.seterr(divide='ignore', invalid='ignore')
result = self.f(da, db, *args, **kwargs)
finally:
np.seterr(**err_status_ini)
# Get the mask as a combination of ma, mb and invalid
m = ~umath.isfinite(result)
m |= ma
m |= mb
# Apply the domain
domain = ufunc_domain.get(self.f, None)
if domain is not None:
m |= filled(domain(da, db), True)
# Take care of the scalar case first
if (not m.ndim):
if m:
return masked
else:
return result
# When the mask is True, put back da
np.putmask(result, m, 0)
result += m * da
result = result.view(get_masked_subclass(a, b))
result._mask = m
if isinstance(b, MaskedArray):
if isinstance(a, MaskedArray):
result._update_from(a)
else:
result._update_from(b)
elif isinstance(a, MaskedArray):
result._update_from(a)
return result
def __str__ (self):
return "Masked version of " + str(self.f)
#..............................................................................
# Unary ufuncs
exp = _MaskedUnaryOperation(umath.exp)
conjugate = _MaskedUnaryOperation(umath.conjugate)
sin = _MaskedUnaryOperation(umath.sin)
cos = _MaskedUnaryOperation(umath.cos)
tan = _MaskedUnaryOperation(umath.tan)
arctan = _MaskedUnaryOperation(umath.arctan)
arcsinh = _MaskedUnaryOperation(umath.arcsinh)
sinh = _MaskedUnaryOperation(umath.sinh)
cosh = _MaskedUnaryOperation(umath.cosh)
tanh = _MaskedUnaryOperation(umath.tanh)
abs = absolute = _MaskedUnaryOperation(umath.absolute)
fabs = _MaskedUnaryOperation(umath.fabs)
negative = _MaskedUnaryOperation(umath.negative)
floor = _MaskedUnaryOperation(umath.floor)
ceil = _MaskedUnaryOperation(umath.ceil)
around = _MaskedUnaryOperation(np.round_)
logical_not = _MaskedUnaryOperation(umath.logical_not)
# Domained unary ufuncs .......................................................
sqrt = _MaskedUnaryOperation(umath.sqrt, 0.0,
_DomainGreaterEqual(0.0))
log = _MaskedUnaryOperation(umath.log, 1.0,
_DomainGreater(0.0))
log2 = _MaskedUnaryOperation(umath.log2, 1.0,
_DomainGreater(0.0))
log10 = _MaskedUnaryOperation(umath.log10, 1.0,
_DomainGreater(0.0))
tan = _MaskedUnaryOperation(umath.tan, 0.0,
_DomainTan(1e-35))
arcsin = _MaskedUnaryOperation(umath.arcsin, 0.0,
_DomainCheckInterval(-1.0, 1.0))
arccos = _MaskedUnaryOperation(umath.arccos, 0.0,
_DomainCheckInterval(-1.0, 1.0))
arccosh = _MaskedUnaryOperation(umath.arccosh, 1.0,
_DomainGreaterEqual(1.0))
arctanh = _MaskedUnaryOperation(umath.arctanh, 0.0,
_DomainCheckInterval(-1.0 + 1e-15, 1.0 - 1e-15))
# Binary ufuncs ...............................................................
add = _MaskedBinaryOperation(umath.add)
subtract = _MaskedBinaryOperation(umath.subtract)
multiply = _MaskedBinaryOperation(umath.multiply, 1, 1)
arctan2 = _MaskedBinaryOperation(umath.arctan2, 0.0, 1.0)
equal = _MaskedBinaryOperation(umath.equal)
equal.reduce = None
not_equal = _MaskedBinaryOperation(umath.not_equal)
not_equal.reduce = None
less_equal = _MaskedBinaryOperation(umath.less_equal)
less_equal.reduce = None
greater_equal = _MaskedBinaryOperation(umath.greater_equal)
greater_equal.reduce = None
less = _MaskedBinaryOperation(umath.less)
less.reduce = None
greater = _MaskedBinaryOperation(umath.greater)
greater.reduce = None
logical_and = _MaskedBinaryOperation(umath.logical_and)
alltrue = _MaskedBinaryOperation(umath.logical_and, 1, 1).reduce
logical_or = _MaskedBinaryOperation(umath.logical_or)
sometrue = logical_or.reduce
logical_xor = _MaskedBinaryOperation(umath.logical_xor)
bitwise_and = _MaskedBinaryOperation(umath.bitwise_and)
bitwise_or = _MaskedBinaryOperation(umath.bitwise_or)
bitwise_xor = _MaskedBinaryOperation(umath.bitwise_xor)
hypot = _MaskedBinaryOperation(umath.hypot)
# Domained binary ufuncs ......................................................
divide = _DomainedBinaryOperation(umath.divide, _DomainSafeDivide(), 0, 1)
true_divide = _DomainedBinaryOperation(umath.true_divide,
_DomainSafeDivide(), 0, 1)
floor_divide = _DomainedBinaryOperation(umath.floor_divide,
_DomainSafeDivide(), 0, 1)
remainder = _DomainedBinaryOperation(umath.remainder,
_DomainSafeDivide(), 0, 1)
fmod = _DomainedBinaryOperation(umath.fmod, _DomainSafeDivide(), 0, 1)
mod = _DomainedBinaryOperation(umath.mod, _DomainSafeDivide(), 0, 1)
#####--------------------------------------------------------------------------
#---- --- Mask creation functions ---
#####--------------------------------------------------------------------------
def _recursive_make_descr(datatype, newtype=bool_):
"Private function allowing recursion in make_descr."
# Do we have some name fields ?
if datatype.names:
descr = []
for name in datatype.names:
field = datatype.fields[name]
if len(field) == 3:
# Prepend the title to the name
name = (field[-1], name)
descr.append((name, _recursive_make_descr(field[0], newtype)))
return descr
# Is this some kind of composite a la (np.float,2)
elif datatype.subdtype:
mdescr = list(datatype.subdtype)
mdescr[0] = newtype
return tuple(mdescr)
else:
return newtype
def make_mask_descr(ndtype):
"""
Construct a dtype description list from a given dtype.
Returns a new dtype object, with the type of all fields in `ndtype` to a
boolean type. Field names are not altered.
Parameters
----------
ndtype : dtype
The dtype to convert.
Returns
-------
result : dtype
A dtype that looks like `ndtype`, the type of all fields is boolean.
Examples
--------
>>> import numpy.ma as ma
>>> dtype = np.dtype({'names':['foo', 'bar'],
'formats':[np.float32, np.int]})
>>> dtype
dtype([('foo', '<f4'), ('bar', '<i4')])
>>> ma.make_mask_descr(dtype)
dtype([('foo', '|b1'), ('bar', '|b1')])
>>> ma.make_mask_descr(np.float32)
<type 'numpy.bool_'>
"""
# Make sure we do have a dtype
if not isinstance(ndtype, np.dtype):
ndtype = np.dtype(ndtype)
return np.dtype(_recursive_make_descr(ndtype, np.bool))
def getmask(a):
"""
Return the mask of a masked array, or nomask.
Return the mask of `a` as an ndarray if `a` is a `MaskedArray` and the
mask is not `nomask`, else return `nomask`. To guarantee a full array
of booleans of the same shape as a, use `getmaskarray`.
Parameters
----------
a : array_like
Input `MaskedArray` for which the mask is required.
See Also
--------
getdata : Return the data of a masked array as an ndarray.
getmaskarray : Return the mask of a masked array, or full array of False.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.masked_equal([[1,2],[3,4]], 2)
>>> a
masked_array(data =
[[1 --]
[3 4]],
mask =
[[False True]
[False False]],
fill_value=999999)
>>> ma.getmask(a)
array([[False, True],
[False, False]], dtype=bool)
Equivalently use the `MaskedArray` `mask` attribute.
>>> a.mask
array([[False, True],
[False, False]], dtype=bool)
Result when mask == `nomask`
>>> b = ma.masked_array([[1,2],[3,4]])
>>> b
masked_array(data =
[[1 2]
[3 4]],
mask =
False,
fill_value=999999)
>>> ma.nomask
False
>>> ma.getmask(b) == ma.nomask
True
>>> b.mask == ma.nomask
True
"""
return getattr(a, '_mask', nomask)
get_mask = getmask
def getmaskarray(arr):
"""
Return the mask of a masked array, or full boolean array of False.
Return the mask of `arr` as an ndarray if `arr` is a `MaskedArray` and
the mask is not `nomask`, else return a full boolean array of False of
the same shape as `arr`.
Parameters
----------
arr : array_like
Input `MaskedArray` for which the mask is required.
See Also
--------
getmask : Return the mask of a masked array, or nomask.
getdata : Return the data of a masked array as an ndarray.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.masked_equal([[1,2],[3,4]], 2)
>>> a
masked_array(data =
[[1 --]
[3 4]],
mask =
[[False True]
[False False]],
fill_value=999999)
>>> ma.getmaskarray(a)
array([[False, True],
[False, False]], dtype=bool)
Result when mask == ``nomask``
>>> b = ma.masked_array([[1,2],[3,4]])
>>> b
masked_array(data =
[[1 2]
[3 4]],
mask =
False,
fill_value=999999)
    >>> ma.getmaskarray(b)
array([[False, False],
[False, False]], dtype=bool)
"""
mask = getmask(arr)
if mask is nomask:
mask = make_mask_none(np.shape(arr), getdata(arr).dtype)
return mask
def is_mask(m):
"""
Return True if m is a valid, standard mask.
This function does not check the contents of the input, only that the
type is MaskType. In particular, this function returns False if the
mask has a flexible dtype.
Parameters
----------
m : array_like
Array to test.
Returns
-------
result : bool
True if `m.dtype.type` is MaskType, False otherwise.
See Also
--------
isMaskedArray : Test whether input is an instance of MaskedArray.
Examples
--------
>>> import numpy.ma as ma
>>> m = ma.masked_equal([0, 1, 0, 2, 3], 0)
>>> m
masked_array(data = [-- 1 -- 2 3],
mask = [ True False True False False],
fill_value=999999)
>>> ma.is_mask(m)
False
>>> ma.is_mask(m.mask)
True
Input must be an ndarray (or have similar attributes)
for it to be considered a valid mask.
>>> m = [False, True, False]
>>> ma.is_mask(m)
False
>>> m = np.array([False, True, False])
>>> m
array([False, True, False], dtype=bool)
>>> ma.is_mask(m)
True
Arrays with structured (flexible) dtypes don't return True.
>>> dtype = np.dtype({'names':['monty', 'pithon'],
'formats':[np.bool, np.bool]})
>>> dtype
dtype([('monty', '|b1'), ('pithon', '|b1')])
>>> m = np.array([(True, False), (False, True), (True, False)],
dtype=dtype)
>>> m
array([(True, False), (False, True), (True, False)],
dtype=[('monty', '|b1'), ('pithon', '|b1')])
>>> ma.is_mask(m)
False
"""
try:
return m.dtype.type is MaskType
except AttributeError:
return False
def make_mask(m, copy=False, shrink=True, dtype=MaskType):
"""
Create a boolean mask from an array.
Return `m` as a boolean mask, creating a copy if necessary or requested.
The function can accept any sequence that is convertible to integers,
or ``nomask``. The contents are not required to be 0s and 1s: values
of 0 are interpreted as False, everything else as True.
Parameters
----------
m : array_like
Potential mask.
copy : bool, optional
Whether to return a copy of `m` (True) or `m` itself (False).
shrink : bool, optional
Whether to shrink `m` to ``nomask`` if all its values are False.
dtype : dtype, optional
Data-type of the output mask. By default, the output mask has
a dtype of MaskType (bool). If the dtype is flexible, each field
has a boolean dtype.
Returns
-------
result : ndarray
A boolean mask derived from `m`.
Examples
--------
>>> import numpy.ma as ma
>>> m = [True, False, True, True]
>>> ma.make_mask(m)
array([ True, False, True, True], dtype=bool)
>>> m = [1, 0, 1, 1]
>>> ma.make_mask(m)
array([ True, False, True, True], dtype=bool)
>>> m = [1, 0, 2, -3]
>>> ma.make_mask(m)
array([ True, False, True, True], dtype=bool)
Effect of the `shrink` parameter.
>>> m = np.zeros(4)
>>> m
array([ 0., 0., 0., 0.])
>>> ma.make_mask(m)
False
>>> ma.make_mask(m, shrink=False)
array([False, False, False, False], dtype=bool)
Using a flexible `dtype`.
>>> m = [1, 0, 1, 1]
>>> n = [0, 1, 0, 0]
>>> arr = []
>>> for man, mouse in zip(m, n):
... arr.append((man, mouse))
>>> arr
[(1, 0), (0, 1), (1, 0), (1, 0)]
>>> dtype = np.dtype({'names':['man', 'mouse'],
'formats':[np.int, np.int]})
>>> arr = np.array(arr, dtype=dtype)
>>> arr
array([(1, 0), (0, 1), (1, 0), (1, 0)],
dtype=[('man', '<i4'), ('mouse', '<i4')])
>>> ma.make_mask(arr, dtype=dtype)
array([(True, False), (False, True), (True, False), (True, False)],
dtype=[('man', '|b1'), ('mouse', '|b1')])
"""
if m is nomask:
return nomask
elif isinstance(m, ndarray):
# We won't return after this point to make sure we can shrink the mask
# Fill the mask in case there are missing data
m = filled(m, True)
# Make sure the input dtype is valid
dtype = make_mask_descr(dtype)
if m.dtype == dtype:
if copy:
result = m.copy()
else:
result = m
else:
result = np.array(m, dtype=dtype, copy=copy)
else:
result = np.array(filled(m, True), dtype=MaskType)
# Bas les masques !
if shrink and (not result.dtype.names) and (not result.any()):
return nomask
else:
return result
def make_mask_none(newshape, dtype=None):
"""
Return a boolean mask of the given shape, filled with False.
This function returns a boolean ndarray with all entries False, that can
be used in common mask manipulations. If a structured dtype is specified, the
type of each field is converted to a boolean type.
Parameters
----------
newshape : tuple
A tuple indicating the shape of the mask.
dtype : {None, dtype}, optional
If None, use a MaskType instance. Otherwise, use a new datatype with
the same fields as `dtype`, converted to boolean types.
Returns
-------
result : ndarray
An ndarray of appropriate shape and dtype, filled with False.
See Also
--------
make_mask : Create a boolean mask from an array.
make_mask_descr : Construct a dtype description list from a given dtype.
Examples
--------
>>> import numpy.ma as ma
>>> ma.make_mask_none((3,))
array([False, False, False], dtype=bool)
Defining a more complex dtype.
>>> dtype = np.dtype({'names':['foo', 'bar'],
'formats':[np.float32, np.int]})
>>> dtype
dtype([('foo', '<f4'), ('bar', '<i4')])
>>> ma.make_mask_none((3,), dtype=dtype)
array([(False, False), (False, False), (False, False)],
dtype=[('foo', '|b1'), ('bar', '|b1')])
"""
if dtype is None:
result = np.zeros(newshape, dtype=MaskType)
else:
result = np.zeros(newshape, dtype=make_mask_descr(dtype))
return result
def mask_or (m1, m2, copy=False, shrink=True):
"""
Combine two masks with the ``logical_or`` operator.
The result may be a view on `m1` or `m2` if the other is `nomask`
(i.e. False).
Parameters
----------
m1, m2 : array_like
Input masks.
copy : bool, optional
If copy is False and one of the inputs is `nomask`, return a view
of the other input mask. Defaults to False.
shrink : bool, optional
Whether to shrink the output to `nomask` if all its values are
False. Defaults to True.
Returns
-------
mask : output mask
The result masks values that are masked in either `m1` or `m2`.
Raises
------
ValueError
If `m1` and `m2` have different flexible dtypes.
Examples
--------
>>> m1 = np.ma.make_mask([0, 1, 1, 0])
>>> m2 = np.ma.make_mask([1, 0, 0, 0])
>>> np.ma.mask_or(m1, m2)
array([ True, True, True, False], dtype=bool)
"""
def _recursive_mask_or(m1, m2, newmask):
names = m1.dtype.names
for name in names:
current1 = m1[name]
if current1.dtype.names:
_recursive_mask_or(current1, m2[name], newmask[name])
else:
umath.logical_or(current1, m2[name], newmask[name])
return
#
if (m1 is nomask) or (m1 is False):
dtype = getattr(m2, 'dtype', MaskType)
return make_mask(m2, copy=copy, shrink=shrink, dtype=dtype)
if (m2 is nomask) or (m2 is False):
dtype = getattr(m1, 'dtype', MaskType)
return make_mask(m1, copy=copy, shrink=shrink, dtype=dtype)
if m1 is m2 and is_mask(m1):
return m1
(dtype1, dtype2) = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None))
if (dtype1 != dtype2):
raise ValueError("Incompatible dtypes '%s'<>'%s'" % (dtype1, dtype2))
if dtype1.names:
newmask = np.empty_like(m1)
_recursive_mask_or(m1, m2, newmask)
return newmask
return make_mask(umath.logical_or(m1, m2), copy=copy, shrink=shrink)
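# For illustration, a rough sketch of the flexible-dtype branch above: when both
# masks share a structured dtype, the fields are OR-ed one by one through
# _recursive_mask_or (the exact repr below may vary with the numpy version).
# >>> mdtype = np.dtype([('a', bool), ('b', bool)])
# >>> m1 = np.array([(True, False), (False, False)], dtype=mdtype)
# >>> m2 = np.array([(False, False), (False, True)], dtype=mdtype)
# >>> np.ma.mask_or(m1, m2)
# array([(True, False), (False, True)],
# dtype=[('a', '|b1'), ('b', '|b1')])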
def flatten_mask(mask):
"""
Returns a completely flattened version of the mask, where nested fields
are collapsed.
Parameters
----------
mask : array_like
Input array, which will be interpreted as booleans.
Returns
-------
flattened_mask : ndarray of bools
The flattened input.
Examples
--------
>>> mask = np.array([0, 0, 1], dtype=np.bool)
>>> flatten_mask(mask)
array([False, False, True], dtype=bool)
>>> mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)])
>>> flatten_mask(mask)
array([False, False, False, True], dtype=bool)
>>> mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])]
>>> mask = np.array([(0, (0, 0)), (0, (0, 1))], dtype=mdtype)
>>> flatten_mask(mask)
array([False, False, False, False, False, True], dtype=bool)
"""
#
def _flatmask(mask):
"Flatten the mask and returns a (maybe nested) sequence of booleans."
mnames = mask.dtype.names
if mnames:
return [flatten_mask(mask[name]) for name in mnames]
else:
return mask
#
def _flatsequence(sequence):
"Generates a flattened version of the sequence."
try:
for element in sequence:
if hasattr(element, '__iter__'):
for f in _flatsequence(element):
yield f
else:
yield element
except TypeError:
yield sequence
#
mask = np.asarray(mask)
flattened = _flatsequence(_flatmask(mask))
return np.array([_ for _ in flattened], dtype=bool)
def _check_mask_axis(mask, axis):
"Check whether there are masked values along the given axis"
if mask is not nomask:
return mask.all(axis=axis)
return nomask
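# Rough sketch of what this helper computes -- reductions use it to decide which
# results must come out masked (all entries masked along the axis):
# >>> m = np.array([[True, True], [True, False]])
# >>> m.all(axis=0)          # only the first column is fully masked
# array([ True, False], dtype=bool)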
#####--------------------------------------------------------------------------
#---- --- Masking functions ---
#####--------------------------------------------------------------------------
def masked_where(condition, a, copy=True):
"""
Mask an array where a condition is met.
Return `a` as an array masked where `condition` is True.
Any masked values of `a` or `condition` are also masked in the output.
Parameters
----------
condition : array_like
Masking condition. When `condition` tests floating point values for
equality, consider using ``masked_values`` instead.
a : array_like
Array to mask.
copy : bool
If True (default) make a copy of `a` in the result. If False modify
`a` in place and return a view.
Returns
-------
result : MaskedArray
The result of masking `a` where `condition` is True.
See Also
--------
masked_values : Mask using floating point equality.
masked_equal : Mask where equal to a given value.
masked_not_equal : Mask where `not` equal to a given value.
masked_less_equal : Mask where less than or equal to a given value.
masked_greater_equal : Mask where greater than or equal to a given value.
masked_less : Mask where less than a given value.
masked_greater : Mask where greater than a given value.
masked_inside : Mask inside a given interval.
masked_outside : Mask outside a given interval.
masked_invalid : Mask invalid values (NaNs or infs).
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_where(a <= 2, a)
masked_array(data = [-- -- -- 3],
mask = [ True True True False],
fill_value=999999)
Mask array `b` conditional on `a`.
>>> b = ['a', 'b', 'c', 'd']
>>> ma.masked_where(a == 2, b)
masked_array(data = [a b -- d],
mask = [False False True False],
fill_value=N/A)
Effect of the `copy` argument.
>>> c = ma.masked_where(a <= 2, a)
>>> c
masked_array(data = [-- -- -- 3],
mask = [ True True True False],
fill_value=999999)
>>> c[0] = 99
>>> c
masked_array(data = [99 -- -- 3],
mask = [False True True False],
fill_value=999999)
>>> a
array([0, 1, 2, 3])
>>> c = ma.masked_where(a <= 2, a, copy=False)
>>> c[0] = 99
>>> c
masked_array(data = [99 -- -- 3],
mask = [False True True False],
fill_value=999999)
>>> a
array([99, 1, 2, 3])
When `condition` or `a` contain masked values.
>>> a = np.arange(4)
>>> a = ma.masked_where(a == 2, a)
>>> a
masked_array(data = [0 1 -- 3],
mask = [False False True False],
fill_value=999999)
>>> b = np.arange(4)
>>> b = ma.masked_where(b == 0, b)
>>> b
masked_array(data = [-- 1 2 3],
mask = [ True False False False],
fill_value=999999)
>>> ma.masked_where(a == 3, b)
masked_array(data = [-- 1 -- --],
mask = [ True False True True],
fill_value=999999)
"""
# Make sure that condition is a valid standard-type mask.
cond = make_mask(condition)
a = np.array(a, copy=copy, subok=True)
(cshape, ashape) = (cond.shape, a.shape)
if cshape and cshape != ashape:
raise IndexError("Inconsistant shape between the condition and the input"\
" (got %s and %s)" % (cshape, ashape))
if hasattr(a, '_mask'):
cond = mask_or(cond, a._mask)
cls = type(a)
else:
cls = MaskedArray
result = a.view(cls)
result._mask = cond
return result
def masked_greater(x, value, copy=True):
"""
Mask an array where greater than a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x > value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_greater(a, 2)
masked_array(data = [0 1 2 --],
mask = [False False False True],
fill_value=999999)
"""
return masked_where(greater(x, value), x, copy=copy)
def masked_greater_equal(x, value, copy=True):
"""
Mask an array where greater than or equal to a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x >= value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_greater_equal(a, 2)
masked_array(data = [0 1 -- --],
mask = [False False True True],
fill_value=999999)
"""
return masked_where(greater_equal(x, value), x, copy=copy)
def masked_less(x, value, copy=True):
"""
Mask an array where less than a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x < value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_less(a, 2)
masked_array(data = [-- -- 2 3],
mask = [ True True False False],
fill_value=999999)
"""
return masked_where(less(x, value), x, copy=copy)
def masked_less_equal(x, value, copy=True):
"""
Mask an array where less than or equal to a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x <= value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_less_equal(a, 2)
masked_array(data = [-- -- -- 3],
mask = [ True True True False],
fill_value=999999)
"""
return masked_where(less_equal(x, value), x, copy=copy)
def masked_not_equal(x, value, copy=True):
"""
Mask an array where `not` equal to a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x != value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_not_equal(a, 2)
masked_array(data = [-- -- 2 --],
mask = [ True True False True],
fill_value=999999)
"""
return masked_where(not_equal(x, value), x, copy=copy)
def masked_equal(x, value, copy=True):
"""
Mask an array where equal to a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x == value). For floating point arrays,
consider using ``masked_values(x, value)``.
See Also
--------
masked_where : Mask where a condition is met.
masked_values : Mask using floating point equality.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_equal(a, 2)
masked_array(data = [0 1 -- 3],
mask = [False False True False],
fill_value=999999)
"""
# An alternative implementation relies on filling first: probably not needed.
# d = filled(x, 0)
# c = umath.equal(d, value)
# m = mask_or(c, getmask(x))
# return array(d, mask=m, copy=copy)
output = masked_where(equal(x, value), x, copy=copy)
output.fill_value = value
return output
def masked_inside(x, v1, v2, copy=True):
"""
Mask an array inside a given interval.
Shortcut to ``masked_where``, where `condition` is True for `x` inside
the interval [v1,v2] (v1 <= x <= v2). The boundaries `v1` and `v2`
can be given in either order.
See Also
--------
masked_where : Mask where a condition is met.
Notes
-----
The array `x` is prefilled with its filling value.
Examples
--------
>>> import numpy.ma as ma
>>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1]
>>> ma.masked_inside(x, -0.3, 0.3)
masked_array(data = [0.31 1.2 -- -- -0.4 -1.1],
mask = [False False True True False False],
fill_value=1e+20)
The order of `v1` and `v2` doesn't matter.
>>> ma.masked_inside(x, 0.3, -0.3)
masked_array(data = [0.31 1.2 -- -- -0.4 -1.1],
mask = [False False True True False False],
fill_value=1e+20)
"""
if v2 < v1:
(v1, v2) = (v2, v1)
xf = filled(x)
condition = (xf >= v1) & (xf <= v2)
return masked_where(condition, x, copy=copy)
def masked_outside(x, v1, v2, copy=True):
"""
Mask an array outside a given interval.
Shortcut to ``masked_where``, where `condition` is True for `x` outside
the interval [v1,v2] (x < v1)|(x > v2).
The boundaries `v1` and `v2` can be given in either order.
See Also
--------
masked_where : Mask where a condition is met.
Notes
-----
The array `x` is prefilled with its filling value.
Examples
--------
>>> import numpy.ma as ma
>>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1]
>>> ma.masked_outside(x, -0.3, 0.3)
masked_array(data = [-- -- 0.01 0.2 -- --],
mask = [ True True False False True True],
fill_value=1e+20)
The order of `v1` and `v2` doesn't matter.
>>> ma.masked_outside(x, 0.3, -0.3)
masked_array(data = [-- -- 0.01 0.2 -- --],
mask = [ True True False False True True],
fill_value=1e+20)
"""
if v2 < v1:
(v1, v2) = (v2, v1)
xf = filled(x)
condition = (xf < v1) | (xf > v2)
return masked_where(condition, x, copy=copy)
def masked_object(x, value, copy=True, shrink=True):
"""
Mask the array `x` where the data are exactly equal to value.
This function is similar to `masked_values`, but only suitable
for object arrays: for floating point, use `masked_values` instead.
Parameters
----------
x : array_like
Array to mask
value : object
Comparison value
copy : {True, False}, optional
Whether to return a copy of `x`.
shrink : {True, False}, optional
Whether to collapse a mask full of False to nomask
Returns
-------
result : MaskedArray
The result of masking `x` where equal to `value`.
See Also
--------
masked_where : Mask where a condition is met.
masked_equal : Mask where equal to a given value (integers).
masked_values : Mask using floating point equality.
Examples
--------
>>> import numpy.ma as ma
>>> food = np.array(['green_eggs', 'ham'], dtype=object)
>>> # don't eat spoiled food
>>> eat = ma.masked_object(food, 'green_eggs')
>>> print eat
[-- ham]
>>> # plain ol' ham is boring
>>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=object)
>>> eat = ma.masked_object(fresh_food, 'green_eggs')
>>> print eat
[cheese ham pineapple]
Note that `mask` is set to ``nomask`` if possible.
>>> eat
masked_array(data = [cheese ham pineapple],
mask = False,
fill_value=?)
"""
if isMaskedArray(x):
condition = umath.equal(x._data, value)
mask = x._mask
else:
condition = umath.equal(np.asarray(x), value)
mask = nomask
mask = mask_or(mask, make_mask(condition, shrink=shrink))
return masked_array(x, mask=mask, copy=copy, fill_value=value)
def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True):
"""
Mask using floating point equality.
Return a MaskedArray, masked where the data in array `x` are approximately
equal to `value`, i.e. where the following condition is True
(abs(x - value) <= atol+rtol*abs(value))
The fill_value is set to `value` and the mask is set to ``nomask`` if
possible. For integers, consider using ``masked_equal``.
Parameters
----------
x : array_like
Array to mask.
value : float
Masking value.
rtol : float, optional
Tolerance parameter.
atol : float, optional
Tolerance parameter (1e-8).
copy : bool, optional
Whether to return a copy of `x`.
shrink : bool, optional
Whether to collapse a mask full of False to ``nomask``.
Returns
-------
result : MaskedArray
The result of masking `x` where approximately equal to `value`.
See Also
--------
masked_where : Mask where a condition is met.
masked_equal : Mask where equal to a given value (integers).
Examples
--------
>>> import numpy.ma as ma
>>> x = np.array([1, 1.1, 2, 1.1, 3])
>>> ma.masked_values(x, 1.1)
masked_array(data = [1.0 -- 2.0 -- 3.0],
mask = [False True False True False],
fill_value=1.1)
Note that `mask` is set to ``nomask`` if possible.
>>> ma.masked_values(x, 1.5)
masked_array(data = [ 1. 1.1 2. 1.1 3. ],
mask = False,
fill_value=1.5)
For integers, the fill value will be different in general to the
result of ``masked_equal``.
>>> x = np.arange(5)
>>> x
array([0, 1, 2, 3, 4])
>>> ma.masked_values(x, 2)
masked_array(data = [0 1 -- 3 4],
mask = [False False True False False],
fill_value=2)
>>> ma.masked_equal(x, 2)
masked_array(data = [0 1 -- 3 4],
mask = [False False True False False],
fill_value=999999)
"""
mabs = umath.absolute
xnew = filled(x, value)
if issubclass(xnew.dtype.type, np.floating):
condition = umath.less_equal(mabs(xnew - value), atol + rtol * mabs(value))
mask = getattr(x, '_mask', nomask)
else:
condition = umath.equal(xnew, value)
mask = nomask
mask = mask_or(mask, make_mask(condition, shrink=shrink))
return masked_array(xnew, mask=mask, copy=copy, fill_value=value)
def masked_invalid(a, copy=True):
"""
Mask an array where invalid values occur (NaNs or infs).
This function is a shortcut to ``masked_where``, with
`condition` = ~(np.isfinite(a)). Any pre-existing mask is conserved.
Only applies to arrays with a dtype where NaNs or infs make sense
(i.e. floating point types), but accepts any array_like object.
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(5, dtype=np.float)
>>> a[2] = np.NaN
>>> a[3] = np.PINF
>>> a
array([ 0., 1., NaN, Inf, 4.])
>>> ma.masked_invalid(a)
masked_array(data = [0.0 1.0 -- -- 4.0],
mask = [False False True True False],
fill_value=1e+20)
"""
a = np.array(a, copy=copy, subok=True)
mask = getattr(a, '_mask', None)
if mask is not None:
condition = ~(np.isfinite(getdata(a)))
if mask is not nomask:
condition |= mask
cls = type(a)
else:
condition = ~(np.isfinite(a))
cls = MaskedArray
result = a.view(cls)
result._mask = condition
return result
#####--------------------------------------------------------------------------
#---- --- Printing options ---
#####--------------------------------------------------------------------------
class _MaskedPrintOption:
"""
Handle the string used to represent missing data in a masked array.
"""
def __init__ (self, display):
"Create the masked_print_option object."
self._display = display
self._enabled = True
def display(self):
"Display the string to print for masked values."
return self._display
def set_display (self, s):
"Set the string to print for masked values."
self._display = s
def enabled(self):
"Is the use of the display value enabled?"
return self._enabled
def enable(self, shrink=1):
"Set the enabling shrink to `shrink`."
self._enabled = shrink
def __str__ (self):
return str(self._display)
__repr__ = __str__
# If you index a single masked location, you get this object.
masked_print_option = _MaskedPrintOption('--')
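# Quick illustrative sketch: the string printed for masked entries can be read or
# changed through this singleton.
# >>> np.ma.masked_print_option.display()
# '--'
# >>> np.ma.masked_print_option.set_display('N/A')
# >>> print np.ma.array([1, 2], mask=[0, 1])
# [1 N/A]
# >>> np.ma.masked_print_option.set_display('--')    # restore the default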
def _recursive_printoption(result, mask, printopt):
"""
Put `printopt` into `result` wherever `mask` is True.
Private function allowing for recursion.
"""
names = result.dtype.names
for name in names:
(curdata, curmask) = (result[name], mask[name])
if curdata.dtype.names:
_recursive_printoption(curdata, curmask, printopt)
else:
np.putmask(curdata, curmask, printopt)
return
_print_templates = dict(long_std="""\
masked_%(name)s(data =
%(data)s,
%(nlen)s mask =
%(mask)s,
%(nlen)s fill_value = %(fill)s)
""",
short_std="""\
masked_%(name)s(data = %(data)s,
%(nlen)s mask = %(mask)s,
%(nlen)s fill_value = %(fill)s)
""",
long_flx="""\
masked_%(name)s(data =
%(data)s,
%(nlen)s mask =
%(mask)s,
%(nlen)s fill_value = %(fill)s,
%(nlen)s dtype = %(dtype)s)
""",
short_flx="""\
masked_%(name)s(data = %(data)s,
%(nlen)s mask = %(mask)s,
%(nlen)s fill_value = %(fill)s,
%(nlen)s dtype = %(dtype)s)
""")
#####--------------------------------------------------------------------------
#---- --- MaskedArray class ---
#####--------------------------------------------------------------------------
def _recursive_filled(a, mask, fill_value):
"""
Recursively fill `a` with `fill_value`.
Private function
"""
names = a.dtype.names
for name in names:
current = a[name]
if current.dtype.names:
_recursive_filled(current, mask[name], fill_value[name])
else:
np.putmask(current, mask[name], fill_value[name])
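# Rough sketch of the behaviour: each field of a structured array is filled
# independently with the matching field of `fill_value` (the 999999 / 1e+20
# values below assume the usual default fill values for int / float).
# >>> x = np.ma.array([(1, 1.), (2, 2.)],
# ...                 mask=[(0, 1), (1, 0)],
# ...                 dtype=[('a', int), ('b', float)])
# >>> y = x.filled()          # goes through _recursive_filled
# >>> y['a'].tolist(), y['b'].tolist()
# ([1, 999999], [1e+20, 2.0])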
def flatten_structured_array(a):
"""
Flatten a structured array.
The data type of the output is chosen such that it can represent all of the
(nested) fields.
Parameters
----------
a : structured array
Returns
-------
output : masked array or ndarray
A flattened masked array if the input is a masked array, otherwise a
standard ndarray.
Examples
--------
>>> ndtype = [('a', int), ('b', float)]
>>> a = np.array([(1, 1), (2, 2)], dtype=ndtype)
>>> flatten_structured_array(a)
array([[1., 1.],
[2., 2.]])
"""
#
def flatten_sequence(iterable):
"""Flattens a compound of nested iterables."""
for elm in iter(iterable):
if hasattr(elm, '__iter__'):
for f in flatten_sequence(elm):
yield f
else:
yield elm
#
a = np.asanyarray(a)
inishape = a.shape
a = a.ravel()
if isinstance(a, MaskedArray):
out = np.array([tuple(flatten_sequence(d.item())) for d in a._data])
out = out.view(MaskedArray)
out._mask = np.array([tuple(flatten_sequence(d.item()))
for d in getmaskarray(a)])
else:
out = np.array([tuple(flatten_sequence(d.item())) for d in a])
if len(inishape) > 1:
newshape = list(out.shape)
newshape[0] = inishape
out.shape = tuple(flatten_sequence(newshape))
return out
class _arraymethod(object):
"""
Define a wrapper for basic array methods.
Upon call, returns a masked array, where the new ``_data`` array is
the output of the corresponding method called on the original
``_data``.
If `onmask` is True, the new mask is the output of the method called
on the initial mask. Otherwise, the new mask is just a reference
to the initial mask.
Attributes
----------
_onmask : bool
Holds the `onmask` parameter.
obj : object
The object calling `_arraymethod`.
Parameters
----------
funcname : str
Name of the function to apply on data.
onmask : bool
Whether the mask must be processed also (True) or left
alone (False). Default is True. Made available as the `_onmask`
attribute.
"""
def __init__(self, funcname, onmask=True):
self.__name__ = funcname
self._onmask = onmask
self.obj = None
self.__doc__ = self.getdoc()
#
def getdoc(self):
"Return the doc of the function (from the doc of the method)."
methdoc = getattr(ndarray, self.__name__, None) or \
getattr(np, self.__name__, None)
if methdoc is not None:
return methdoc.__doc__
#
def __get__(self, obj, objtype=None):
self.obj = obj
return self
#
def __call__(self, *args, **params):
methodname = self.__name__
instance = self.obj
# Fallback : if the instance has not been initialized, use the first arg
if instance is None:
args = list(args)
instance = args.pop(0)
data = instance._data
mask = instance._mask
cls = type(instance)
result = getattr(data, methodname)(*args, **params).view(cls)
result._update_from(instance)
if result.ndim:
if not self._onmask:
result.__setmask__(mask)
elif mask is not nomask:
result.__setmask__(getattr(mask, methodname)(*args, **params))
else:
if mask.ndim and (not mask.dtype.names and mask.all()):
return masked
return result
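# Illustrative sketch of how this wrapper is meant to be used in the class body
# below; the exact set of wrapped methods depends on the numpy version, and
# `some_method` is a purely hypothetical name:
# class MaskedArray(ndarray):
#     ...
#     copy = _arraymethod('copy')              # mask is processed along with the data
#     transpose = _arraymethod('transpose')    # mask is transposed as well
#     some_method = _arraymethod('some_method', onmask=False)   # mask kept as a reference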
class MaskedIterator(object):
"""
Flat iterator object to iterate over masked arrays.
A `MaskedIterator` iterator is returned by ``x.flat`` for any masked array
`x`. It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in C-contiguous style, with the last index varying the
fastest. The iterator can also be indexed using basic slicing or
advanced indexing.
See Also
--------
MaskedArray.flat : Return a flat iterator over an array.
MaskedArray.flatten : Returns a flattened copy of an array.
Notes
-----
`MaskedIterator` is not exported by the `ma` module. Instead of
instantiating a `MaskedIterator` directly, use `MaskedArray.flat`.
Examples
--------
>>> x = np.ma.array(np.arange(6).reshape(2, 3))
>>> fl = x.flat
>>> type(fl)
<class 'numpy.ma.core.MaskedIterator'>
>>> for item in fl:
... print item
...
0
1
2
3
4
5
Extracting more than a single element by indexing the `MaskedIterator`
returns a masked array:
>>> fl[2:4]
masked_array(data = [2 3],
mask = False,
fill_value = 999999)
"""
def __init__(self, ma):
self.ma = ma
self.dataiter = ma._data.flat
#
if ma._mask is nomask:
self.maskiter = None
else:
self.maskiter = ma._mask.flat
def __iter__(self):
return self
def __getitem__(self, indx):
result = self.dataiter.__getitem__(indx).view(type(self.ma))
if self.maskiter is not None:
_mask = self.maskiter.__getitem__(indx)
_mask.shape = result.shape
result._mask = _mask
return result
### This won't work if ravel makes a copy
def __setitem__(self, index, value):
self.dataiter[index] = getdata(value)
if self.maskiter is not None:
self.maskiter[index] = getmaskarray(value)
def next(self):
"""
Return the next value, or raise StopIteration.
Examples
--------
>>> x = np.ma.array([3, 2], mask=[0, 1])
>>> fl = x.flat
>>> fl.next()
3
>>> fl.next()
masked_array(data = --,
mask = True,
fill_value = 1e+20)
>>> fl.next()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/ralf/python/numpy/numpy/ma/core.py", line 2243, in next
d = self.dataiter.next()
StopIteration
"""
d = self.dataiter.next()
if self.maskiter is not None and self.maskiter.next():
d = masked
return d
class MaskedArray(ndarray):
"""
An array class with possibly masked values.
Masked values of True exclude the corresponding element from any
computation.
Construction::
x = MaskedArray(data, mask=nomask, dtype=None,
copy=False, subok=True, ndmin=0, fill_value=None,
keep_mask=True, hard_mask=None, shrink=True)
Parameters
----------
data : array_like
Input data.
mask : sequence, optional
Mask. Must be convertible to an array of booleans with the same
shape as `data`. True indicates a masked (i.e. invalid) entry.
dtype : dtype, optional
Data type of the output.
If `dtype` is None, the type of the data argument (``data.dtype``)
is used. If `dtype` is not None and different from ``data.dtype``,
a copy is performed.
copy : bool, optional
Whether to copy the input data (True), or to use a reference instead.
Default is False.
subok : bool, optional
Whether to return a subclass of `MaskedArray` if possible (True) or a
plain `MaskedArray`. Default is True.
ndmin : int, optional
Minimum number of dimensions. Default is 0.
fill_value : scalar, optional
Value used to fill in the masked values when necessary.
If None, a default based on the data-type is used.
keep_mask : bool, optional
Whether to combine `mask` with the mask of the input data, if any
(True), or to use only `mask` for the output (False). Default is True.
hard_mask : bool, optional
Whether to use a hard mask or not. With a hard mask, masked values
cannot be unmasked. Default is False.
shrink : bool, optional
Whether to force compression of an empty mask. Default is True.
"""
__array_priority__ = 15
_defaultmask = nomask
_defaulthardmask = False
_baseclass = ndarray
def __new__(cls, data=None, mask=nomask, dtype=None, copy=False,
subok=True, ndmin=0, fill_value=None,
keep_mask=True, hard_mask=None, shrink=True,
**options):
"""
Create a new masked array from scratch.
Notes
-----
A masked array can also be created by taking a .view(MaskedArray).
"""
# Process data............
_data = np.array(data, dtype=dtype, copy=copy, subok=True, ndmin=ndmin)
_baseclass = getattr(data, '_baseclass', type(_data))
# Check that we're not erasing the mask..........
if isinstance(data, MaskedArray) and (data.shape != _data.shape):
copy = True
# Careful, cls might not always be MaskedArray...
if not isinstance(data, cls) or not subok:
_data = ndarray.view(_data, cls)
else:
_data = ndarray.view(_data, type(data))
# Backwards compatibility w/ numpy.core.ma .......
if hasattr(data, '_mask') and not isinstance(data, ndarray):
_data._mask = data._mask
_sharedmask = True
# Process mask ...............................
# Number of named fields (or zero if none)
names_ = _data.dtype.names or ()
# Type of the mask
if names_:
mdtype = make_mask_descr(_data.dtype)
else:
mdtype = MaskType
# Case 1. : no mask in input ............
if mask is nomask:
# Erase the current mask ?
if not keep_mask:
# With a reduced version
if shrink:
_data._mask = nomask
# With full version
else:
_data._mask = np.zeros(_data.shape, dtype=mdtype)
# Check whether we missed something
elif isinstance(data, (tuple, list)):
try:
# If data is a sequence of masked array
mask = np.array([getmaskarray(m) for m in data],
dtype=mdtype)
except ValueError:
# If data is nested
mask = nomask
# Force shrinking of the mask if needed (and possible)
if (mdtype == MaskType) and mask.any():
_data._mask = mask
_data._sharedmask = False
else:
if copy:
_data._mask = _data._mask.copy()
_data._sharedmask = False
# Reset the shape of the original mask
if getmask(data) is not nomask:
data._mask.shape = data.shape
else:
_data._sharedmask = True
# Case 2. : With a mask in input ........
else:
# Read the mask with the current mdtype
try:
mask = np.array(mask, copy=copy, dtype=mdtype)
# Or assume it's a sequence of bool/int
except TypeError:
mask = np.array([tuple([m] * len(mdtype)) for m in mask],
dtype=mdtype)
# Make sure the mask and the data have the same shape
if mask.shape != _data.shape:
(nd, nm) = (_data.size, mask.size)
if nm == 1:
mask = np.resize(mask, _data.shape)
elif nm == nd:
mask = np.reshape(mask, _data.shape)
else:
msg = "Mask and data not compatible: data size is %i, " + \
"mask size is %i."
raise MaskError, msg % (nd, nm)
copy = True
# Set the mask to the new value
if _data._mask is nomask:
_data._mask = mask
_data._sharedmask = not copy
else:
if not keep_mask:
_data._mask = mask
_data._sharedmask = not copy
else:
if names_:
def _recursive_or(a, b):
"do a|=b on each field of a, recursively"
for name in a.dtype.names:
(af, bf) = (a[name], b[name])
if af.dtype.names:
_recursive_or(af, bf)
else:
af |= bf
return
_recursive_or(_data._mask, mask)
else:
_data._mask = np.logical_or(mask, _data._mask)
_data._sharedmask = False
# Update fill_value.......
if fill_value is None:
fill_value = getattr(data, '_fill_value', None)
# But don't run the check unless we have something to check....
if fill_value is not None:
_data._fill_value = _check_fill_value(fill_value, _data.dtype)
# Process extra options ..
if hard_mask is None:
_data._hardmask = getattr(data, '_hardmask', False)
else:
_data._hardmask = hard_mask
_data._baseclass = _baseclass
return _data
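# Quick illustrative sketch of the keep_mask handling above:
# >>> a = np.ma.array([1, 2, 3], mask=[0, 1, 0])
# >>> np.ma.array(a, mask=[1, 0, 0]).mask                    # default: masks are OR-ed
# array([ True,  True, False], dtype=bool)
# >>> np.ma.array(a, mask=[1, 0, 0], keep_mask=False).mask   # only the new mask is kept
# array([ True, False, False], dtype=bool)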
#
def _update_from(self, obj):
"""Copies some attributes of obj to self.
"""
if obj is not None and isinstance(obj, ndarray):
_baseclass = type(obj)
else:
_baseclass = ndarray
# We need to copy the _basedict to avoid backward propagation
_optinfo = {}
_optinfo.update(getattr(obj, '_optinfo', {}))
_optinfo.update(getattr(obj, '_basedict', {}))
if not isinstance(obj, MaskedArray):
_optinfo.update(getattr(obj, '__dict__', {}))
_dict = dict(_fill_value=getattr(obj, '_fill_value', None),
_hardmask=getattr(obj, '_hardmask', False),
_sharedmask=getattr(obj, '_sharedmask', False),
_isfield=getattr(obj, '_isfield', False),
_baseclass=getattr(obj, '_baseclass', _baseclass),
_optinfo=_optinfo,
_basedict=_optinfo)
self.__dict__.update(_dict)
self.__dict__.update(_optinfo)
return
def __array_finalize__(self, obj):
"""Finalizes the masked array.
"""
# Get main attributes .........
self._update_from(obj)
if isinstance(obj, ndarray):
odtype = obj.dtype
if odtype.names:
_mask = getattr(obj, '_mask', make_mask_none(obj.shape, odtype))
else:
_mask = getattr(obj, '_mask', nomask)
else:
_mask = nomask
self._mask = _mask
# Finalize the mask ...........
if self._mask is not nomask:
try:
self._mask.shape = self.shape
except ValueError:
self._mask = nomask
except (TypeError, AttributeError):
# When _mask.shape is not writable (because it's a void)
pass
# Finalize the fill_value for structured arrays
if self.dtype.names:
if self._fill_value is None:
self._fill_value = _check_fill_value(None, self.dtype)
return
def __array_wrap__(self, obj, context=None):
"""
Special hook for ufuncs.
Wraps the numpy array and sets the mask according to context.
"""
result = obj.view(type(self))
result._update_from(self)
#..........
if context is not None:
result._mask = result._mask.copy()
(func, args, _) = context
m = reduce(mask_or, [getmaskarray(arg) for arg in args])
# Get the domain mask................
domain = ufunc_domain.get(func, None)
if domain is not None:
# Take the domain, and make sure it's a ndarray
if len(args) > 2:
d = filled(reduce(domain, args), True)
else:
d = filled(domain(*args), True)
# Fill the result where the domain is wrong
try:
# Binary domain: take the last value
fill_value = ufunc_fills[func][-1]
except TypeError:
# Unary domain: just use this one
fill_value = ufunc_fills[func]
except KeyError:
# Domain not recognized, use fill_value instead
fill_value = self.fill_value
result = result.copy()
np.putmask(result, d, fill_value)
# Update the mask
if m is nomask:
if d is not nomask:
m = d
else:
# Don't modify inplace, we risk back-propagation
m = (m | d)
# Make sure the mask has the proper size
if result.shape == () and m:
return masked
else:
result._mask = m
result._sharedmask = False
#....
return result
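# Rough sketch of the effect: calling a plain numpy ufunc on a masked array goes
# through __array_wrap__, which carries the operands' masks over to the result
# (and masks out-of-domain entries).
# >>> x = np.ma.array([1., 2., 3.], mask=[0, 1, 0])
# >>> np.add(x, [10., 10., 10.])
# masked_array(data = [11.0 -- 13.0],
# mask = [False  True False],
# fill_value = 1e+20)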
def view(self, dtype=None, type=None):
if dtype is None:
if type is None:
output = ndarray.view(self)
else:
output = ndarray.view(self, type)
elif type is None:
try:
if issubclass(dtype, ndarray):
output = ndarray.view(self, dtype)
dtype = None
else:
output = ndarray.view(self, dtype)
except TypeError:
output = ndarray.view(self, dtype)
else:
output = ndarray.view(self, dtype, type)
# Should we update the mask ?
if (getattr(output, '_mask', nomask) is not nomask):
if dtype is None:
dtype = output.dtype
mdtype = make_mask_descr(dtype)
output._mask = self._mask.view(mdtype, ndarray)
# Try to reset the shape of the mask (if we don't have a void)
try:
output._mask.shape = output.shape
except (AttributeError, TypeError):
pass
# Make sure to reset the _fill_value if needed
if getattr(output, '_fill_value', None) is not None:
output._fill_value = None
return output
view.__doc__ = ndarray.view.__doc__
def astype(self, newtype):
"""
Returns a copy of the MaskedArray cast to given newtype.
Returns
-------
output : MaskedArray
A copy of self cast to input newtype.
The returned record shape matches self.shape.
Examples
--------
>>> x = np.ma.array([[1,2,3.1],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print x
[[1.0 -- 3.1]
[-- 5.0 --]
[7.0 -- 9.0]]
>>> print x.astype(np.int32)
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
"""
newtype = np.dtype(newtype)
output = self._data.astype(newtype).view(type(self))
output._update_from(self)
names = output.dtype.names
if names is None:
output._mask = self._mask.astype(bool)
else:
if self._mask is nomask:
output._mask = nomask
else:
output._mask = self._mask.astype([(n, bool) for n in names])
# Don't check _fill_value if it's None, that'll speed things up
if self._fill_value is not None:
output._fill_value = _check_fill_value(self._fill_value, newtype)
return output
def __getitem__(self, indx):
"""x.__getitem__(y) <==> x[y]
Return the item described by i, as a masked array.
"""
# This test is useful, but we should keep things light...
# if getmask(indx) is not nomask:
# msg = "Masked arrays must be filled before they can be used as indices!"
# raise IndexError, msg
_data = ndarray.view(self, ndarray)
dout = ndarray.__getitem__(_data, indx)
# We could directly use ndarray.__getitem__ on self...
# But then we would have to modify __array_finalize__ to prevent the
# mask from being reshaped if it hasn't been set up properly yet...
# So it's easier to stick to the current version
_mask = self._mask
if not getattr(dout, 'ndim', False):
# A record ................
if isinstance(dout, np.void):
mask = _mask[indx]
# If we can make mvoid a subclass of np.void, that'd be what we'd need
# return mvoid(dout, mask=mask)
if flatten_mask(mask).any():
dout = mvoid(dout, mask=mask)
else:
return dout
# Just a scalar............
elif _mask is not nomask and _mask[indx]:
return masked
else:
# Force dout to MA ........
dout = dout.view(type(self))
# Inherit attributes from self
dout._update_from(self)
# Check the fill_value ....
if isinstance(indx, basestring):
if self._fill_value is not None:
dout._fill_value = self._fill_value[indx]
dout._isfield = True
# Update the mask if needed
if _mask is not nomask:
dout._mask = _mask[indx]
dout._sharedmask = True
# Note: Don't try to check for m.any(), that'll take too long...
return dout
def __setitem__(self, indx, value):
"""x.__setitem__(i, y) <==> x[i]=y
Set item described by index. If value is masked, masks those
locations.
"""
if self is masked:
raise MaskError, 'Cannot alter the masked element.'
# This test is useful, but we should keep things light...
# if getmask(indx) is not nomask:
# msg = "Masked arrays must be filled before they can be used as indices!"
# raise IndexError, msg
_data = ndarray.view(self, ndarray.__getattribute__(self, '_baseclass'))
_mask = ndarray.__getattribute__(self, '_mask')
if isinstance(indx, basestring):
ndarray.__setitem__(_data, indx, value)
if _mask is nomask:
self._mask = _mask = make_mask_none(self.shape, self.dtype)
_mask[indx] = getmask(value)
return
#........................................
_dtype = ndarray.__getattribute__(_data, 'dtype')
nbfields = len(_dtype.names or ())
#........................................
if value is masked:
# The mask wasn't set: create a full version...
if _mask is nomask:
_mask = self._mask = make_mask_none(self.shape, _dtype)
# Now, set the mask to its value.
if nbfields:
_mask[indx] = tuple([True] * nbfields)
else:
_mask[indx] = True
if not self._isfield:
self._sharedmask = False
return
#........................................
# Get the _data part of the new value
dval = value
# Get the _mask part of the new value
mval = getattr(value, '_mask', nomask)
if nbfields and mval is nomask:
mval = tuple([False] * nbfields)
if _mask is nomask:
# Set the data, then the mask
ndarray.__setitem__(_data, indx, dval)
if mval is not nomask:
_mask = self._mask = make_mask_none(self.shape, _dtype)
ndarray.__setitem__(_mask, indx, mval)
elif not self._hardmask:
# Unshare the mask if necessary to avoid propagation
if not self._isfield:
self.unshare_mask()
_mask = ndarray.__getattribute__(self, '_mask')
# Set the data, then the mask
ndarray.__setitem__(_data, indx, dval)
ndarray.__setitem__(_mask, indx, mval)
elif hasattr(indx, 'dtype') and (indx.dtype == MaskType):
indx = indx * umath.logical_not(_mask)
ndarray.__setitem__(_data, indx, dval)
else:
if nbfields:
err_msg = "Flexible 'hard' masks are not yet supported..."
raise NotImplementedError(err_msg)
mindx = mask_or(_mask[indx], mval, copy=True)
dindx = self._data[indx]
if dindx.size > 1:
dindx[~mindx] = dval
elif mindx is nomask:
dindx = dval
ndarray.__setitem__(_data, indx, dindx)
_mask[indx] = mindx
return
def __getslice__(self, i, j):
"""x.__getslice__(i, j) <==> x[i:j]
Return the slice described by (i, j). The use of negative
indices is not supported.
"""
return self.__getitem__(slice(i, j))
def __setslice__(self, i, j, value):
"""x.__setslice__(i, j, value) <==> x[i:j]=value
Set the slice (i,j) of a to value. If value is masked, mask
those locations.
"""
self.__setitem__(slice(i, j), value)
def __setmask__(self, mask, copy=False):
"""Set the mask.
"""
idtype = ndarray.__getattribute__(self, 'dtype')
current_mask = ndarray.__getattribute__(self, '_mask')
if mask is masked:
mask = True
# Make sure the mask is set
if (current_mask is nomask):
# Just don't do anything if there's nothing to do...
if mask is nomask:
return
current_mask = self._mask = make_mask_none(self.shape, idtype)
# No named fields.........
if idtype.names is None:
# Hardmask: don't unmask the data
if self._hardmask:
current_mask |= mask
# Softmask: set everything to False
else:
current_mask.flat = mask
# Named fields w/ ............
else:
mdtype = current_mask.dtype
mask = np.array(mask, copy=False)
# Mask is a singleton
if not mask.ndim:
# It's a boolean : make a record
if mask.dtype.kind == 'b':
mask = np.array(tuple([mask.item()]*len(mdtype)),
dtype=mdtype)
# It's a record: make sure the dtype is correct
else:
mask = mask.astype(mdtype)
# Mask is a sequence
else:
# Make sure the new mask is a ndarray with the proper dtype
try:
mask = np.array(mask, copy=copy, dtype=mdtype)
# Or assume it's a sequence of bool/int
except TypeError:
mask = np.array([tuple([m] * len(mdtype)) for m in mask],
dtype=mdtype)
# Hardmask: don't unmask the data
if self._hardmask:
for n in idtype.names:
current_mask[n] |= mask[n]
# Softmask: set everything to False
else:
current_mask.flat = mask
# Reshape if needed
if current_mask.shape:
current_mask.shape = self.shape
return
_set_mask = __setmask__
#....
def _get_mask(self):
"""Return the current mask.
"""
# We could try to force a reshape, but that wouldn't work in some cases.
# return self._mask.reshape(self.shape)
return self._mask
mask = property(fget=_get_mask, fset=__setmask__, doc="Mask")
def _get_recordmask(self):
"""
Return the mask of the records.
A record is masked when all the fields are masked.
"""
_mask = ndarray.__getattribute__(self, '_mask').view(ndarray)
if _mask.dtype.names is None:
return _mask
return np.all(flatten_structured_array(_mask), axis= -1)
def _set_recordmask(self):
"""Return the mask of the records.
A record is masked when all the fields are masked.
"""
raise NotImplementedError("Coming soon: setting the mask per records!")
recordmask = property(fget=_get_recordmask)
#............................................
def harden_mask(self):
"""
Force the mask to hard.
Whether the mask of a masked array is hard or soft is determined by
its `hardmask` property. `harden_mask` sets `hardmask` to True.
See Also
--------
hardmask
"""
self._hardmask = True
return self
def soften_mask(self):
"""
Force the mask to soft.
Whether the mask of a masked array is hard or soft is determined by
its `hardmask` property. `soften_mask` sets `hardmask` to False.
See Also
--------
hardmask
"""
self._hardmask = False
return self
hardmask = property(fget=lambda self: self._hardmask,
doc="Hardness of the mask")
def unshare_mask(self):
"""
Copy the mask and set the sharedmask flag to False.
Whether the mask is shared between masked arrays can be seen from
the `sharedmask` property. `unshare_mask` ensures the mask is not shared.
A copy of the mask is only made if it was shared.
See Also
--------
sharedmask
"""
if self._sharedmask:
self._mask = self._mask.copy()
self._sharedmask = False
return self
sharedmask = property(fget=lambda self: self._sharedmask,
doc="Share status of the mask (read-only).")
def shrink_mask(self):
"""
Reduce a mask to nomask when possible.
Parameters
----------
None
Returns
-------
None
Examples
--------
>>> x = np.ma.array([[1,2 ], [3, 4]], mask=[0]*4)
>>> x.mask
array([[False, False],
[False, False]], dtype=bool)
>>> x.shrink_mask()
>>> x.mask
False
"""
m = self._mask
if m.ndim and not m.any():
self._mask = nomask
return self
#............................................
baseclass = property(fget=lambda self:self._baseclass,
doc="Class of the underlying data (read-only).")
def _get_data(self):
"""Return the current data, as a view of the original
underlying data.
"""
return ndarray.view(self, self._baseclass)
_data = property(fget=_get_data)
data = property(fget=_get_data)
def _get_flat(self):
"Return a flat iterator."
return MaskedIterator(self)
#
def _set_flat (self, value):
"Set a flattened version of self to value."
y = self.ravel()
y[:] = value
#
flat = property(fget=_get_flat, fset=_set_flat,
doc="Flat version of the array.")
def get_fill_value(self):
"""
Return the filling value of the masked array.
Returns
-------
fill_value : scalar
The filling value.
Examples
--------
>>> for dt in [np.int32, np.int64, np.float64, np.complex128]:
... np.ma.array([0, 1], dtype=dt).get_fill_value()
...
999999
999999
1e+20
(1e+20+0j)
>>> x = np.ma.array([0, 1.], fill_value=-np.inf)
>>> x.get_fill_value()
-inf
"""
if self._fill_value is None:
self._fill_value = _check_fill_value(None, self.dtype)
return self._fill_value[()]
def set_fill_value(self, value=None):
"""
Set the filling value of the masked array.
Parameters
----------
value : scalar, optional
The new filling value. Default is None, in which case a default
based on the data type is used.
See Also
--------
ma.set_fill_value : Equivalent function.
Examples
--------
>>> x = np.ma.array([0, 1.], fill_value=-np.inf)
>>> x.fill_value
-inf
>>> x.set_fill_value(np.pi)
>>> x.fill_value
3.1415926535897931
Reset to default:
>>> x.set_fill_value()
>>> x.fill_value
1e+20
"""
target = _check_fill_value(value, self.dtype)
_fill_value = self._fill_value
if _fill_value is None:
# Create the attribute if it was undefined
self._fill_value = target
else:
# Don't overwrite the attribute, just fill it (for propagation)
_fill_value[()] = target
fill_value = property(fget=get_fill_value, fset=set_fill_value,
doc="Filling value.")
def filled(self, fill_value=None):
"""
Return a copy of self, with masked values filled with a given value.
Parameters
----------
fill_value : scalar, optional
The value to use for invalid entries (None by default).
If None, the `fill_value` attribute of the array is used instead.
Returns
-------
filled_array : ndarray
A copy of ``self`` with invalid entries replaced by *fill_value*
(be it the function argument or the attribute of ``self``).
Notes
-----
The result is **not** a MaskedArray!
Examples
--------
>>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999)
>>> x.filled()
array([1, 2, -999, 4, -999])
>>> type(x.filled())
<type 'numpy.ndarray'>
Subclassing is preserved. This means that if the data part of the masked
array is a matrix, `filled` returns a matrix:
>>> x = np.ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]])
>>> x.filled()
matrix([[ 1, 999999],
[999999, 4]])
"""
m = self._mask
if m is nomask:
return self._data
#
if fill_value is None:
fill_value = self.fill_value
else:
fill_value = _check_fill_value(fill_value, self.dtype)
#
if self is masked_singleton:
return np.asanyarray(fill_value)
#
if m.dtype.names:
result = self._data.copy()
_recursive_filled(result, self._mask, fill_value)
elif not m.any():
return self._data
else:
result = self._data.copy()
try:
np.putmask(result, m, fill_value)
except (TypeError, AttributeError):
fill_value = narray(fill_value, dtype=object)
d = result.astype(object)
result = np.choose(m, (d, fill_value))
except IndexError:
#ok, if scalar
if self._data.shape:
raise
elif m:
result = np.array(fill_value, dtype=self.dtype)
else:
result = self._data
return result
def compressed(self):
"""
Return all the non-masked data as a 1-D array.
Returns
-------
data : ndarray
A new `ndarray` holding the non-masked data is returned.
Notes
-----
The result is **not** a MaskedArray!
Examples
--------
>>> x = np.ma.array(np.arange(5), mask=[0]*2 + [1]*3)
>>> x.compressed()
array([0, 1])
>>> type(x.compressed())
<type 'numpy.ndarray'>
"""
data = ndarray.ravel(self._data)
if self._mask is not nomask:
data = data.compress(np.logical_not(ndarray.ravel(self._mask)))
return data
def compress(self, condition, axis=None, out=None):
"""
Return `a` where condition is ``True``.
If condition is a `MaskedArray`, missing values are considered
as ``False``.
Parameters
----------
condition : var
Boolean 1-d array selecting which entries to return. If len(condition)
is less than the size of `a` along the given axis, then the output is
truncated to the length of the condition array.
axis : {None, int}, optional
Axis along which the operation must be performed.
out : {None, ndarray}, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type will be cast if
necessary.
Returns
-------
result : MaskedArray
A :class:`MaskedArray` object.
Notes
-----
Please note the difference with :meth:`compressed` !
The output of :meth:`compress` has a mask, the output of
:meth:`compressed` does not.
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print x
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> x.compress([1, 0, 1])
masked_array(data = [1 3],
mask = [False False],
fill_value=999999)
>>> x.compress([1, 0, 1], axis=1)
masked_array(data =
[[1 3]
[-- --]
[7 9]],
mask =
[[False False]
[ True True]
[False False]],
fill_value=999999)
"""
# Get the basic components
(_data, _mask) = (self._data, self._mask)
# Force the condition to a regular ndarray (forget the missing values...)
condition = np.array(condition, copy=False, subok=False)
#
_new = _data.compress(condition, axis=axis, out=out).view(type(self))
_new._update_from(self)
if _mask is not nomask:
_new._mask = _mask.compress(condition, axis=axis)
return _new
#............................................
def __str__(self):
"""String representation.
"""
if masked_print_option.enabled():
f = masked_print_option
if self is masked:
return str(f)
m = self._mask
if m is nomask:
res = self._data
else:
if m.shape == ():
if m.dtype.names:
m = m.view((bool, len(m.dtype)))
if m.any():
r = np.array(self._data.tolist(), dtype=object)
np.putmask(r, m, f)
return str(tuple(r))
else:
return str(self._data)
elif m:
return str(f)
else:
return str(self._data)
# convert to object array to make filled work
names = self.dtype.names
if names is None:
res = self._data.astype("|O8")
res[m] = f
else:
rdtype = _recursive_make_descr(self.dtype, "|O8")
res = self._data.astype(rdtype)
_recursive_printoption(res, m, f)
else:
res = self.filled(self.fill_value)
return str(res)
def __repr__(self):
"""Literal string representation.
"""
n = len(self.shape)
name = repr(self._data).split('(')[0]
parameters = dict(name=name, nlen=" " * len(name),
data=str(self), mask=str(self._mask),
fill=str(self.fill_value), dtype=str(self.dtype))
if self.dtype.names:
if n <= 1:
return _print_templates['short_flx'] % parameters
return _print_templates['long_flx'] % parameters
elif n <= 1:
return _print_templates['short_std'] % parameters
return _print_templates['long_std'] % parameters
def __eq__(self, other):
"Check whether other equals self elementwise"
if self is masked:
return masked
omask = getattr(other, '_mask', nomask)
if omask is nomask:
check = ndarray.__eq__(self.filled(0), other)
try:
check = check.view(type(self))
check._mask = self._mask
except AttributeError:
# Dang, we have a bool instead of an array: return the bool
return check
else:
odata = filled(other, 0)
check = ndarray.__eq__(self.filled(0), odata).view(type(self))
if self._mask is nomask:
check._mask = omask
else:
mask = mask_or(self._mask, omask)
if mask.dtype.names:
if mask.size > 1:
axis = 1
else:
axis = None
try:
mask = mask.view((bool_, len(self.dtype))).all(axis)
except ValueError:
mask = np.all([[f[n].all() for n in mask.dtype.names]
for f in mask], axis=axis)
check._mask = mask
return check
#
def __ne__(self, other):
"Check whether other doesn't equal self elementwise"
if self is masked:
return masked
omask = getattr(other, '_mask', nomask)
if omask is nomask:
check = ndarray.__ne__(self.filled(0), other)
try:
check = check.view(type(self))
check._mask = self._mask
except AttributeError:
# In case check is a boolean (or a numpy.bool)
return check
else:
odata = filled(other, 0)
check = ndarray.__ne__(self.filled(0), odata).view(type(self))
if self._mask is nomask:
check._mask = omask
else:
mask = mask_or(self._mask, omask)
if mask.dtype.names:
if mask.size > 1:
axis = 1
else:
axis = None
try:
mask = mask.view((bool_, len(self.dtype))).all(axis)
except ValueError:
mask = np.all([[f[n].all() for n in mask.dtype.names]
for f in mask], axis=axis)
check._mask = mask
return check
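# Quick illustrative sketch: comparisons return a masked array whose mask combines
# the masks of both operands.
# >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 0])
# >>> y = np.ma.array([1, 0, 4], mask=[0, 0, 1])
# >>> print (x == y)
# [True -- --]
# >>> (x == y).mask
# array([False,  True,  True], dtype=bool)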
#
def __add__(self, other):
"Add other to self, and return a new masked array."
return add(self, other)
#
def __radd__(self, other):
"Add other to self, and return a new masked array."
return add(self, other)
#
def __sub__(self, other):
"Subtract other to self, and return a new masked array."
return subtract(self, other)
#
def __rsub__(self, other):
"Subtract other to self, and return a new masked array."
return subtract(other, self)
#
def __mul__(self, other):
"Multiply other by self, and return a new masked array."
return multiply(self, other)
#
def __rmul__(self, other):
"Multiply other by self, and return a new masked array."
return multiply(self, other)
#
def __div__(self, other):
"Divide other into self, and return a new masked array."
return divide(self, other)
#
def __truediv__(self, other):
"Divide other into self, and return a new masked array."
return true_divide(self, other)
#
def __rtruediv__(self, other):
"Divide other into self, and return a new masked array."
return true_divide(other, self)
#
def __floordiv__(self, other):
"Divide other into self, and return a new masked array."
return floor_divide(self, other)
#
def __rfloordiv__(self, other):
"Divide other into self, and return a new masked array."
return floor_divide(other, self)
#
def __pow__(self, other):
"Raise self to the power other, masking the potential NaNs/Infs"
return power(self, other)
#
def __rpow__(self, other):
"Raise self to the power other, masking the potential NaNs/Infs"
return power(other, self)
#............................................
def __iadd__(self, other):
"Add other to self in-place."
m = getmask(other)
if self._mask is nomask:
if m is not nomask and m.any():
self._mask = make_mask_none(self.shape, self.dtype)
self._mask += m
else:
if m is not nomask:
self._mask += m
ndarray.__iadd__(self._data, np.where(self._mask, 0, getdata(other)))
return self
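# Rough sketch of the in-place behaviour: masked positions of `other` contribute
# the neutral element (0 for addition), and the two masks are OR-ed together.
# >>> x = np.ma.array([1, 2, 3], mask=[0, 0, 1])
# >>> x += np.ma.array([10, 10, 10], mask=[0, 1, 0])
# >>> print x
# [11 -- --]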
#....
def __isub__(self, other):
"Subtract other from self in-place."
m = getmask(other)
if self._mask is nomask:
if m is not nomask and m.any():
self._mask = make_mask_none(self.shape, self.dtype)
self._mask += m
elif m is not nomask:
self._mask += m
ndarray.__isub__(self._data, np.where(self._mask, 0, getdata(other)))
return self
#....
def __imul__(self, other):
"Multiply self by other in-place."
m = getmask(other)
if self._mask is nomask:
if m is not nomask and m.any():
self._mask = make_mask_none(self.shape, self.dtype)
self._mask += m
elif m is not nomask:
self._mask += m
ndarray.__imul__(self._data, np.where(self._mask, 1, getdata(other)))
return self
#....
def __idiv__(self, other):
"Divide self by other in-place."
other_data = getdata(other)
dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
other_mask = getmask(other)
new_mask = mask_or(other_mask, dom_mask)
# The following 3 lines control the domain filling
if dom_mask.any():
(_, fval) = ufunc_fills[np.divide]
other_data = np.where(dom_mask, fval, other_data)
# self._mask = mask_or(self._mask, new_mask)
self._mask |= new_mask
ndarray.__idiv__(self._data, np.where(self._mask, 1, other_data))
return self
#....
def __ifloordiv__(self, other):
"Floor divide self by other in-place."
other_data = getdata(other)
dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
other_mask = getmask(other)
new_mask = mask_or(other_mask, dom_mask)
# The following 3 lines control the domain filling
if dom_mask.any():
(_, fval) = ufunc_fills[np.floor_divide]
other_data = np.where(dom_mask, fval, other_data)
# self._mask = mask_or(self._mask, new_mask)
self._mask |= new_mask
ndarray.__ifloordiv__(self._data, np.where(self._mask, 1, other_data))
return self
#....
def __itruediv__(self, other):
"True divide self by other in-place."
other_data = getdata(other)
dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
other_mask = getmask(other)
new_mask = mask_or(other_mask, dom_mask)
# The following 3 lines control the domain filling
if dom_mask.any():
(_, fval) = ufunc_fills[np.true_divide]
other_data = np.where(dom_mask, fval, other_data)
# self._mask = mask_or(self._mask, new_mask)
self._mask |= new_mask
ndarray.__itruediv__(self._data, np.where(self._mask, 1, other_data))
return self
#...
def __ipow__(self, other):
"Raise self to the power other, in place."
other_data = getdata(other)
other_mask = getmask(other)
err_status = np.geterr()
try:
np.seterr(divide='ignore', invalid='ignore')
ndarray.__ipow__(self._data, np.where(self._mask, 1, other_data))
finally:
np.seterr(**err_status)
invalid = np.logical_not(np.isfinite(self._data))
if invalid.any():
if self._mask is not nomask:
self._mask |= invalid
else:
self._mask = invalid
np.putmask(self._data, invalid, self.fill_value)
new_mask = mask_or(other_mask, invalid)
self._mask = mask_or(self._mask, new_mask)
return self
#............................................
def __float__(self):
"Convert to float."
if self.size > 1:
raise TypeError("Only length-1 arrays can be converted "\
"to Python scalars")
elif self._mask:
warnings.warn("Warning: converting a masked element to nan.")
return np.nan
return float(self.item())
def __int__(self):
"Convert to int."
if self.size > 1:
raise TypeError("Only length-1 arrays can be converted "\
"to Python scalars")
elif self._mask:
            raise MaskError('Cannot convert masked element to a Python int.')
return int(self.item())
def get_imag(self):
"""
Return the imaginary part of the masked array.
The returned array is a view on the imaginary part of the `MaskedArray`
whose `get_imag` method is called.
Parameters
----------
None
Returns
-------
result : MaskedArray
The imaginary part of the masked array.
See Also
--------
get_real, real, imag
Examples
--------
>>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False])
>>> x.get_imag()
masked_array(data = [1.0 -- 1.6],
mask = [False True False],
fill_value = 1e+20)
"""
result = self._data.imag.view(type(self))
result.__setmask__(self._mask)
return result
imag = property(fget=get_imag, doc="Imaginary part.")
def get_real(self):
"""
Return the real part of the masked array.
The returned array is a view on the real part of the `MaskedArray`
whose `get_real` method is called.
Parameters
----------
None
Returns
-------
result : MaskedArray
The real part of the masked array.
See Also
--------
get_imag, real, imag
Examples
--------
>>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False])
>>> x.get_real()
masked_array(data = [1.0 -- 3.45],
mask = [False True False],
fill_value = 1e+20)
"""
result = self._data.real.view(type(self))
result.__setmask__(self._mask)
return result
real = property(fget=get_real, doc="Real part")
#............................................
def count(self, axis=None):
"""
Count the non-masked elements of the array along the given axis.
Parameters
----------
axis : int, optional
Axis along which to count the non-masked elements. If `axis` is
`None`, all non-masked elements are counted.
Returns
-------
result : int or ndarray
If `axis` is `None`, an integer count is returned. When `axis` is
not `None`, an array with shape determined by the lengths of the
            remaining axes is returned.
See Also
--------
count_masked : Count masked elements in array or along a given axis.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.arange(6).reshape((2, 3))
>>> a[1, :] = ma.masked
>>> a
masked_array(data =
[[0 1 2]
[-- -- --]],
mask =
[[False False False]
[ True True True]],
fill_value = 999999)
>>> a.count()
3
When the `axis` keyword is specified an array of appropriate size is
returned.
>>> a.count(axis=0)
array([1, 1, 1])
>>> a.count(axis=1)
array([3, 0])
"""
m = self._mask
s = self.shape
ls = len(s)
if m is nomask:
if ls == 0:
return 1
if ls == 1:
return s[0]
if axis is None:
return self.size
else:
n = s[axis]
t = list(s)
del t[axis]
return np.ones(t) * n
n1 = np.size(m, axis)
n2 = m.astype(int).sum(axis)
if axis is None:
return (n1 - n2)
else:
return narray(n1 - n2)
#............................................
flatten = _arraymethod('flatten')
#
def ravel(self):
"""
Returns a 1D version of self, as a view.
Returns
-------
MaskedArray
Output view is of shape ``(self.size,)`` (or
``(np.ma.product(self.shape),)``).
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print x
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> print x.ravel()
[1 -- 3 -- 5 -- 7 -- 9]
"""
r = ndarray.ravel(self._data).view(type(self))
r._update_from(self)
if self._mask is not nomask:
r._mask = ndarray.ravel(self._mask).reshape(r.shape)
else:
r._mask = nomask
return r
#
repeat = _arraymethod('repeat')
#
def reshape (self, *s, **kwargs):
"""
Give a new shape to the array without changing its data.
Returns a masked array containing the same data, but with a new shape.
The result is a view on the original array; if this is not possible, a
ValueError is raised.
Parameters
----------
shape : int or tuple of ints
The new shape should be compatible with the original shape. If an
integer is supplied, then the result will be a 1-D array of that
length.
order : {'C', 'F'}, optional
Determines whether the array data should be viewed as in C
(row-major) or FORTRAN (column-major) order.
Returns
-------
reshaped_array : array
A new view on the array.
See Also
--------
reshape : Equivalent function in the masked array module.
numpy.ndarray.reshape : Equivalent method on ndarray object.
numpy.reshape : Equivalent function in the NumPy module.
Notes
-----
        The reshaping operation cannot guarantee that a copy will not be made;
        to modify the shape in place, use ``a.shape = s``.
Examples
--------
>>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1])
>>> print x
[[-- 2]
[3 --]]
>>> x = x.reshape((4,1))
>>> print x
[[--]
[2]
[3]
[--]]
"""
kwargs.update(order=kwargs.get('order', 'C'))
result = self._data.reshape(*s, **kwargs).view(type(self))
result._update_from(self)
mask = self._mask
if mask is not nomask:
result._mask = mask.reshape(*s, **kwargs)
return result
#
def resize(self, newshape, refcheck=True, order=False):
"""
.. warning::
This method does nothing, except raise a ValueError exception. A
masked array does not own its data and therefore cannot safely be
resized in place. Use the `numpy.ma.resize` function instead.
This method is difficult to implement safely and may be deprecated in
future releases of NumPy.
"""
# Note : the 'order' keyword looks broken, let's just drop it
# try:
# ndarray.resize(self, newshape, refcheck=refcheck)
# if self.mask is not nomask:
# self._mask.resize(newshape, refcheck=refcheck)
# except ValueError:
# raise ValueError("Cannot resize an array that has been referenced "
# "or is referencing another array in this way.\n"
# "Use the numpy.ma.resize function.")
# return None
errmsg = "A masked array does not own its data "\
"and therefore cannot be resized.\n" \
"Use the numpy.ma.resize function instead."
raise ValueError(errmsg)
#
def put(self, indices, values, mode='raise'):
"""
Set storage-indexed locations to corresponding values.
Sets self._data.flat[n] = values[n] for each n in indices.
If `values` is shorter than `indices` then it will repeat.
If `values` has some masked values, the initial mask is updated
in consequence, else the corresponding values are unmasked.
Parameters
----------
indices : 1-D array_like
Target indices, interpreted as integers.
values : array_like
Values to place in self._data copy at target indices.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
'raise' : raise an error.
'wrap' : wrap around.
'clip' : clip to the range.
Notes
-----
`values` can be a scalar or length 1 array.
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print x
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> x.put([0,4,8],[10,20,30])
>>> print x
[[10 -- 3]
[-- 20 --]
[7 -- 30]]
>>> x.put(4,999)
>>> print x
[[10 -- 3]
[-- 999 --]
[7 -- 30]]
"""
m = self._mask
# Hard mask: Get rid of the values/indices that fall on masked data
if self._hardmask and self._mask is not nomask:
mask = self._mask[indices]
indices = narray(indices, copy=False)
values = narray(values, copy=False, subok=True)
values.resize(indices.shape)
indices = indices[~mask]
values = values[~mask]
#....
self._data.put(indices, values, mode=mode)
#....
if m is nomask:
m = getmask(values)
else:
m = m.copy()
if getmask(values) is nomask:
m.put(indices, False, mode=mode)
else:
m.put(indices, values._mask, mode=mode)
m = make_mask(m, copy=False, shrink=True)
self._mask = m
#............................................
def ids (self):
"""
Return the addresses of the data and mask areas.
Parameters
----------
None
Examples
--------
>>> x = np.ma.array([1, 2, 3], mask=[0, 1, 1])
>>> x.ids()
(166670640, 166659832)
If the array has no mask, the address of `nomask` is returned. This address
is typically not close to the data in memory:
>>> x = np.ma.array([1, 2, 3])
>>> x.ids()
(166691080, 3083169284L)
"""
if self._mask is nomask:
return (self.ctypes.data, id(nomask))
return (self.ctypes.data, self._mask.ctypes.data)
def iscontiguous(self):
"""
Return a boolean indicating whether the data is contiguous.
Parameters
----------
None
Examples
--------
>>> x = np.ma.array([1, 2, 3])
>>> x.iscontiguous()
True
`iscontiguous` returns one of the flags of the masked array:
>>> x.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : True
OWNDATA : False
WRITEABLE : True
ALIGNED : True
UPDATEIFCOPY : False
"""
return self.flags['CONTIGUOUS']
#............................................
def all(self, axis=None, out=None):
"""
Check if all of the elements of `a` are true.
Performs a :func:`logical_and` over the given axis and returns the result.
Masked values are considered as True during computation.
        For convenience, the output array is masked where ALL the values along the
        current axis are masked: if the output would have been a scalar and all the
        values are masked, then the output is `masked`.
Parameters
----------
axis : {None, integer}
Axis to perform the operation over.
If None, perform over flattened array.
out : {None, array}, optional
Array into which the result can be placed. Its type is preserved
and it must be of the right shape to hold the output.
See Also
--------
all : equivalent function
Examples
--------
>>> np.ma.array([1,2,3]).all()
True
>>> a = np.ma.array([1,2,3], mask=True)
>>> (a.all() is np.ma.masked)
True
"""
mask = _check_mask_axis(self._mask, axis)
if out is None:
d = self.filled(True).all(axis=axis).view(type(self))
if d.ndim:
d.__setmask__(mask)
elif mask:
return masked
return d
self.filled(True).all(axis=axis, out=out)
if isinstance(out, MaskedArray):
if out.ndim or mask:
out.__setmask__(mask)
return out
def any(self, axis=None, out=None):
"""
Check if any of the elements of `a` are true.
Performs a logical_or over the given axis and returns the result.
Masked values are considered as False during computation.
Parameters
----------
axis : {None, integer}
Axis to perform the operation over.
If None, perform over flattened array and return a scalar.
out : {None, array}, optional
Array into which the result can be placed. Its type is preserved
and it must be of the right shape to hold the output.
See Also
--------
any : equivalent function
"""
mask = _check_mask_axis(self._mask, axis)
if out is None:
d = self.filled(False).any(axis=axis).view(type(self))
if d.ndim:
d.__setmask__(mask)
elif mask:
d = masked
return d
self.filled(False).any(axis=axis, out=out)
if isinstance(out, MaskedArray):
if out.ndim or mask:
out.__setmask__(mask)
return out
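    # A minimal usage sketch for ``any`` (illustrative only, not part of the
    # shipped documentation):
    #     >>> a = np.ma.array([0, 0, 1], mask=[0, 0, 1])
    #     >>> bool(a.any())    # the only nonzero entry is masked -> False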
def nonzero(self):
"""
Return the indices of unmasked elements that are not zero.
Returns a tuple of arrays, one for each dimension, containing the
indices of the non-zero elements in that dimension. The corresponding
non-zero values can be obtained with::
a[a.nonzero()]
To group the indices by element, rather than dimension, use
instead::
np.transpose(a.nonzero())
The result of this is always a 2d array, with a row for each non-zero
element.
Parameters
----------
None
Returns
-------
tuple_of_arrays : tuple
Indices of elements that are non-zero.
See Also
--------
numpy.nonzero :
Function operating on ndarrays.
flatnonzero :
Return indices that are non-zero in the flattened version of the input
array.
ndarray.nonzero :
Equivalent ndarray method.
count_nonzero :
Counts the number of non-zero elements in the input array.
Examples
--------
>>> import numpy.ma as ma
>>> x = ma.array(np.eye(3))
>>> x
masked_array(data =
[[ 1. 0. 0.]
[ 0. 1. 0.]
[ 0. 0. 1.]],
mask =
False,
fill_value=1e+20)
>>> x.nonzero()
(array([0, 1, 2]), array([0, 1, 2]))
Masked elements are ignored.
>>> x[1, 1] = ma.masked
>>> x
masked_array(data =
[[1.0 0.0 0.0]
[0.0 -- 0.0]
[0.0 0.0 1.0]],
mask =
[[False False False]
[False True False]
[False False False]],
fill_value=1e+20)
>>> x.nonzero()
(array([0, 2]), array([0, 2]))
Indices can also be grouped by element.
>>> np.transpose(x.nonzero())
array([[0, 0],
[2, 2]])
A common use for ``nonzero`` is to find the indices of an array, where
a condition is True. Given an array `a`, the condition `a` > 3 is a
boolean array and since False is interpreted as 0, ma.nonzero(a > 3)
        yields the indices of `a` where the condition is true.
>>> a = ma.array([[1,2,3],[4,5,6],[7,8,9]])
>>> a > 3
masked_array(data =
[[False False False]
[ True True True]
[ True True True]],
mask =
False,
fill_value=999999)
>>> ma.nonzero(a > 3)
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
The ``nonzero`` method of the condition array can also be called.
>>> (a > 3).nonzero()
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
"""
return narray(self.filled(0), copy=False).nonzero()
def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
"""
(this docstring should be overwritten)
"""
#!!!: implement out + test!
m = self._mask
if m is nomask:
result = super(MaskedArray, self).trace(offset=offset, axis1=axis1,
axis2=axis2, out=out)
return result.astype(dtype)
else:
D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2)
return D.astype(dtype).filled(0).sum(axis=None, out=out)
trace.__doc__ = ndarray.trace.__doc__
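    # Illustrative sketch of ``trace`` on masked data (my own example; the
    # exact scalar type of the result may vary): masked diagonal entries
    # contribute 0 to the sum.
    #     >>> x = np.ma.array([[1, 2], [3, 4]], mask=[[0, 0], [0, 1]])
    #     >>> x.trace()    # only the unmasked 1 remains on the diagonal -> 1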
def sum(self, axis=None, dtype=None, out=None):
"""
Return the sum of the array elements over the given axis.
Masked elements are set to 0 internally.
Parameters
----------
axis : {None, -1, int}, optional
Axis along which the sum is computed. The default
(`axis` = None) is to compute over the flattened array.
dtype : {None, dtype}, optional
Determines the type of the returned array and of the accumulator
where the elements are summed. If dtype has the value None and
the type of a is an integer type of precision less than the default
platform integer, then the default platform integer precision is
used. Otherwise, the dtype is the same as that of a.
out : {None, ndarray}, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary.
Returns
-------
sum_along_axis : MaskedArray or scalar
An array with the same shape as self, with the specified
axis removed. If self is a 0-d array, or if `axis` is None, a scalar
is returned. If an output array is specified, a reference to
`out` is returned.
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print x
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> print x.sum()
25
>>> print x.sum(axis=1)
[4 5 16]
>>> print x.sum(axis=0)
[8 5 12]
>>> print type(x.sum(axis=0, dtype=np.int64)[0])
<type 'numpy.int64'>
"""
_mask = ndarray.__getattribute__(self, '_mask')
newmask = _check_mask_axis(_mask, axis)
# No explicit output
if out is None:
result = self.filled(0).sum(axis, dtype=dtype)
rndim = getattr(result, 'ndim', 0)
if rndim:
result = result.view(type(self))
result.__setmask__(newmask)
elif newmask:
result = masked
return result
# Explicit output
result = self.filled(0).sum(axis, dtype=dtype, out=out)
if isinstance(out, MaskedArray):
outmask = getattr(out, '_mask', nomask)
if (outmask is nomask):
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
return out
def cumsum(self, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of the elements along the given axis.
The cumulative sum is calculated over the flattened array by
default, otherwise over the specified axis.
Masked values are set to 0 internally during the computation.
However, their position is saved, and the result will be masked at
the same locations.
Parameters
----------
axis : {None, -1, int}, optional
Axis along which the sum is computed. The default (`axis` = None) is to
compute over the flattened array. `axis` may be negative, in which case
it counts from the last to the first axis.
dtype : {None, dtype}, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults
to the dtype of `a`, unless `a` has an integer dtype with a
precision less than that of the default platform integer. In
that case, the default platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary.
Returns
-------
cumsum : ndarray.
A new array holding the result is returned unless ``out`` is
specified, in which case a reference to ``out`` is returned.
Notes
-----
The mask is lost if `out` is not a valid :class:`MaskedArray` !
Arithmetic is modular when using integer types, and no error is
raised on overflow.
Examples
--------
>>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0])
>>> print marr.cumsum()
[0 1 3 -- -- -- 9 16 24 33]
"""
result = self.filled(0).cumsum(axis=axis, dtype=dtype, out=out)
if out is not None:
if isinstance(out, MaskedArray):
out.__setmask__(self.mask)
return out
result = result.view(type(self))
result.__setmask__(self._mask)
return result
def prod(self, axis=None, dtype=None, out=None):
"""
Return the product of the array elements over the given axis.
Masked elements are set to 1 internally for computation.
Parameters
----------
axis : {None, int}, optional
Axis over which the product is taken. If None is used, then the
product is over all the array elements.
dtype : {None, dtype}, optional
Determines the type of the returned array and of the accumulator
where the elements are multiplied. If ``dtype`` has the value ``None``
and the type of a is an integer type of precision less than the default
platform integer, then the default platform integer precision is
used. Otherwise, the dtype is the same as that of a.
out : {None, array}, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type will be cast if
necessary.
Returns
-------
product_along_axis : {array, scalar}, see dtype parameter above.
Returns an array whose shape is the same as a with the specified
axis removed. Returns a 0d array when a is 1d or axis=None.
Returns a reference to the specified output array if specified.
See Also
--------
prod : equivalent function
Notes
-----
Arithmetic is modular when using integer types, and no error is raised
on overflow.
Examples
--------
>>> np.prod([1.,2.])
2.0
>>> np.prod([1.,2.], dtype=np.int32)
2
>>> np.prod([[1.,2.],[3.,4.]])
24.0
>>> np.prod([[1.,2.],[3.,4.]], axis=1)
array([ 2., 12.])
"""
_mask = ndarray.__getattribute__(self, '_mask')
newmask = _check_mask_axis(_mask, axis)
# No explicit output
if out is None:
result = self.filled(1).prod(axis, dtype=dtype)
rndim = getattr(result, 'ndim', 0)
if rndim:
result = result.view(type(self))
result.__setmask__(newmask)
elif newmask:
result = masked
return result
# Explicit output
result = self.filled(1).prod(axis, dtype=dtype, out=out)
if isinstance(out, MaskedArray):
outmask = getattr(out, '_mask', nomask)
if (outmask is nomask):
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
return out
product = prod
def cumprod(self, axis=None, dtype=None, out=None):
"""
Return the cumulative product of the elements along the given axis.
The cumulative product is taken over the flattened array by
default, otherwise over the specified axis.
Masked values are set to 1 internally during the computation.
However, their position is saved, and the result will be masked at
the same locations.
Parameters
----------
axis : {None, -1, int}, optional
Axis along which the product is computed. The default
(`axis` = None) is to compute over the flattened array.
dtype : {None, dtype}, optional
Determines the type of the returned array and of the accumulator
where the elements are multiplied. If ``dtype`` has the value ``None``
and the type of ``a`` is an integer type of precision less than the
default platform integer, then the default platform integer precision
is used. Otherwise, the dtype is the same as that of ``a``.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary.
Returns
-------
cumprod : ndarray
A new array holding the result is returned unless out is specified,
in which case a reference to out is returned.
Notes
-----
The mask is lost if `out` is not a valid MaskedArray !
Arithmetic is modular when using integer types, and no error is
raised on overflow.
"""
result = self.filled(1).cumprod(axis=axis, dtype=dtype, out=out)
if out is not None:
if isinstance(out, MaskedArray):
out.__setmask__(self._mask)
return out
result = result.view(type(self))
result.__setmask__(self._mask)
return result
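    # Illustrative sketch for ``cumprod`` (not from the original docs):
    # masked positions act as 1 in the running product but stay masked in
    # the result.
    #     >>> a = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 0])
    #     >>> print a.cumprod()    # expected: [1 -- 3 12]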
def mean(self, axis=None, dtype=None, out=None):
"""
Returns the average of the array elements.
Masked entries are ignored.
The average is taken over the flattened array by default, otherwise over
the specified axis. Refer to `numpy.mean` for the full documentation.
Parameters
----------
a : array_like
Array containing numbers whose mean is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the means are computed. The default is to compute
the mean of the flattened array.
dtype : dtype, optional
Type to use in computing the mean. For integer inputs, the default
            is float64; for floating point inputs, it is the same as the input
dtype.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type will be cast if
necessary.
Returns
-------
mean : ndarray, see dtype parameter above
If `out=None`, returns a new array containing the mean values,
otherwise a reference to the output array is returned.
See Also
--------
numpy.ma.mean : Equivalent function.
numpy.mean : Equivalent function on non-masked arrays.
numpy.ma.average: Weighted average.
Examples
--------
>>> a = np.ma.array([1,2,3], mask=[False, False, True])
>>> a
masked_array(data = [1 2 --],
mask = [False False True],
fill_value = 999999)
>>> a.mean()
1.5
"""
if self._mask is nomask:
result = super(MaskedArray, self).mean(axis=axis, dtype=dtype)
else:
dsum = self.sum(axis=axis, dtype=dtype)
cnt = self.count(axis=axis)
if cnt.shape == () and (cnt == 0):
result = masked
else:
result = dsum * 1. / cnt
if out is not None:
out.flat = result
if isinstance(out, MaskedArray):
outmask = getattr(out, '_mask', nomask)
if (outmask is nomask):
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = getattr(result, '_mask', nomask)
return out
return result
def anom(self, axis=None, dtype=None):
"""
Compute the anomalies (deviations from the arithmetic mean)
along the given axis.
Returns an array of anomalies, with the same shape as the input and
where the arithmetic mean is computed along the given axis.
Parameters
----------
axis : int, optional
Axis over which the anomalies are taken.
The default is to use the mean of the flattened array as reference.
dtype : dtype, optional
Type to use in computing the variance. For arrays of integer type
the default is float32; for arrays of float types it is the same as
the array type.
See Also
--------
mean : Compute the mean of the array.
Examples
--------
>>> a = np.ma.array([1,2,3])
>>> a.anom()
masked_array(data = [-1. 0. 1.],
mask = False,
fill_value = 1e+20)
"""
m = self.mean(axis, dtype)
if not axis:
return (self - m)
else:
return (self - expand_dims(m, axis))
def var(self, axis=None, dtype=None, out=None, ddof=0):
""
# Easy case: nomask, business as usual
if self._mask is nomask:
return self._data.var(axis=axis, dtype=dtype, out=out, ddof=ddof)
# Some data are masked, yay!
cnt = self.count(axis=axis) - ddof
danom = self.anom(axis=axis, dtype=dtype)
if iscomplexobj(self):
danom = umath.absolute(danom) ** 2
else:
danom *= danom
dvar = divide(danom.sum(axis), cnt).view(type(self))
# Apply the mask if it's not a scalar
if dvar.ndim:
dvar._mask = mask_or(self._mask.all(axis), (cnt <= 0))
dvar._update_from(self)
elif getattr(dvar, '_mask', False):
# Make sure that masked is returned when the scalar is masked.
dvar = masked
if out is not None:
if isinstance(out, MaskedArray):
out.__setmask__(True)
elif out.dtype.kind in 'biu':
errmsg = "Masked data information would be lost in one or "\
"more location."
raise MaskError(errmsg)
else:
out.flat = np.nan
return out
        # In case we have an explicit output
if out is not None:
# Set the data
out.flat = dvar
# Set the mask if needed
if isinstance(out, MaskedArray):
out.__setmask__(dvar.mask)
return out
return dvar
var.__doc__ = np.var.__doc__
def std(self, axis=None, dtype=None, out=None, ddof=0):
""
dvar = self.var(axis=axis, dtype=dtype, out=out, ddof=ddof)
if dvar is not masked:
dvar = sqrt(dvar)
if out is not None:
out **= 0.5
return out
return dvar
std.__doc__ = np.std.__doc__
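    # Rough usage sketch for ``var``/``std`` on masked data (my example,
    # not from the docs): masked entries are excluded from the statistics.
    #     >>> a = np.ma.array([1., 2., 3., 100.], mask=[0, 0, 0, 1])
    #     >>> a.var()    # variance of [1, 2, 3] only -> 2/3
    #     >>> a.std()    # square root of the above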
#............................................
def round(self, decimals=0, out=None):
"""
        Return an array with each element rounded to the given number of decimals.
Refer to `numpy.around` for full documentation.
See Also
--------
numpy.around : equivalent function
"""
result = self._data.round(decimals=decimals, out=out).view(type(self))
result._mask = self._mask
result._update_from(self)
# No explicit output: we're done
if out is None:
return result
if isinstance(out, MaskedArray):
out.__setmask__(self._mask)
return out
round.__doc__ = ndarray.round.__doc__
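    # Small sketch for ``round`` (illustrative): the mask is carried over
    # unchanged to the rounded result.
    #     >>> a = np.ma.array([1.234, 5.678], mask=[0, 1])
    #     >>> print a.round(decimals=1)    # expected: [1.2 --]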
#............................................
def argsort(self, axis=None, kind='quicksort', order=None, fill_value=None):
"""
Return an ndarray of indices that sort the array along the
specified axis. Masked values are filled beforehand to
`fill_value`.
Parameters
----------
axis : int, optional
Axis along which to sort. The default is -1 (last axis).
If None, the flattened array is used.
fill_value : var, optional
Value used to fill the array before sorting.
The default is the `fill_value` attribute of the input array.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
order : list, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. Not all fields need be
specified.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specified axis.
In other words, ``a[index_array]`` yields a sorted `a`.
See Also
--------
sort : Describes sorting algorithms used.
lexsort : Indirect stable sort with multiple keys.
ndarray.sort : Inplace sort.
Notes
-----
See `sort` for notes on the different sorting algorithms.
Examples
--------
>>> a = np.ma.array([3,2,1], mask=[False, False, True])
>>> a
masked_array(data = [3 2 --],
mask = [False False True],
fill_value = 999999)
>>> a.argsort()
array([1, 0, 2])
"""
if fill_value is None:
fill_value = default_fill_value(self)
d = self.filled(fill_value).view(ndarray)
return d.argsort(axis=axis, kind=kind, order=order)
def argmin(self, axis=None, fill_value=None, out=None):
"""
Return array of indices to the minimum values along the given axis.
Parameters
----------
axis : {None, integer}
If None, the index is into the flattened array, otherwise along
the specified axis
fill_value : {var}, optional
Value used to fill in the masked values. If None, the output of
minimum_fill_value(self._data) is used instead.
out : {None, array}, optional
Array into which the result can be placed. Its type is preserved
and it must be of the right shape to hold the output.
Returns
-------
{ndarray, scalar}
If multi-dimension input, returns a new ndarray of indices to the
minimum values along the given axis. Otherwise, returns a scalar
of index to the minimum values along the given axis.
Examples
--------
>>> x = np.ma.array(arange(4), mask=[1,1,0,0])
>>> x.shape = (2,2)
>>> print x
[[-- --]
[2 3]]
>>> print x.argmin(axis=0, fill_value=-1)
[0 0]
>>> print x.argmin(axis=0, fill_value=9)
[1 1]
"""
if fill_value is None:
fill_value = minimum_fill_value(self)
d = self.filled(fill_value).view(ndarray)
return d.argmin(axis, out=out)
def argmax(self, axis=None, fill_value=None, out=None):
"""
Returns array of indices of the maximum values along the given axis.
Masked values are treated as if they had the value fill_value.
Parameters
----------
axis : {None, integer}
If None, the index is into the flattened array, otherwise along
the specified axis
fill_value : {var}, optional
Value used to fill in the masked values. If None, the output of
maximum_fill_value(self._data) is used instead.
out : {None, array}, optional
Array into which the result can be placed. Its type is preserved
and it must be of the right shape to hold the output.
Returns
-------
index_array : {integer_array}
Examples
--------
>>> a = np.arange(6).reshape(2,3)
>>> a.argmax()
5
>>> a.argmax(0)
array([1, 1, 1])
>>> a.argmax(1)
array([2, 2])
"""
if fill_value is None:
fill_value = maximum_fill_value(self._data)
d = self.filled(fill_value).view(ndarray)
return d.argmax(axis, out=out)
def sort(self, axis= -1, kind='quicksort', order=None,
endwith=True, fill_value=None):
"""
Sort the array, in-place
Parameters
----------
a : array_like
Array to be sorted.
axis : int, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm. Default is 'quicksort'.
order : list, optional
When `a` is a structured array, this argument specifies which fields
to compare first, second, and so on. This list does not need to
include all of the fields.
endwith : {True, False}, optional
Whether missing values (if any) should be forced in the upper indices
(at the end of the array) (True) or lower indices (at the beginning).
fill_value : {var}, optional
Value used internally for the masked values.
If ``fill_value`` is not None, it supersedes ``endwith``.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
ndarray.sort : Method to sort an array in-place.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in a sorted array.
Notes
-----
See ``sort`` for notes on the different sorting algorithms.
Examples
--------
>>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])
>>> # Default
>>> a.sort()
>>> print a
[1 3 5 -- --]
>>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])
>>> # Put missing values in the front
>>> a.sort(endwith=False)
>>> print a
[-- -- 1 3 5]
>>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])
>>> # fill_value takes over endwith
>>> a.sort(endwith=False, fill_value=3)
>>> print a
[1 -- -- 3 5]
"""
if self._mask is nomask:
ndarray.sort(self, axis=axis, kind=kind, order=order)
else:
if self is masked:
return self
if fill_value is None:
if endwith:
filler = minimum_fill_value(self)
else:
filler = maximum_fill_value(self)
else:
filler = fill_value
idx = np.indices(self.shape)
idx[axis] = self.filled(filler).argsort(axis=axis, kind=kind,
order=order)
idx_l = idx.tolist()
tmp_mask = self._mask[idx_l].flat
tmp_data = self._data[idx_l].flat
self._data.flat = tmp_data
self._mask.flat = tmp_mask
return
#............................................
def min(self, axis=None, out=None, fill_value=None):
"""
Return the minimum along a given axis.
Parameters
----------
axis : {None, int}, optional
Axis along which to operate. By default, ``axis`` is None and the
flattened input is used.
out : array_like, optional
Alternative output array in which to place the result. Must be of
the same shape and buffer length as the expected output.
fill_value : {var}, optional
Value used to fill in the masked values.
If None, use the output of `minimum_fill_value`.
Returns
-------
amin : array_like
New array holding the result.
If ``out`` was specified, ``out`` is returned.
See Also
--------
minimum_fill_value
Returns the minimum filling value for a given datatype.
"""
_mask = ndarray.__getattribute__(self, '_mask')
newmask = _check_mask_axis(_mask, axis)
if fill_value is None:
fill_value = minimum_fill_value(self)
# No explicit output
if out is None:
result = self.filled(fill_value).min(axis=axis, out=out).view(type(self))
if result.ndim:
# Set the mask
result.__setmask__(newmask)
# Get rid of Infs
if newmask.ndim:
np.putmask(result, newmask, result.fill_value)
elif newmask:
result = masked
return result
# Explicit output
result = self.filled(fill_value).min(axis=axis, out=out)
if isinstance(out, MaskedArray):
outmask = getattr(out, '_mask', nomask)
if (outmask is nomask):
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
else:
if out.dtype.kind in 'biu':
errmsg = "Masked data information would be lost in one or more"\
" location."
raise MaskError(errmsg)
np.putmask(out, newmask, np.nan)
return out
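    # Illustrative sketch for ``min`` (not from the original docstring):
    #     >>> x = np.ma.array([5, -1, 3], mask=[0, 1, 0])
    #     >>> x.min()                 # the masked -1 is ignored -> 3
    #     >>> x.min(fill_value=0)     # treat masked entries as 0 -> 0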
def mini(self, axis=None):
"""
Return the array minimum along the specified axis.
Parameters
----------
axis : int, optional
The axis along which to find the minima. Default is None, in which case
the minimum value in the whole array is returned.
Returns
-------
min : scalar or MaskedArray
If `axis` is None, the result is a scalar. Otherwise, if `axis` is
given and the array is at least 2-D, the result is a masked array with
dimension one smaller than the array on which `mini` is called.
Examples
--------
>>> x = np.ma.array(np.arange(6), mask=[0 ,1, 0, 0, 0 ,1]).reshape(3, 2)
>>> print x
[[0 --]
[2 3]
[4 --]]
>>> x.mini()
0
>>> x.mini(axis=0)
masked_array(data = [0 3],
mask = [False False],
fill_value = 999999)
>>> print x.mini(axis=1)
[0 2 4]
"""
if axis is None:
return minimum(self)
else:
return minimum.reduce(self, axis)
#........................
def max(self, axis=None, out=None, fill_value=None):
"""
Return the maximum along a given axis.
Parameters
----------
axis : {None, int}, optional
Axis along which to operate. By default, ``axis`` is None and the
flattened input is used.
out : array_like, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
fill_value : {var}, optional
Value used to fill in the masked values.
If None, use the output of maximum_fill_value().
Returns
-------
amax : array_like
New array holding the result.
If ``out`` was specified, ``out`` is returned.
See Also
--------
maximum_fill_value
Returns the maximum filling value for a given datatype.
"""
_mask = ndarray.__getattribute__(self, '_mask')
newmask = _check_mask_axis(_mask, axis)
if fill_value is None:
fill_value = maximum_fill_value(self)
# No explicit output
if out is None:
result = self.filled(fill_value).max(axis=axis, out=out).view(type(self))
if result.ndim:
# Set the mask
result.__setmask__(newmask)
# Get rid of Infs
if newmask.ndim:
np.putmask(result, newmask, result.fill_value)
elif newmask:
result = masked
return result
# Explicit output
result = self.filled(fill_value).max(axis=axis, out=out)
if isinstance(out, MaskedArray):
outmask = getattr(out, '_mask', nomask)
if (outmask is nomask):
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
else:
if out.dtype.kind in 'biu':
errmsg = "Masked data information would be lost in one or more"\
" location."
raise MaskError(errmsg)
np.putmask(out, newmask, np.nan)
return out
def ptp(self, axis=None, out=None, fill_value=None):
"""
        Return (maximum - minimum) along the given dimension
(i.e. peak-to-peak value).
Parameters
----------
axis : {None, int}, optional
Axis along which to find the peaks. If None (default) the
flattened array is used.
out : {None, array_like}, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary.
fill_value : {var}, optional
Value used to fill in the masked values.
Returns
-------
ptp : ndarray.
A new array holding the result, unless ``out`` was
specified, in which case a reference to ``out`` is returned.
"""
if out is None:
result = self.max(axis=axis, fill_value=fill_value)
result -= self.min(axis=axis, fill_value=fill_value)
return result
out.flat = self.max(axis=axis, out=out, fill_value=fill_value)
out -= self.min(axis=axis, fill_value=fill_value)
return out
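    # Illustrative sketch for ``ptp`` (my own example): the peak-to-peak
    # value is computed over the unmasked entries only.
    #     >>> a = np.ma.array([1, 9, 4], mask=[0, 1, 0])
    #     >>> a.ptp()    # max(1, 4) - min(1, 4) -> 3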
def take(self, indices, axis=None, out=None, mode='raise'):
"""
"""
(_data, _mask) = (self._data, self._mask)
cls = type(self)
# Make sure the indices are not masked
maskindices = getattr(indices, '_mask', nomask)
if maskindices is not nomask:
indices = indices.filled(0)
# Get the data
if out is None:
out = _data.take(indices, axis=axis, mode=mode).view(cls)
else:
np.take(_data, indices, axis=axis, mode=mode, out=out)
# Get the mask
if isinstance(out, MaskedArray):
if _mask is nomask:
outmask = maskindices
else:
outmask = _mask.take(indices, axis=axis, mode=mode)
outmask |= maskindices
out.__setmask__(outmask)
return out
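    # Rough sketch for ``take`` (illustrative, not from the docs): the mask
    # travels with the data, and masked *indices* yield masked output.
    #     >>> x = np.ma.array([10, 20, 30, 40], mask=[0, 1, 0, 0])
    #     >>> print x.take([0, 1, 3])    # expected: [10 -- 40]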
# Array methods ---------------------------------------
copy = _arraymethod('copy')
diagonal = _arraymethod('diagonal')
transpose = _arraymethod('transpose')
T = property(fget=lambda self:self.transpose())
swapaxes = _arraymethod('swapaxes')
clip = _arraymethod('clip', onmask=False)
squeeze = _arraymethod('squeeze')
#--------------------------------------------
def tolist(self, fill_value=None):
"""
Return the data portion of the masked array as a hierarchical Python list.
Data items are converted to the nearest compatible Python type.
Masked values are converted to `fill_value`. If `fill_value` is None,
the corresponding entries in the output list will be ``None``.
Parameters
----------
fill_value : scalar, optional
The value to use for invalid entries. Default is None.
Returns
-------
result : list
The Python list representation of the masked array.
Examples
--------
>>> x = np.ma.array([[1,2,3], [4,5,6], [7,8,9]], mask=[0] + [1,0]*4)
>>> x.tolist()
[[1, None, 3], [None, 5, None], [7, None, 9]]
>>> x.tolist(-999)
[[1, -999, 3], [-999, 5, -999], [7, -999, 9]]
"""
_mask = self._mask
# No mask ? Just return .data.tolist ?
if _mask is nomask:
return self._data.tolist()
# Explicit fill_value: fill the array and get the list
if fill_value is not None:
return self.filled(fill_value).tolist()
# Structured array .............
names = self.dtype.names
if names:
result = self._data.astype([(_, object) for _ in names])
for n in names:
result[n][_mask[n]] = None
return result.tolist()
# Standard arrays ...............
if _mask is nomask:
return [None]
# Set temps to save time when dealing w/ marrays...
inishape = self.shape
result = np.array(self._data.ravel(), dtype=object)
result[_mask.ravel()] = None
result.shape = inishape
return result.tolist()
# if fill_value is not None:
# return self.filled(fill_value).tolist()
# result = self.filled().tolist()
# # Set temps to save time when dealing w/ mrecarrays...
# _mask = self._mask
# if _mask is nomask:
# return result
# nbdims = self.ndim
# dtypesize = len(self.dtype)
# if nbdims == 0:
# return tuple([None] * dtypesize)
# elif nbdims == 1:
# maskedidx = _mask.nonzero()[0].tolist()
# if dtypesize:
# nodata = tuple([None] * dtypesize)
# else:
# nodata = None
# [operator.setitem(result, i, nodata) for i in maskedidx]
# else:
# for idx in zip(*[i.tolist() for i in _mask.nonzero()]):
# tmp = result
# for i in idx[:-1]:
# tmp = tmp[i]
# tmp[idx[-1]] = None
# return result
#........................
def tostring(self, fill_value=None, order='C'):
"""
Return the array data as a string containing the raw bytes in the array.
The array is filled with a fill value before the string conversion.
Parameters
----------
fill_value : scalar, optional
            Value used to fill in the masked values. Default is None, in which
case `MaskedArray.fill_value` is used.
order : {'C','F','A'}, optional
Order of the data item in the copy. Default is 'C'.
- 'C' -- C order (row major).
- 'F' -- Fortran order (column major).
- 'A' -- Any, current order of array.
- None -- Same as 'A'.
See Also
--------
ndarray.tostring
tolist, tofile
Notes
-----
As for `ndarray.tostring`, information about the shape, dtype, etc.,
but also about `fill_value`, will be lost.
Examples
--------
>>> x = np.ma.array(np.array([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]])
>>> x.tostring()
'\\x01\\x00\\x00\\x00?B\\x0f\\x00?B\\x0f\\x00\\x04\\x00\\x00\\x00'
"""
return self.filled(fill_value).tostring(order=order)
#........................
def tofile(self, fid, sep="", format="%s"):
"""
Save a masked array to a file in binary format.
.. warning::
This function is not implemented yet.
Raises
------
NotImplementedError
When `tofile` is called.
"""
raise NotImplementedError("Not implemented yet, sorry...")
def toflex(self):
"""
Transforms a masked array into a flexible-type array.
The flexible type array that is returned will have two fields:
* the ``_data`` field stores the ``_data`` part of the array.
* the ``_mask`` field stores the ``_mask`` part of the array.
Parameters
----------
None
Returns
-------
record : ndarray
A new flexible-type `ndarray` with two fields: the first element
containing a value, the second element containing the corresponding
mask boolean. The returned record shape matches self.shape.
Notes
-----
A side-effect of transforming a masked array into a flexible `ndarray` is
that meta information (``fill_value``, ...) will be lost.
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print x
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> print x.toflex()
[[(1, False) (2, True) (3, False)]
[(4, True) (5, False) (6, True)]
[(7, False) (8, True) (9, False)]]
"""
# Get the basic dtype ....
ddtype = self.dtype
# Make sure we have a mask
_mask = self._mask
if _mask is None:
_mask = make_mask_none(self.shape, ddtype)
# And get its dtype
mdtype = self._mask.dtype
#
record = np.ndarray(shape=self.shape,
dtype=[('_data', ddtype), ('_mask', mdtype)])
record['_data'] = self._data
record['_mask'] = self._mask
return record
torecords = toflex
#--------------------------------------------
# Pickling
def __getstate__(self):
"""Return the internal state of the masked array, for pickling
purposes.
"""
cf = 'CF'[self.flags.fnc]
state = (1,
self.shape,
self.dtype,
self.flags.fnc,
self._data.tostring(cf),
#self._data.tolist(),
getmaskarray(self).tostring(cf),
#getmaskarray(self).tolist(),
self._fill_value,
)
return state
#
def __setstate__(self, state):
"""Restore the internal state of the masked array, for
        pickling purposes. ``state`` is typically the output of the
        ``__getstate__`` method, and is a 7-tuple:
        - class version number
        - a tuple giving the shape of the data
        - a typecode for the data
        - a flag indicating whether the data is stored in Fortran order
        - a binary string for the data
        - a binary string for the mask
        - the fill_value.
"""
(_, shp, typ, isf, raw, msk, flv) = state
ndarray.__setstate__(self, (shp, typ, isf, raw))
self._mask.__setstate__((shp, make_mask_descr(typ), isf, msk))
self.fill_value = flv
#
def __reduce__(self):
"""Return a 3-tuple for pickling a MaskedArray.
"""
return (_mareconstruct,
(self.__class__, self._baseclass, (0,), 'b',),
self.__getstate__())
#
def __deepcopy__(self, memo=None):
from copy import deepcopy
copied = MaskedArray.__new__(type(self), self, copy=True)
if memo is None:
memo = {}
memo[id(self)] = copied
for (k, v) in self.__dict__.iteritems():
copied.__dict__[k] = deepcopy(v, memo)
return copied
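    # Hedged sketch of the pickle round trip implemented by __reduce__ /
    # __getstate__ / __setstate__ above: data, mask and fill_value all
    # survive (cPickle shown; any pickle module works).
    #     >>> import cPickle
    #     >>> a = np.ma.array([1, 2, 3], mask=[0, 1, 0], fill_value=-99)
    #     >>> b = cPickle.loads(cPickle.dumps(a))
    #     >>> print b, b.fill_value    # expected: [1 -- 3] -99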
def _mareconstruct(subtype, baseclass, baseshape, basetype,):
"""Internal function that builds a new MaskedArray from the
information stored in a pickle.
"""
_data = ndarray.__new__(baseclass, baseshape, basetype)
_mask = ndarray.__new__(ndarray, baseshape, make_mask_descr(basetype))
return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,)
class mvoid(MaskedArray):
"""
Fake a 'void' object to use for masked array with structured dtypes.
"""
#
def __new__(self, data, mask=nomask, dtype=None, fill_value=None):
dtype = dtype or data.dtype
_data = ndarray((), dtype=dtype)
_data[()] = data
_data = _data.view(self)
if mask is not nomask:
if isinstance(mask, np.void):
_data._mask = mask
else:
try:
# Mask is already a 0D array
_data._mask = np.void(mask)
except TypeError:
# Transform the mask to a void
mdtype = make_mask_descr(dtype)
_data._mask = np.array(mask, dtype=mdtype)[()]
if fill_value is not None:
_data.fill_value = fill_value
return _data
def _get_data(self):
# Make sure that the _data part is a np.void
return self.view(ndarray)[()]
_data = property(fget=_get_data)
def __getitem__(self, indx):
"Get the index..."
m = self._mask
if m is not nomask and m[indx]:
return masked
return self._data[indx]
def __setitem__(self, indx, value):
self._data[indx] = value
self._mask[indx] |= getattr(value, "_mask", False)
def __str__(self):
m = self._mask
if (m is nomask):
return self._data.__str__()
m = tuple(m)
if (not any(m)):
return self._data.__str__()
r = self._data.tolist()
p = masked_print_option
if not p.enabled():
p = 'N/A'
else:
p = str(p)
r = [(str(_), p)[int(_m)] for (_, _m) in zip(r, m)]
return "(%s)" % ", ".join(r)
def __repr__(self):
m = self._mask
if (m is nomask):
return self._data.__repr__()
m = tuple(m)
if not any(m):
return self._data.__repr__()
p = masked_print_option
if not p.enabled():
return self.filled(self.fill_value).__repr__()
p = str(p)
r = [(str(_), p)[int(_m)] for (_, _m) in zip(self._data.tolist(), m)]
return "(%s)" % ", ".join(r)
def __iter__(self):
"Defines an iterator for mvoid"
(_data, _mask) = (self._data, self._mask)
if _mask is nomask:
for d in _data:
yield d
else:
for (d, m) in zip(_data, _mask):
if m:
yield masked
else:
yield d
def filled(self, fill_value=None):
"""
Return a copy with masked fields filled with a given value.
Parameters
----------
fill_value : scalar, optional
The value to use for invalid entries (None by default).
If None, the `fill_value` attribute is used instead.
Returns
-------
filled_void:
A `np.void` object
See Also
--------
MaskedArray.filled
"""
return asarray(self).filled(fill_value)[()]
def tolist(self):
"""
Transforms the mvoid object into a tuple.
Masked fields are replaced by None.
Returns
-------
returned_tuple
Tuple of fields
"""
_mask = self._mask
if _mask is nomask:
return self._data.tolist()
result = []
for (d, m) in zip(self._data, self._mask):
if m:
result.append(None)
else:
# .item() makes sure we return a standard Python object
result.append(d.item())
return tuple(result)
#####--------------------------------------------------------------------------
#---- --- Shortcuts ---
#####---------------------------------------------------------------------------
def isMaskedArray(x):
"""
Test whether input is an instance of MaskedArray.
This function returns True if `x` is an instance of MaskedArray
and returns False otherwise. Any object is accepted as input.
Parameters
----------
x : object
Object to test.
Returns
-------
result : bool
True if `x` is a MaskedArray.
See Also
--------
isMA : Alias to isMaskedArray.
isarray : Alias to isMaskedArray.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.eye(3, 3)
>>> a
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> m = ma.masked_values(a, 0)
>>> m
masked_array(data =
[[1.0 -- --]
[-- 1.0 --]
[-- -- 1.0]],
mask =
[[False True True]
[ True False True]
[ True True False]],
fill_value=0.0)
>>> ma.isMaskedArray(a)
False
>>> ma.isMaskedArray(m)
True
>>> ma.isMaskedArray([0, 1, 2])
False
"""
return isinstance(x, MaskedArray)
isarray = isMaskedArray
isMA = isMaskedArray #backward compatibility
# We define the masked singleton as a float for higher precedence...
# Note that it can be tricky sometimes w/ type comparison
class MaskedConstant(MaskedArray):
#
_data = data = np.array(0.)
_mask = mask = np.array(True)
_baseclass = ndarray
#
def __new__(self):
return self._data.view(self)
#
def __array_finalize__(self, obj):
return
#
def __array_wrap__(self, obj):
return self
#
def __str__(self):
return str(masked_print_option._display)
#
def __repr__(self):
return 'masked'
#
def flatten(self):
return masked_array([self._data], dtype=float, mask=[True])
masked = masked_singleton = MaskedConstant()
masked_array = MaskedArray
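# A small sketch of how the ``masked`` singleton behaves (illustrative):
# it is what scalar reductions of fully masked data return.
#     >>> a = np.ma.array([1, 2], mask=[1, 1])
#     >>> a.min() is masked    # expected: True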
def array(data, dtype=None, copy=False, order=False,
mask=nomask, fill_value=None,
keep_mask=True, hard_mask=False, shrink=True, subok=True, ndmin=0,
):
"""array(data, dtype=None, copy=False, order=False, mask=nomask,
fill_value=None, keep_mask=True, hard_mask=False, shrink=True,
subok=True, ndmin=0)
    Acts as a shortcut to MaskedArray, with options in a different order
    for convenience and backwards compatibility.
"""
    #!!!: we should try to put 'order' somewhere
return MaskedArray(data, mask=mask, dtype=dtype, copy=copy, subok=subok,
keep_mask=keep_mask, hard_mask=hard_mask,
fill_value=fill_value, ndmin=ndmin, shrink=shrink)
array.__doc__ = masked_array.__doc__
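# Minimal sketch of the ``array`` shortcut (illustrative):
#     >>> print array([1, 2, 3], mask=[0, 1, 0], fill_value=-1)
#     [1 -- 3]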
def is_masked(x):
"""
Determine whether input has masked values.
Accepts any object as input, but always returns False unless the
input is a MaskedArray containing masked values.
Parameters
----------
x : array_like
Array to check for masked values.
Returns
-------
result : bool
True if `x` is a MaskedArray with masked values, False otherwise.
Examples
--------
>>> import numpy.ma as ma
>>> x = ma.masked_equal([0, 1, 0, 2, 3], 0)
>>> x
masked_array(data = [-- 1 -- 2 3],
mask = [ True False True False False],
fill_value=999999)
>>> ma.is_masked(x)
True
>>> x = ma.masked_equal([0, 1, 0, 2, 3], 42)
>>> x
masked_array(data = [0 1 0 2 3],
mask = False,
fill_value=999999)
>>> ma.is_masked(x)
False
Always returns False if `x` isn't a MaskedArray.
>>> x = [False, True, False]
>>> ma.is_masked(x)
False
>>> x = 'a string'
>>> ma.is_masked(x)
False
"""
m = getmask(x)
if m is nomask:
return False
elif m.any():
return True
return False
#####---------------------------------------------------------------------------
#---- --- Extrema functions ---
#####---------------------------------------------------------------------------
class _extrema_operation(object):
"""
Generic class for maximum/minimum functions.
.. note::
This is the base class for `_maximum_operation` and
`_minimum_operation`.
"""
def __call__(self, a, b=None):
"Executes the call behavior."
if b is None:
return self.reduce(a)
return where(self.compare(a, b), a, b)
#.........
def reduce(self, target, axis=None):
"Reduce target along the given axis."
target = narray(target, copy=False, subok=True)
m = getmask(target)
if axis is not None:
kargs = { 'axis' : axis }
else:
kargs = {}
target = target.ravel()
if not (m is nomask):
m = m.ravel()
if m is nomask:
t = self.ufunc.reduce(target, **kargs)
else:
target = target.filled(self.fill_value_func(target)).view(type(target))
t = self.ufunc.reduce(target, **kargs)
m = umath.logical_and.reduce(m, **kargs)
if hasattr(t, '_mask'):
t._mask = m
elif m:
t = masked
return t
#.........
def outer (self, a, b):
"Return the function applied to the outer product of a and b."
ma = getmask(a)
mb = getmask(b)
if ma is nomask and mb is nomask:
m = nomask
else:
ma = getmaskarray(a)
mb = getmaskarray(b)
m = logical_or.outer(ma, mb)
result = self.ufunc.outer(filled(a), filled(b))
if not isinstance(result, MaskedArray):
result = result.view(MaskedArray)
result._mask = m
return result
#............................
class _minimum_operation(_extrema_operation):
"Object to calculate minima"
def __init__ (self):
"""minimum(a, b) or minimum(a)
In one argument case, returns the scalar minimum.
"""
self.ufunc = umath.minimum
self.afunc = amin
self.compare = less
self.fill_value_func = minimum_fill_value
#............................
class _maximum_operation(_extrema_operation):
"Object to calculate maxima"
def __init__ (self):
"""maximum(a, b) or maximum(a)
In one argument case returns the scalar maximum.
"""
self.ufunc = umath.maximum
self.afunc = amax
self.compare = greater
self.fill_value_func = maximum_fill_value
#..........................................................
def min(obj, axis=None, out=None, fill_value=None):
try:
return obj.min(axis=axis, fill_value=fill_value, out=out)
except (AttributeError, TypeError):
        # If obj doesn't have a min method,
# ...or if the method doesn't accept a fill_value argument
return asanyarray(obj).min(axis=axis, fill_value=fill_value, out=out)
min.__doc__ = MaskedArray.min.__doc__
def max(obj, axis=None, out=None, fill_value=None):
try:
return obj.max(axis=axis, fill_value=fill_value, out=out)
except (AttributeError, TypeError):
# If obj doesn't have a max method,
# ...or if the method doesn't accept a fill_value argument
return asanyarray(obj).max(axis=axis, fill_value=fill_value, out=out)
max.__doc__ = MaskedArray.max.__doc__
def ptp(obj, axis=None, out=None, fill_value=None):
"""a.ptp(axis=None) = a.max(axis)-a.min(axis)"""
try:
return obj.ptp(axis, out=out, fill_value=fill_value)
except (AttributeError, TypeError):
        # If obj doesn't have a ptp method,
# ...or if the method doesn't accept a fill_value argument
return asanyarray(obj).ptp(axis=axis, fill_value=fill_value, out=out)
ptp.__doc__ = MaskedArray.ptp.__doc__
#####---------------------------------------------------------------------------
#---- --- Definition of functions from the corresponding methods ---
#####---------------------------------------------------------------------------
class _frommethod:
"""
Define functions from existing MaskedArray methods.
Parameters
----------
methodname : str
Name of the method to transform.
"""
def __init__(self, methodname):
self.__name__ = methodname
self.__doc__ = self.getdoc()
#
def getdoc(self):
"Return the doc of the function (from the doc of the method)."
meth = getattr(MaskedArray, self.__name__, None) or\
getattr(np, self.__name__, None)
signature = self.__name__ + get_object_signature(meth)
if meth is not None:
doc = """ %s\n%s""" % (signature, getattr(meth, '__doc__', None))
return doc
#
def __call__(self, a, *args, **params):
# Get the method from the array (if possible)
method_name = self.__name__
method = getattr(a, method_name, None)
if method is not None:
return method(*args, **params)
# Still here ? Then a is not a MaskedArray
method = getattr(MaskedArray, method_name, None)
if method is not None:
return method(MaskedArray(a), *args, **params)
# Still here ? OK, let's call the corresponding np function
method = getattr(np, method_name)
return method(a, *args, **params)
all = _frommethod('all')
anomalies = anom = _frommethod('anom')
any = _frommethod('any')
compress = _frommethod('compress')
cumprod = _frommethod('cumprod')
cumsum = _frommethod('cumsum')
copy = _frommethod('copy')
diagonal = _frommethod('diagonal')
harden_mask = _frommethod('harden_mask')
ids = _frommethod('ids')
maximum = _maximum_operation()
mean = _frommethod('mean')
minimum = _minimum_operation()
nonzero = _frommethod('nonzero')
prod = _frommethod('prod')
product = _frommethod('prod')
ravel = _frommethod('ravel')
repeat = _frommethod('repeat')
shrink_mask = _frommethod('shrink_mask')
soften_mask = _frommethod('soften_mask')
std = _frommethod('std')
sum = _frommethod('sum')
swapaxes = _frommethod('swapaxes')
#take = _frommethod('take')
trace = _frommethod('trace')
var = _frommethod('var')
def take(a, indices, axis=None, out=None, mode='raise'):
"""
"""
a = masked_array(a)
return a.take(indices, axis=axis, out=out, mode=mode)
#..............................................................................
def power(a, b, third=None):
"""
Returns element-wise base array raised to power from second array.
This is the masked array version of `numpy.power`. For details see
`numpy.power`.
See Also
--------
numpy.power
Notes
-----
The *out* argument to `numpy.power` is not supported, `third` has to be
None.
"""
if third is not None:
raise MaskError, "3-argument power not supported."
# Get the masks
ma = getmask(a)
mb = getmask(b)
m = mask_or(ma, mb)
# Get the rawdata
fa = getdata(a)
fb = getdata(b)
# Get the type of the result (so that we preserve subclasses)
if isinstance(a, MaskedArray):
basetype = type(a)
else:
basetype = MaskedArray
# Get the result and view it as a (subclass of) MaskedArray
err_status = np.geterr()
try:
np.seterr(divide='ignore', invalid='ignore')
result = np.where(m, fa, umath.power(fa, fb)).view(basetype)
finally:
np.seterr(**err_status)
result._update_from(a)
# Find where we're in trouble w/ NaNs and Infs
invalid = np.logical_not(np.isfinite(result.view(ndarray)))
# Add the initial mask
if m is not nomask:
if not (result.ndim):
return masked
result._mask = np.logical_or(m, invalid)
# Fix the invalid parts
if invalid.any():
if not result.ndim:
return masked
elif result._mask is nomask:
result._mask = invalid
result._data[invalid] = result.fill_value
return result
# if fb.dtype.char in typecodes["Integer"]:
# return masked_array(umath.power(fa, fb), m)
# m = mask_or(m, (fa < 0) & (fb != fb.astype(int)))
# if m is nomask:
# return masked_array(umath.power(fa, fb))
# else:
# fa = fa.copy()
# if m.all():
# fa.flat = 1
# else:
# np.putmask(fa,m,1)
# return masked_array(umath.power(fa, fb), m)
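# Illustrative behaviour (sketch, assuming default fill values): entries whose
# result would be invalid (NaN/Inf) come back masked.
#     >>> power(masked_array([-1., 4.]), 0.5)
#     masked_array(data = [-- 2.0], mask = [ True False], fill_value = 1e+20)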
#..............................................................................
def argsort(a, axis=None, kind='quicksort', order=None, fill_value=None):
"Function version of the eponymous method."
if fill_value is None:
fill_value = default_fill_value(a)
d = filled(a, fill_value)
if axis is None:
return d.argsort(kind=kind, order=order)
return d.argsort(axis, kind=kind, order=order)
argsort.__doc__ = MaskedArray.argsort.__doc__
def argmin(a, axis=None, fill_value=None):
"Function version of the eponymous method."
if fill_value is None:
fill_value = default_fill_value(a)
d = filled(a, fill_value)
return d.argmin(axis=axis)
argmin.__doc__ = MaskedArray.argmin.__doc__
def argmax(a, axis=None, fill_value=None):
"Function version of the eponymous method."
if fill_value is None:
fill_value = default_fill_value(a)
try:
fill_value = -fill_value
except:
pass
d = filled(a, fill_value)
return d.argmax(axis=axis)
argmax.__doc__ = MaskedArray.argmax.__doc__
def sort(a, axis= -1, kind='quicksort', order=None, endwith=True, fill_value=None):
"Function version of the eponymous method."
a = narray(a, copy=True, subok=True)
if axis is None:
a = a.flatten()
axis = 0
if fill_value is None:
if endwith:
filler = minimum_fill_value(a)
else:
filler = maximum_fill_value(a)
else:
filler = fill_value
# return
indx = np.indices(a.shape).tolist()
indx[axis] = filled(a, filler).argsort(axis=axis, kind=kind, order=order)
return a[indx]
sort.__doc__ = MaskedArray.sort.__doc__
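# Illustrative sketch: with the default endwith=True, masked entries are
# pushed to the end of the sorted result.
#     >>> sort(masked_array([3, 2, 1], mask=[0, 1, 0]))
#     masked_array(data = [1 3 --], mask = [False False  True], fill_value = 999999)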
def compressed(x):
"""
Return all the non-masked data as a 1-D array.
This function is equivalent to calling the "compressed" method of a
`MaskedArray`, see `MaskedArray.compressed` for details.
See Also
--------
MaskedArray.compressed
Equivalent method.
"""
if getmask(x) is nomask:
return np.asanyarray(x)
else:
return x.compressed()
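# Illustrative sketch:
#     >>> compressed(masked_array([1, 2, 3], mask=[0, 1, 0]))
#     array([1, 3])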
def concatenate(arrays, axis=0):
"""
Concatenate a sequence of arrays along the given axis.
Parameters
----------
arrays : sequence of array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int, optional
The axis along which the arrays will be joined. Default is 0.
Returns
-------
result : MaskedArray
The concatenated array with any masked entries preserved.
See Also
--------
numpy.concatenate : Equivalent function in the top-level NumPy module.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.arange(3)
>>> a[1] = ma.masked
>>> b = ma.arange(2, 5)
>>> a
masked_array(data = [0 -- 2],
mask = [False True False],
fill_value = 999999)
>>> b
masked_array(data = [2 3 4],
mask = False,
fill_value = 999999)
>>> ma.concatenate([a, b])
masked_array(data = [0 -- 2 2 3 4],
mask = [False True False False False False],
fill_value = 999999)
"""
d = np.concatenate([getdata(a) for a in arrays], axis)
rcls = get_masked_subclass(*arrays)
data = d.view(rcls)
# Check whether one of the arrays has a non-empty mask...
for x in arrays:
if getmask(x) is not nomask:
break
else:
return data
# OK, so we have to concatenate the masks
dm = np.concatenate([getmaskarray(a) for a in arrays], axis)
# If we decide to keep a '_shrinkmask' option, we want to check that ...
# ... all of them are True, and then check for dm.any()
# shrink = numpy.logical_or.reduce([getattr(a,'_shrinkmask',True) for a in arrays])
# if shrink and not dm.any():
if not dm.dtype.fields and not dm.any():
data._mask = nomask
else:
data._mask = dm.reshape(d.shape)
return data
def count(a, axis=None):
if isinstance(a, MaskedArray):
return a.count(axis)
return masked_array(a, copy=False).count(axis)
count.__doc__ = MaskedArray.count.__doc__
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
This function is the equivalent of `numpy.diag` that takes masked
values into account, see `numpy.diag` for details.
See Also
--------
numpy.diag : Equivalent function for ndarrays.
"""
output = np.diag(v, k).view(MaskedArray)
if getmask(v) is not nomask:
output._mask = np.diag(v._mask, k)
return output
def expand_dims(x, axis):
"""
Expand the shape of an array.
Expands the shape of the array by including a new axis before the one
specified by the `axis` parameter. This function behaves the same as
`numpy.expand_dims` but preserves masked elements.
See Also
--------
numpy.expand_dims : Equivalent function in top-level NumPy module.
Examples
--------
>>> import numpy.ma as ma
>>> x = ma.array([1, 2, 4])
>>> x[1] = ma.masked
>>> x
masked_array(data = [1 -- 4],
mask = [False True False],
fill_value = 999999)
>>> np.expand_dims(x, axis=0)
array([[1, 2, 4]])
>>> ma.expand_dims(x, axis=0)
masked_array(data =
[[1 -- 4]],
mask =
[[False True False]],
fill_value = 999999)
The same result can be achieved using slicing syntax with `np.newaxis`.
>>> x[np.newaxis, :]
masked_array(data =
[[1 -- 4]],
mask =
[[False True False]],
fill_value = 999999)
"""
result = n_expand_dims(x, axis)
if isinstance(x, MaskedArray):
new_shape = result.shape
result = x.view()
result.shape = new_shape
if result._mask is not nomask:
result._mask.shape = new_shape
return result
#......................................
def left_shift (a, n):
"""
Shift the bits of an integer to the left.
This is the masked array version of `numpy.left_shift`, for details
see that function.
See Also
--------
numpy.left_shift
"""
m = getmask(a)
if m is nomask:
d = umath.left_shift(filled(a), n)
return masked_array(d)
else:
d = umath.left_shift(filled(a, 0), n)
return masked_array(d, mask=m)
def right_shift (a, n):
"""
Shift the bits of an integer to the right.
This is the masked array version of `numpy.right_shift`, for details
see that function.
See Also
--------
numpy.right_shift
"""
m = getmask(a)
if m is nomask:
d = umath.right_shift(filled(a), n)
return masked_array(d)
else:
d = umath.right_shift(filled(a, 0), n)
return masked_array(d, mask=m)
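# Illustrative sketch for both shift helpers: the data is shifted bit-wise
# while the original mask is carried over unchanged.
#     >>> left_shift(masked_array([1, 4, 8], mask=[0, 1, 0]), 1)
#     masked_array(data = [2 -- 16], mask = [False  True False], fill_value = 999999)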
#......................................
def put(a, indices, values, mode='raise'):
"""
Set storage-indexed locations to corresponding values.
This function is equivalent to `MaskedArray.put`, see that method
for details.
See Also
--------
MaskedArray.put
"""
# We can't use 'frommethod', the order of arguments is different
try:
return a.put(indices, values, mode=mode)
except AttributeError:
return narray(a, copy=False).put(indices, values, mode=mode)
def putmask(a, mask, values): #, mode='raise'):
"""
Changes elements of an array based on conditional and input values.
This is the masked array version of `numpy.putmask`, for details see
`numpy.putmask`.
See Also
--------
numpy.putmask
Notes
-----
Using a masked array as `values` will **not** transform a `ndarray` into
a `MaskedArray`.
"""
# We can't use 'frommethod', the order of arguments is different
if not isinstance(a, MaskedArray):
a = a.view(MaskedArray)
(valdata, valmask) = (getdata(values), getmask(values))
if getmask(a) is nomask:
if valmask is not nomask:
a._sharedmask = True
a._mask = make_mask_none(a.shape, a.dtype)
np.putmask(a._mask, mask, valmask)
elif a._hardmask:
if valmask is not nomask:
m = a._mask.copy()
np.putmask(m, mask, valmask)
a.mask |= m
else:
if valmask is nomask:
valmask = getmaskarray(values)
np.putmask(a._mask, mask, valmask)
np.putmask(a._data, mask, valdata)
return
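# Illustrative sketch: entries of the data (and mask) are overwritten wherever
# `mask` is True.
#     >>> x = masked_array([1, 2, 3])
#     >>> putmask(x, [1, 0, 0], [9, 9, 9])
#     >>> x
#     masked_array(data = [9 2 3], mask = False, fill_value = 999999)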
def transpose(a, axes=None):
"""
Permute the dimensions of an array.
This function is exactly equivalent to `numpy.transpose`.
See Also
--------
numpy.transpose : Equivalent function in top-level NumPy module.
Examples
--------
>>> import numpy.ma as ma
>>> x = ma.arange(4).reshape((2,2))
>>> x[1, 1] = ma.masked
    >>> x
masked_array(data =
[[0 1]
[2 --]],
mask =
[[False False]
[False True]],
fill_value = 999999)
>>> ma.transpose(x)
masked_array(data =
[[0 2]
[1 --]],
mask =
[[False False]
[False True]],
fill_value = 999999)
"""
#We can't use 'frommethod', as 'transpose' doesn't take keywords
try:
return a.transpose(axes)
except AttributeError:
return narray(a, copy=False).transpose(axes).view(MaskedArray)
def reshape(a, new_shape, order='C'):
"""
Returns an array containing the same data with a new shape.
Refer to `MaskedArray.reshape` for full documentation.
See Also
--------
MaskedArray.reshape : equivalent function
"""
    # We can't use 'frommethod', it whines about some parameters.
try:
return a.reshape(new_shape, order=order)
except AttributeError:
_tmp = narray(a, copy=False).reshape(new_shape, order=order)
return _tmp.view(MaskedArray)
def resize(x, new_shape):
"""
Return a new masked array with the specified size and shape.
This is the masked equivalent of the `numpy.resize` function. The new
array is filled with repeated copies of `x` (in the order that the
data are stored in memory). If `x` is masked, the new array will be
masked, and the new mask will be a repetition of the old one.
See Also
--------
numpy.resize : Equivalent function in the top level NumPy module.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.array([[1, 2] ,[3, 4]])
>>> a[0, 1] = ma.masked
>>> a
masked_array(data =
[[1 --]
[3 4]],
mask =
[[False True]
[False False]],
fill_value = 999999)
>>> np.resize(a, (3, 3))
array([[1, 2, 3],
[4, 1, 2],
[3, 4, 1]])
>>> ma.resize(a, (3, 3))
masked_array(data =
[[1 -- 3]
[4 1 --]
[3 4 1]],
mask =
[[False True False]
[False False True]
[False False False]],
fill_value = 999999)
A MaskedArray is always returned, regardless of the input type.
>>> a = np.array([[1, 2] ,[3, 4]])
>>> ma.resize(a, (3, 3))
masked_array(data =
[[1 2 3]
[4 1 2]
[3 4 1]],
mask =
False,
fill_value = 999999)
"""
# We can't use _frommethods here, as N.resize is notoriously whiny.
m = getmask(x)
if m is not nomask:
m = np.resize(m, new_shape)
result = np.resize(x, new_shape).view(get_masked_subclass(x))
if result.ndim:
result._mask = m
return result
#................................................
def rank(obj):
"maskedarray version of the numpy function."
return np.rank(getdata(obj))
rank.__doc__ = np.rank.__doc__
#
def shape(obj):
"maskedarray version of the numpy function."
return np.shape(getdata(obj))
shape.__doc__ = np.shape.__doc__
#
def size(obj, axis=None):
"maskedarray version of the numpy function."
return np.size(getdata(obj), axis)
size.__doc__ = np.size.__doc__
#................................................
#####--------------------------------------------------------------------------
#---- --- Extra functions ---
#####--------------------------------------------------------------------------
def where (condition, x=None, y=None):
"""
Return a masked array with elements from x or y, depending on condition.
Returns a masked array, shaped like condition, where the elements
are from `x` when `condition` is True, and from `y` otherwise.
If neither `x` nor `y` are given, the function returns a tuple of
indices where `condition` is True (the result of
``condition.nonzero()``).
Parameters
----------
condition : array_like, bool
The condition to meet. For each True element, yield the corresponding
element from `x`, otherwise from `y`.
x, y : array_like, optional
Values from which to choose. `x` and `y` need to have the same shape
        as `condition`, or be broadcastable to that shape.
Returns
-------
out : MaskedArray or tuple of ndarrays
The resulting masked array if `x` and `y` were given, otherwise
the result of ``condition.nonzero()``.
See Also
--------
numpy.where : Equivalent function in the top-level NumPy module.
Examples
--------
>>> x = np.ma.array(np.arange(9.).reshape(3, 3), mask=[[0, 1, 0],
... [1, 0, 1],
... [0, 1, 0]])
>>> print x
[[0.0 -- 2.0]
[-- 4.0 --]
[6.0 -- 8.0]]
>>> np.ma.where(x > 5) # return the indices where x > 5
(array([2, 2]), array([0, 2]))
>>> print np.ma.where(x > 5, x, -3.1416)
[[-3.1416 -- -3.1416]
[-- -3.1416 --]
[6.0 -- 8.0]]
"""
if x is None and y is None:
return filled(condition, 0).nonzero()
elif x is None or y is None:
raise ValueError, "Either both or neither x and y should be given."
# Get the condition ...............
fc = filled(condition, 0).astype(MaskType)
notfc = np.logical_not(fc)
# Get the data ......................................
xv = getdata(x)
yv = getdata(y)
if x is masked:
ndtype = yv.dtype
elif y is masked:
ndtype = xv.dtype
else:
ndtype = np.find_common_type([xv.dtype, yv.dtype], [])
# Construct an empty array and fill it
d = np.empty(fc.shape, dtype=ndtype).view(MaskedArray)
_data = d._data
np.putmask(_data, fc, xv.astype(ndtype))
np.putmask(_data, notfc, yv.astype(ndtype))
# Create an empty mask and fill it
_mask = d._mask = np.zeros(fc.shape, dtype=MaskType)
np.putmask(_mask, fc, getmask(x))
np.putmask(_mask, notfc, getmask(y))
_mask |= getmaskarray(condition)
if not _mask.any():
d._mask = nomask
return d
def choose (indices, choices, out=None, mode='raise'):
"""
Use an index array to construct a new array from a set of choices.
Given an array of integers and a set of n choice arrays, this method
will create a new array that merges each of the choice arrays. Where a
value in `a` is i, the new array will have the value that choices[i]
contains in the same place.
Parameters
----------
a : ndarray of ints
This array must contain integers in ``[0, n-1]``, where n is the
number of choices.
choices : sequence of arrays
Choice arrays. The index array and all of the choices should be
broadcastable to the same shape.
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and `dtype`.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' : raise an error
* 'wrap' : wrap around
* 'clip' : clip to the range
Returns
-------
merged_array : array
See Also
--------
choose : equivalent function
Examples
--------
>>> choice = np.array([[1,1,1], [2,2,2], [3,3,3]])
>>> a = np.array([2, 1, 0])
>>> np.ma.choose(a, choice)
masked_array(data = [3 2 1],
mask = False,
fill_value=999999)
"""
def fmask (x):
"Returns the filled array, or True if masked."
if x is masked:
return True
return filled(x)
def nmask (x):
"Returns the mask, True if ``masked``, False if ``nomask``."
if x is masked:
return True
return getmask(x)
# Get the indices......
c = filled(indices, 0)
# Get the masks........
masks = [nmask(x) for x in choices]
data = [fmask(x) for x in choices]
# Construct the mask
outputmask = np.choose(c, masks, mode=mode)
outputmask = make_mask(mask_or(outputmask, getmask(indices)),
copy=0, shrink=True)
# Get the choices......
d = np.choose(c, data, mode=mode, out=out).view(MaskedArray)
if out is not None:
if isinstance(out, MaskedArray):
out.__setmask__(outputmask)
return out
d.__setmask__(outputmask)
return d
def round_(a, decimals=0, out=None):
"""
Return a copy of a, rounded to 'decimals' places.
When 'decimals' is negative, it specifies the number of positions
to the left of the decimal point. The real and imaginary parts of
complex numbers are rounded separately. Nothing is done if the
array is not of float type and 'decimals' is greater than or equal
to 0.
Parameters
----------
decimals : int
Number of decimals to round to. May be negative.
out : array_like
Existing array to use for output.
If not given, returns a default copy of a.
Notes
-----
If out is given and does not have a mask attribute, the mask of a
is lost!
"""
if out is None:
return np.round_(a, decimals, out)
else:
np.round_(getdata(a), decimals, out)
if hasattr(out, '_mask'):
out._mask = getmask(a)
return out
round = round_
def inner(a, b):
"""
Returns the inner product of a and b for arrays of floating point types.
Like the generic NumPy equivalent the product sum is over the last dimension
of a and b.
Notes
-----
The first argument is not conjugated.
"""
fa = filled(a, 0)
fb = filled(b, 0)
if len(fa.shape) == 0:
fa.shape = (1,)
if len(fb.shape) == 0:
fb.shape = (1,)
return np.inner(fa, fb).view(MaskedArray)
inner.__doc__ = doc_note(np.inner.__doc__,
"Masked values are replaced by 0.")
innerproduct = inner
def outer(a, b):
"maskedarray version of the numpy function."
fa = filled(a, 0).ravel()
fb = filled(b, 0).ravel()
d = np.outer(fa, fb)
ma = getmask(a)
mb = getmask(b)
if ma is nomask and mb is nomask:
return masked_array(d)
ma = getmaskarray(a)
mb = getmaskarray(b)
m = make_mask(1 - np.outer(1 - ma, 1 - mb), copy=0)
return masked_array(d, mask=m)
outer.__doc__ = doc_note(np.outer.__doc__,
"Masked values are replaced by 0.")
outerproduct = outer
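# Illustrative sketch: a masked entry in either input masks the whole
# corresponding row/column of the outer product, e.g.
#     outer(masked_array([1, 2], mask=[0, 1]), [10, 20])
# yields data [[10 20], [-- --]] with the second row fully masked.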
def allequal (a, b, fill_value=True):
"""
Return True if all entries of a and b are equal, using
fill_value as a truth value where either or both are masked.
Parameters
----------
a, b : array_like
Input arrays to compare.
fill_value : bool, optional
Whether masked values in a or b are considered equal (True) or not
(False).
Returns
-------
y : bool
Returns True if the two arrays are equal within the given
tolerance, False otherwise. If either array contains NaN,
then False is returned.
See Also
--------
all, any
numpy.ma.allclose
Examples
--------
>>> a = ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1])
>>> a
masked_array(data = [10000000000.0 1e-07 --],
mask = [False False True],
fill_value=1e+20)
    >>> b = np.array([1e10, 1e-7, -42.0])
>>> b
array([ 1.00000000e+10, 1.00000000e-07, -4.20000000e+01])
>>> ma.allequal(a, b, fill_value=False)
False
>>> ma.allequal(a, b)
True
"""
m = mask_or(getmask(a), getmask(b))
if m is nomask:
x = getdata(a)
y = getdata(b)
d = umath.equal(x, y)
return d.all()
elif fill_value:
x = getdata(a)
y = getdata(b)
d = umath.equal(x, y)
dm = array(d, mask=m, copy=False)
return dm.filled(True).all(None)
else:
return False
def allclose (a, b, masked_equal=True, rtol=1e-5, atol=1e-8):
"""
Returns True if two arrays are element-wise equal within a tolerance.
This function is equivalent to `allclose` except that masked values
are treated as equal (default) or unequal, depending on the `masked_equal`
argument.
Parameters
----------
a, b : array_like
Input arrays to compare.
masked_equal : bool, optional
Whether masked values in `a` and `b` are considered equal (True) or not
(False). They are considered equal by default.
rtol : float, optional
Relative tolerance. The relative difference is equal to ``rtol * b``.
Default is 1e-5.
atol : float, optional
Absolute tolerance. The absolute difference is equal to `atol`.
Default is 1e-8.
Returns
-------
y : bool
Returns True if the two arrays are equal within the given
tolerance, False otherwise. If either array contains NaN, then
False is returned.
See Also
--------
all, any
numpy.allclose : the non-masked `allclose`.
Notes
-----
If the following equation is element-wise True, then `allclose` returns
True::
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
Return True if all elements of `a` and `b` are equal subject to
given tolerances.
Examples
--------
>>> a = ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1])
>>> a
masked_array(data = [10000000000.0 1e-07 --],
mask = [False False True],
fill_value = 1e+20)
>>> b = ma.array([1e10, 1e-8, -42.0], mask=[0, 0, 1])
>>> ma.allclose(a, b)
False
>>> a = ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1])
>>> b = ma.array([1.00001e10, 1e-9, -42.0], mask=[0, 0, 1])
>>> ma.allclose(a, b)
True
>>> ma.allclose(a, b, masked_equal=False)
False
Masked values are not compared directly.
>>> a = ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1])
>>> b = ma.array([1.00001e10, 1e-9, 42.0], mask=[0, 0, 1])
>>> ma.allclose(a, b)
True
>>> ma.allclose(a, b, masked_equal=False)
False
"""
x = masked_array(a, copy=False)
y = masked_array(b, copy=False)
m = mask_or(getmask(x), getmask(y))
xinf = np.isinf(masked_array(x, copy=False, mask=m)).filled(False)
# If we have some infs, they should fall at the same place.
if not np.all(xinf == filled(np.isinf(y), False)):
return False
# No infs at all
if not np.any(xinf):
d = filled(umath.less_equal(umath.absolute(x - y),
atol + rtol * umath.absolute(y)),
masked_equal)
return np.all(d)
if not np.all(filled(x[xinf] == y[xinf], masked_equal)):
return False
x = x[~xinf]
y = y[~xinf]
d = filled(umath.less_equal(umath.absolute(x - y),
atol + rtol * umath.absolute(y)),
masked_equal)
return np.all(d)
#..............................................................................
def asarray(a, dtype=None, order=None):
"""
Convert the input to a masked array of the given data-type.
No copy is performed if the input is already an `ndarray`. If `a` is
a subclass of `MaskedArray`, a base class `MaskedArray` is returned.
Parameters
----------
a : array_like
Input data, in any form that can be converted to a masked array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists, ndarrays and masked arrays.
dtype : dtype, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('FORTRAN') memory
representation. Default is 'C'.
Returns
-------
out : MaskedArray
Masked array interpretation of `a`.
See Also
--------
asanyarray : Similar to `asarray`, but conserves subclasses.
Examples
--------
>>> x = np.arange(10.).reshape(2, 5)
>>> x
array([[ 0., 1., 2., 3., 4.],
[ 5., 6., 7., 8., 9.]])
>>> np.ma.asarray(x)
masked_array(data =
[[ 0. 1. 2. 3. 4.]
[ 5. 6. 7. 8. 9.]],
mask =
False,
fill_value = 1e+20)
>>> type(np.ma.asarray(x))
<class 'numpy.ma.core.MaskedArray'>
"""
return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=False)
def asanyarray(a, dtype=None):
"""
Convert the input to a masked array, conserving subclasses.
If `a` is a subclass of `MaskedArray`, its class is conserved.
No copy is performed if the input is already an `ndarray`.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array.
dtype : dtype, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('FORTRAN') memory
representation. Default is 'C'.
Returns
-------
out : MaskedArray
MaskedArray interpretation of `a`.
See Also
--------
asarray : Similar to `asanyarray`, but does not conserve subclass.
Examples
--------
>>> x = np.arange(10.).reshape(2, 5)
>>> x
array([[ 0., 1., 2., 3., 4.],
[ 5., 6., 7., 8., 9.]])
>>> np.ma.asanyarray(x)
masked_array(data =
[[ 0. 1. 2. 3. 4.]
[ 5. 6. 7. 8. 9.]],
mask =
False,
fill_value = 1e+20)
>>> type(np.ma.asanyarray(x))
<class 'numpy.ma.core.MaskedArray'>
"""
return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True)
#####--------------------------------------------------------------------------
#---- --- Pickling ---
#####--------------------------------------------------------------------------
def dump(a, F):
"""
Pickle a masked array to a file.
This is a wrapper around ``cPickle.dump``.
Parameters
----------
a : MaskedArray
The array to be pickled.
F : str or file-like object
The file to pickle `a` to. If a string, the full path to the file.
"""
if not hasattr(F, 'readline'):
F = open(F, 'w')
return cPickle.dump(a, F)
def dumps(a):
"""
Return a string corresponding to the pickling of a masked array.
This is a wrapper around ``cPickle.dumps``.
Parameters
----------
a : MaskedArray
The array for which the string representation of the pickle is
returned.
"""
return cPickle.dumps(a)
def load(F):
"""
Wrapper around ``cPickle.load`` which accepts either a file-like object
or a filename.
Parameters
----------
F : str or file
The file or file name to load.
See Also
--------
dump : Pickle an array
Notes
-----
This is different from `numpy.load`, which does not use cPickle but loads
the NumPy binary .npy format.
"""
if not hasattr(F, 'readline'):
F = open(F, 'r')
return cPickle.load(F)
def loads(strg):
"""
Load a pickle from the current string.
The result of ``cPickle.loads(strg)`` is returned.
Parameters
----------
strg : str
The string to load.
See Also
--------
dumps : Return a string corresponding to the pickling of a masked array.
"""
return cPickle.loads(strg)
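# Illustrative round-trip with the pickling helpers above (the mask survives):
#     >>> x = masked_array([1, 2, 3], mask=[0, 1, 0])
#     >>> loads(dumps(x))
#     masked_array(data = [1 -- 3], mask = [False  True False], fill_value = 999999)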
################################################################################
def fromfile(file, dtype=float, count= -1, sep=''):
raise NotImplementedError("Not yet implemented. Sorry")
def fromflex(fxarray):
"""
Build a masked array from a suitable flexible-type array.
The input array has to have a data-type with ``_data`` and ``_mask``
fields. This type of array is output by `MaskedArray.toflex`.
Parameters
----------
fxarray : ndarray
The structured input array, containing ``_data`` and ``_mask``
fields. If present, other fields are discarded.
Returns
-------
result : MaskedArray
The constructed masked array.
See Also
--------
MaskedArray.toflex : Build a flexible-type array from a masked array.
Examples
--------
>>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[0] + [1, 0] * 4)
>>> rec = x.toflex()
>>> rec
array([[(0, False), (1, True), (2, False)],
[(3, True), (4, False), (5, True)],
[(6, False), (7, True), (8, False)]],
dtype=[('_data', '<i4'), ('_mask', '|b1')])
>>> x2 = np.ma.fromflex(rec)
>>> x2
masked_array(data =
[[0 -- 2]
[-- 4 --]
[6 -- 8]],
mask =
[[False True False]
[ True False True]
[False True False]],
fill_value = 999999)
Extra fields can be present in the structured array but are discarded:
>>> dt = [('_data', '<i4'), ('_mask', '|b1'), ('field3', '<f4')]
>>> rec2 = np.zeros((2, 2), dtype=dt)
>>> rec2
array([[(0, False, 0.0), (0, False, 0.0)],
[(0, False, 0.0), (0, False, 0.0)]],
dtype=[('_data', '<i4'), ('_mask', '|b1'), ('field3', '<f4')])
>>> y = np.ma.fromflex(rec2)
>>> y
masked_array(data =
[[0 0]
[0 0]],
mask =
[[False False]
[False False]],
fill_value = 999999)
"""
return masked_array(fxarray['_data'], mask=fxarray['_mask'])
class _convert2ma:
"""
Convert functions from numpy to numpy.ma.
Parameters
----------
    funcname : str
        Name of the numpy function to wrap.
"""
__doc__ = None
#
def __init__(self, funcname, params=None):
self._func = getattr(np, funcname)
self.__doc__ = self.getdoc()
self._extras = params or {}
#
def getdoc(self):
"Return the doc of the function (from the doc of the method)."
doc = getattr(self._func, '__doc__', None)
sig = get_object_signature(self._func)
if doc:
# Add the signature of the function at the beginning of the doc
if sig:
sig = "%s%s\n" % (self._func.__name__, sig)
doc = sig + doc
return doc
#
def __call__(self, a, *args, **params):
# Find the common parameters to the call and the definition
_extras = self._extras
common_params = set(params).intersection(_extras)
# Drop the common parameters from the call
for p in common_params:
_extras[p] = params.pop(p)
# Get the result
result = self._func.__call__(a, *args, **params).view(MaskedArray)
if "fill_value" in common_params:
result.fill_value = _extras.get("fill_value", None)
if "hardmask" in common_params:
            result._hardmask = bool(_extras.get("hardmask", False))
return result
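# The assignments below wrap selected numpy constructors so that they return
# MaskedArray views; parameters listed in `params` (e.g. fill_value, hardmask)
# are stripped from the call and applied to the result instead.  Illustrative
# sketch:
#     >>> z = zeros((2, 2), fill_value=-999)   # np.zeros((2, 2)) viewed as MaskedArray
#     >>> isinstance(z, MaskedArray), z.fill_value == -999
#     (True, True)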
arange = _convert2ma('arange', params=dict(fill_value=None, hardmask=False))
clip = np.clip
diff = np.diff
empty = _convert2ma('empty', params=dict(fill_value=None, hardmask=False))
empty_like = _convert2ma('empty_like')
frombuffer = _convert2ma('frombuffer')
fromfunction = _convert2ma('fromfunction')
identity = _convert2ma('identity', params=dict(fill_value=None, hardmask=False))
indices = np.indices
ones = _convert2ma('ones', params=dict(fill_value=None, hardmask=False))
ones_like = np.ones_like
squeeze = np.squeeze
zeros = _convert2ma('zeros', params=dict(fill_value=None, hardmask=False))
zeros_like = np.zeros_like
###############################################################################
| mit |
hkawasaki/kawasaki-aio8-2 | cms/djangoapps/contentstore/management/commands/check_course.py | 22 | 2740 | from django.core.management.base import BaseCommand, CommandError
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.xml_importer import check_module_metadata_editability
from xmodule.course_module import CourseDescriptor
from xmodule.modulestore import Location
class Command(BaseCommand):
help = '''Enumerates through the course and find common errors'''
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError("check_course requires one argument: <location>")
loc_str = args[0]
loc = CourseDescriptor.id_to_location(loc_str)
store = modulestore()
course = store.get_item(loc, depth=3)
err_cnt = 0
def _xlint_metadata(module):
err_cnt = check_module_metadata_editability(module)
for child in module.get_children():
err_cnt = err_cnt + _xlint_metadata(child)
return err_cnt
err_cnt = err_cnt + _xlint_metadata(course)
        # we've had a bug where the xml_attributes field can be rewritten as a string rather than a dict
def _check_xml_attributes_field(module):
err_cnt = 0
if hasattr(module, 'xml_attributes') and isinstance(module.xml_attributes, basestring):
print 'module = {0} has xml_attributes as a string. It should be a dict'.format(module.location.url())
err_cnt = err_cnt + 1
for child in module.get_children():
err_cnt = err_cnt + _check_xml_attributes_field(child)
return err_cnt
err_cnt = err_cnt + _check_xml_attributes_field(course)
# check for dangling discussion items, this can cause errors in the forums
def _get_discussion_items(module):
discussion_items = []
if module.location.category == 'discussion':
discussion_items = discussion_items + [module.location.url()]
for child in module.get_children():
discussion_items = discussion_items + _get_discussion_items(child)
return discussion_items
discussion_items = _get_discussion_items(course)
# now query all discussion items via get_items() and compare with the tree-traversal
queried_discussion_items = store.get_items(
Location(
'i4x',
course.location.org,
course.location.course,
'discussion',
None,
None
)
)
for item in queried_discussion_items:
if item.location.url() not in discussion_items:
print 'Found dangling discussion module = {0}'.format(item.location.url())
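# Illustrative invocation (sketch; the exact course location string depends on
# the deployment, e.g. an org/course/run identifier):
#     python manage.py check_course MITx/6.002x/2012_Fall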
| agpl-3.0 |
bbcf/bbcflib | bein/tests/test_bein.py | 1 | 13588 | import socket
import re
import sys
import random
from unittest2 import TestCase, TestSuite, main, TestLoader, skipIf
from bein import *
from bein.util import touch
M = MiniLIMS("testing_lims")
def hostname_contains(pattern):
hostname = socket.gethostbyaddr(socket.gethostname())[0]
if re.search(pattern, hostname) == None:
return False
else:
return True
try:
if hostname_contains('vital-it.ch'):
not_vital_it = False
else:
not_vital_it = True
except:
not_vital_it = True
@program
def count_lines(filename):
"""Count the number of lines in *filename* (equivalent to ``wc -l``)."""
def parse_output(p):
m = re.search(r'^\s*(\d+)\s+' + filename + r'\s*$',
''.join(p.stdout))
if m == None:
return None
else:
return int(m.groups()[-1]) # in case of a weird line in LSF
return {"arguments": ["wc","-l",filename],
"return_value": parse_output}
class TestProgramBinding(TestCase):
def test_binding_works(self):
with execution(None) as ex:
with open('boris','w') as f:
f.write("This is a test\nof the emergency broadcast\nsystem.\n")
self.assertEqual(count_lines(ex, 'boris'), 3)
def test_local_works(self):
with execution(None) as ex:
with open('boris','w') as f:
f.write("This is a test\nof the emergency broadcast\nsystem.\n")
q = count_lines._local(ex, 'boris')
self.assertEqual(str(q.__class__), "<class 'bein.Future'>")
self.assertEqual(q.wait(), 3)
@skipIf(not_vital_it, "Not on VITAL-IT.")
def test_lsf_works(self):
with execution(None) as ex:
with open('boris','w') as f:
f.write("This is a test\nof the emergency broadcast\nsystem.\n")
q = count_lines._lsf(ex, 'boris')
self.assertEqual(str(q.__class__), "<class 'bein.Future'>")
self.assertEqual(q.wait(), 3)
def test_nonblocking_with_via_local(self):
with execution(None) as ex:
with open('boris','w') as f:
f.write("This is a test\nof the emergency broadcast\nsystem.\n")
q = count_lines.nonblocking(ex, 'boris', via='local')
self.assertEqual(str(q.__class__), "<class 'bein.Future'>")
self.assertEqual(q.wait(), 3)
@skipIf(not_vital_it, "Not on VITAL-IT")
def test_nonblocking_with_via_lsf(self):
with execution(None) as ex:
with open('boris','w') as f:
f.write("This is a test\nof the emergency broadcast\nsystem.\n")
q = count_lines.nonblocking(ex, 'boris', via='lsf')
self.assertEqual(str(q.__class__), "<class 'bein.Future'>")
self.assertEqual(q.wait(), 3)
def test_syntaxerror_outside_execution(self):
with execution(M) as ex:
pass
M.delete_execution(ex.id)
with self.assertRaises(SyntaxError):
touch(ex)
def test_syntaxerror_outside_execution_nonblocking(self):
with execution(M) as ex:
pass
M.delete_execution(ex.id)
with self.assertRaises(SyntaxError):
touch.nonblocking(ex)
class TestUniqueFilenameIn(TestCase):
def test_state_determines_filename(self):
with execution(None) as ex:
st = random.getstate()
f = unique_filename_in()
random.setstate(st)
g = unique_filename_in()
self.assertEqual(f, g)
def test_unique_filename_exact_match(self):
with execution(None) as ex:
st = random.getstate()
f = touch(ex)
random.setstate(st)
g = touch(ex)
self.assertNotEqual(f, g)
def test_unique_filename_beginnings_match(self):
with execution(None) as ex:
st = random.getstate()
f = unique_filename_in()
touch(ex, f + 'abcdefg')
random.setstate(st)
g = touch(ex)
self.assertNotEqual(f, g)
class TestMiniLIMS(TestCase):
def test_resolve_alias_exception_on_no_file(self):
with execution(None) as ex:
M = MiniLIMS("boris")
self.assertRaises(ValueError, M.resolve_alias, 55)
def test_resolve_alias_returns_int_if_exists(self):
with execution(None) as ex:
f = touch(ex)
M = MiniLIMS("boris")
a = M.import_file(f)
self.assertEqual(M.resolve_alias(a), a)
def test_resolve_alias_with_alias(self):
with execution(None) as ex:
f = touch(ex)
M = MiniLIMS("boris")
a = M.import_file(f)
M.add_alias(a, 'hilda')
self.assertEqual(M.resolve_alias('hilda'), a)
def test_path_to_file_on_execution(self):
with execution(None) as ignoreme:
f = touch(ignoreme)
M = MiniLIMS("boris")
fid = M.import_file(f)
mpath = M.path_to_file(fid)
with execution(M) as ex:
fpath = ex.path_to_file(fid)
self.assertEqual(mpath, fpath)
def test_search_files(self):
f_desc = unique_filename_in()
t1 = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
f_id = M.import_file("../LICENSE", description=f_desc)
t2 = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
f_found = M.search_files(with_text="LICENSE", with_description=f_desc, older_than=t2, source="import", newer_than=t1)
self.assertIn(f_id, f_found)
M.delete_file(f_id)
f_desc = {"name":"test_search_files_by_dict", "m":5, "n":15}
f_id = M.import_file("../LICENSE", description=f_desc)
f_found = M.search_files(with_description=f_desc)
self.assertIn(f_id, f_found)
M.delete_file(f_id)
def test_search_executions(self):
with execution(M, description="desc_test") as ex:
pass
ex_found = M.search_executions(with_description="desc_test")
self.assertIn(ex.id,ex_found)
M.delete_execution(ex.id)
ex_desc = {"name":"test_search_ex_by_dict", "m":5, "n":15}
with execution(M, description=ex_desc) as ex:
pass
ex_found = M.search_executions(with_description=ex_desc)
self.assertIn(ex.id, ex_found)
try:
with execution(M, description="desc_test_fail") as ex_nofail:
3/0
except: pass
ex_found_nofail = M.search_executions(with_description="desc_test", fails=False)
for e in ex_found_nofail:
error = M.fetch_execution(e)["exception_string"]
self.assertIsNone(error)
ex_found_fail = M.search_executions(with_description="desc_test", fails=True)
for e in ex_found_fail:
error = M.fetch_execution(e)["exception_string"]
self.assertIsNotNone(error)
M.delete_execution(ex.id)
M.delete_execution(ex_nofail.id)
def test_browse_files(self):
f_desc = "browse_file_test"
f_id = M.import_file("../LICENSE", description=f_desc)
f_found = M.browse_files(with_description=f_desc)
#self.assertIn(f_id,f_found)
M.delete_file(f_id)
def test_browse_executions(self):
ex_desc = "browse_ex_test"
with execution(M, description=ex_desc) as ex:
touch(ex,"boris")
ex_found = M.browse_executions(with_description=ex_desc)
#self.assertIs(ex.id,ex_found)
M.delete_execution(ex.id)
class TestExportFile(TestCase):
def test_export_file(self):
filea = M.import_file("../LICENSE") #file ID
fileb = M.import_file("../doc/bein.rst")
testdir = "testing.files"
if not os.path.isdir(testdir):
os.mkdir(testdir)
M.associate_file(fileb,filea,template="%s.linked")
M.export_file(filea, dst=os.path.join(testdir,"exportedfile"), with_associated=True) #test with file name given
self.assertTrue(os.path.isfile(os.path.join(testdir,"exportedfile"+".linked")))
os.remove(os.path.join(testdir,"exportedfile"))
os.remove(os.path.join(testdir,"exportedfile"+".linked"))
M.export_file(filea, dst=testdir, with_associated=True) #test with directory given
filename = M.fetch_file(filea)['repository_name']
self.assertTrue(os.path.isfile(os.path.join(testdir, filename +".linked")))
os.remove(os.path.join(testdir, filename))
os.remove(os.path.join(testdir, filename +".linked"))
@program
def echo(s):
return {'arguments': ['echo',str(s)],
'return_value': None}
class TestStdoutStderrRedirect(TestCase):
def test_stdout_redirected(self):
try:
with execution(M) as ex:
f = unique_filename_in()
echo(ex, "boris!", stdout=f)
with open(f) as q:
l = q.readline()
self.assertEqual(l, 'boris!\n')
finally:
M.delete_execution(ex.id)
def test_stdout_local_redirected(self):
try:
with execution(M) as ex:
f = unique_filename_in()
m = echo.nonblocking(ex, "boris!", stdout=f)
m.wait()
with open(f) as q:
l = q.readline()
self.assertEqual(l, 'boris!\n')
finally:
M.delete_execution(ex.id)
class TestNoSuchProgramError(TestCase):
@program
def nonexistent():
return {"arguments": ["meepbarf","hilda"],
"return_value": None}
def test_nonexistent(self):
with execution(None) as ex:
self.assertRaises(ValueError, self.nonexistent, ex)
def test_nonexistent_local(self):
with execution(None) as ex:
f = self.nonexistent.nonblocking(ex, via="local")
self.assertRaises(ValueError, f.wait)
class TestImmutabilityDropped(TestCase):
def test_immutability_dropped(self):
executions = []
with execution(M) as ex:
touch(ex, "boris")
ex.add("boris")
exid1 = ex.id
borisid = M.search_files(source=('execution',ex.id))[0]
self.assertFalse(M.fetch_file(borisid)['immutable'])
with execution(M) as ex:
ex.use(borisid)
exid2 = ex.id
self.assertTrue(M.fetch_file(borisid)['immutable'])
M.delete_execution(exid2)
self.assertFalse(M.fetch_file(borisid)['immutable'])
M.delete_execution(exid1)
self.assertEqual(M.search_files(source=('execution',exid1)), [])
class TestAssociatePreservesFilenames(TestCase):
def test_associate_with_names(self):
try:
with execution(M) as ex:
touch(ex, "boris")
touch(ex, "hilda")
ex.add("boris")
ex.add("hilda", associate_to_filename="boris", template="%s.meep")
boris_id = M.search_files(source=('execution',ex.id), with_text="boris")[0]
hilda_id = M.search_files(source=('execution',ex.id), with_text="hilda")[0]
boris_name = M.fetch_file(boris_id)['repository_name']
hilda_name = M.fetch_file(hilda_id)['repository_name']
self.assertEqual("%s.meep" % boris_name, hilda_name)
finally:
try:
M.delete_execution(ex.id)
except:
pass
def test_associate_with_id(self):
try:
fid = M.import_file('test.py')
with execution(M) as ex:
touch(ex, "hilda")
ex.add("hilda", associate_to_id=fid, template="%s.meep")
hilda_id = M.search_files(source=('execution',ex.id))[0]
hilda_name = M.fetch_file(hilda_id)['repository_name']
fid_name = M.fetch_file(fid)['repository_name']
self.assertEqual("%s.meep" % fid_name, hilda_name)
finally:
try:
M.delete_execution(ex.id)
M.delete_file(fid)
except:
pass
def test_hierarchical_association(self):
try:
with execution(M) as ex:
touch(ex, "a")
touch(ex, "b")
touch(ex, "c")
ex.add("a")
ex.add("b", associate_to_filename="a", template="%s.step")
ex.add("c", associate_to_filename="b", template="%s.step")
a_id = M.search_files(source=('execution',ex.id), with_text='a')[0]
b_id = M.search_files(source=('execution',ex.id), with_text='b')[0]
c_id = M.search_files(source=('execution',ex.id), with_text='c')[0]
a_name = M.fetch_file(a_id)['repository_name']
b_name = M.fetch_file(b_id)['repository_name']
c_name = M.fetch_file(c_id)['repository_name']
self.assertEqual("%s.step" % a_name, b_name)
self.assertEqual("%s.step.step" % a_name, c_name)
finally:
try:
M.delete_execution(ex.id)
except:
pass
def test_given(tests):
    module = sys.modules[__name__]
    if tests is None:
        defaultTest = None
    else:
        loader = TestLoader()
        defaultTest = TestSuite()
        tests = loader.loadTestsFromNames(tests, module)
        defaultTest.addTests(tests)
    main(defaultTest=defaultTest)
if __name__ == '__main__':
if len(sys.argv) > 1:
test_given(sys.argv[1:])
else:
test_given(None)
| gpl-3.0 |
dawnpower/nova | nova/openstack/common/imageutils.py | 64 | 6349 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper methods to deal with images.
"""
import re
from oslo_utils import strutils
from nova.openstack.common._i18n import _
class QemuImgInfo(object):
BACKING_FILE_RE = re.compile((r"^(.*?)\s*\(actual\s+path\s*:"
r"\s+(.*?)\)\s*$"), re.I)
TOP_LEVEL_RE = re.compile(r"^([\w\d\s\_\-]+):(.*)$")
SIZE_RE = re.compile(r"(\d*\.?\d+)(\w+)?(\s*\(\s*(\d+)\s+bytes\s*\))?",
re.I)
def __init__(self, cmd_output=None):
details = self._parse(cmd_output or '')
self.image = details.get('image')
self.backing_file = details.get('backing_file')
self.file_format = details.get('file_format')
self.virtual_size = details.get('virtual_size')
self.cluster_size = details.get('cluster_size')
self.disk_size = details.get('disk_size')
self.snapshots = details.get('snapshot_list', [])
self.encrypted = details.get('encrypted')
def __str__(self):
lines = [
'image: %s' % self.image,
'file_format: %s' % self.file_format,
'virtual_size: %s' % self.virtual_size,
'disk_size: %s' % self.disk_size,
'cluster_size: %s' % self.cluster_size,
'backing_file: %s' % self.backing_file,
]
if self.snapshots:
lines.append("snapshots: %s" % self.snapshots)
if self.encrypted:
lines.append("encrypted: %s" % self.encrypted)
return "\n".join(lines)
def _canonicalize(self, field):
# Standardize on underscores/lc/no dash and no spaces
# since qemu seems to have mixed outputs here... and
# this format allows for better integration with python
# - i.e. for usage in kwargs and such...
field = field.lower().strip()
for c in (" ", "-"):
field = field.replace(c, '_')
return field
def _extract_bytes(self, details):
# Replace it with the byte amount
real_size = self.SIZE_RE.search(details)
if not real_size:
raise ValueError(_('Invalid input value "%s".') % details)
magnitude = real_size.group(1)
unit_of_measure = real_size.group(2)
bytes_info = real_size.group(3)
if bytes_info:
return int(real_size.group(4))
elif not unit_of_measure:
return int(magnitude)
return strutils.string_to_bytes('%s%sB' % (magnitude, unit_of_measure),
return_int=True)
def _extract_details(self, root_cmd, root_details, lines_after):
real_details = root_details
if root_cmd == 'backing_file':
# Replace it with the real backing file
backing_match = self.BACKING_FILE_RE.match(root_details)
if backing_match:
real_details = backing_match.group(2).strip()
elif root_cmd in ['virtual_size', 'cluster_size', 'disk_size']:
# Replace it with the byte amount (if we can convert it)
if root_details == 'None':
real_details = 0
else:
real_details = self._extract_bytes(root_details)
elif root_cmd == 'file_format':
real_details = real_details.strip().lower()
elif root_cmd == 'snapshot_list':
# Next line should be a header, starting with 'ID'
if not lines_after or not lines_after.pop(0).startswith("ID"):
msg = _("Snapshot list encountered but no header found!")
raise ValueError(msg)
real_details = []
# This is the sprintf pattern we will try to match
# "%-10s%-20s%7s%20s%15s"
# ID TAG VM SIZE DATE VM CLOCK (current header)
while lines_after:
line = lines_after[0]
line_pieces = line.split()
if len(line_pieces) != 6:
break
# Check against this pattern in the final position
# "%02d:%02d:%02d.%03d"
date_pieces = line_pieces[5].split(":")
if len(date_pieces) != 3:
break
lines_after.pop(0)
real_details.append({
'id': line_pieces[0],
'tag': line_pieces[1],
'vm_size': line_pieces[2],
'date': line_pieces[3],
'vm_clock': line_pieces[4] + " " + line_pieces[5],
})
return real_details
def _parse(self, cmd_output):
# Analysis done of qemu-img.c to figure out what is going on here
# Find all points start with some chars and then a ':' then a newline
# and then handle the results of those 'top level' items in a separate
# function.
#
# TODO(harlowja): newer versions might have a json output format
# we should switch to that whenever possible.
# see: http://bit.ly/XLJXDX
contents = {}
lines = [x for x in cmd_output.splitlines() if x.strip()]
while lines:
line = lines.pop(0)
top_level = self.TOP_LEVEL_RE.match(line)
if top_level:
root = self._canonicalize(top_level.group(1))
if not root:
continue
root_details = top_level.group(2).strip()
details = self._extract_details(root, root_details, lines)
contents[root] = details
return contents
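# Illustrative parse of typical `qemu-img info` output (sample values only):
#     info = QemuImgInfo("image: disk.img\n"
#                        "file format: qcow2\n"
#                        "virtual size: 10G (10737418240 bytes)\n"
#                        "cluster_size: 65536\n")
#     info.file_format    -> 'qcow2'
#     info.virtual_size   -> 10737418240   (the parenthesised byte count is used)
#     info.cluster_size   -> 65536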
| apache-2.0 |
Turkingwang/dpkt | dpkt/snoop.py | 22 | 2963 | # $Id$
"""Snoop file format."""
import sys, time
import dpkt
# RFC 1761
SNOOP_MAGIC = 0x736E6F6F70000000L
SNOOP_VERSION = 2
SDL_8023 = 0
SDL_8024 = 1
SDL_8025 = 2
SDL_8026 = 3
SDL_ETHER = 4
SDL_HDLC = 5
SDL_CHSYNC = 6
SDL_IBMCC = 7
SDL_FDDI = 8
SDL_OTHER = 9
dltoff = { SDL_ETHER:14 }
class PktHdr(dpkt.Packet):
"""snoop packet header."""
__byte_order__ = '!'
__hdr__ = (
('orig_len', 'I', 0),
('incl_len', 'I', 0),
('rec_len', 'I', 0),
('cum_drops', 'I', 0),
('ts_sec', 'I', 0),
('ts_usec', 'I', 0),
)
class FileHdr(dpkt.Packet):
"""snoop file header."""
__byte_order__ = '!'
__hdr__ = (
('magic', 'Q', SNOOP_MAGIC),
('v', 'I', SNOOP_VERSION),
('linktype', 'I', SDL_ETHER),
)
class Writer(object):
"""Simple snoop dumpfile writer."""
def __init__(self, fileobj, linktype=SDL_ETHER):
self.__f = fileobj
fh = FileHdr(linktype=linktype)
self.__f.write(str(fh))
def writepkt(self, pkt, ts=None):
if ts is None:
ts = time.time()
s = str(pkt)
n = len(s)
pad_len = 4 - n % 4 if n % 4 else 0
ph = PktHdr(orig_len=n,incl_len=n,
rec_len=PktHdr.__hdr_len__+n+pad_len,
ts_sec=int(ts),
                    ts_usec=int((float(ts) - int(ts)) * 1000000.0))
self.__f.write(str(ph))
self.__f.write(s + '\0' * pad_len)
def close(self):
self.__f.close()
class Reader(object):
"""Simple pypcap-compatible snoop file reader."""
def __init__(self, fileobj):
self.name = fileobj.name
self.fd = fileobj.fileno()
self.__f = fileobj
buf = self.__f.read(FileHdr.__hdr_len__)
self.__fh = FileHdr(buf)
self.__ph = PktHdr
if self.__fh.magic != SNOOP_MAGIC:
raise ValueError, 'invalid snoop header'
self.dloff = dltoff[self.__fh.linktype]
self.filter = ''
def fileno(self):
return self.fd
def datalink(self):
return self.__fh.linktype
def setfilter(self, value, optimize=1):
        raise NotImplementedError
def readpkts(self):
return list(self)
def dispatch(self, cnt, callback, *args):
if cnt > 0:
for i in range(cnt):
ts, pkt = self.next()
callback(ts, pkt, *args)
else:
for ts, pkt in self:
callback(ts, pkt, *args)
def loop(self, callback, *args):
self.dispatch(0, callback, *args)
def __iter__(self):
self.__f.seek(FileHdr.__hdr_len__)
while 1:
buf = self.__f.read(PktHdr.__hdr_len__)
if not buf: break
hdr = self.__ph(buf)
buf = self.__f.read(hdr.rec_len - PktHdr.__hdr_len__)
yield (hdr.ts_sec + (hdr.ts_usec / 1000000.0), buf[:hdr.incl_len])
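# Illustrative usage (sketch; assumes `frame` holds a raw Ethernet frame):
#     w = Writer(open('capture.snoop', 'wb'))
#     w.writepkt(frame)                  # records are padded to 4-byte boundaries
#     w.close()
#     r = Reader(open('capture.snoop', 'rb'))
#     for ts, buf in r:
#         pass                           # buf is the captured frame, ts a float timestamp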
| bsd-3-clause |
amanikamail/flexx | docs/scripts/genuiclasses.py | 20 | 3063 | """ Generate docs for ui classes.
"""
import os
from types import ModuleType
from flexx import ui, app
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
DOC_DIR = os.path.abspath(os.path.join(THIS_DIR, '..'))
OUTPUT_DIR = os.path.join(DOC_DIR, 'ui')
created_files = []
def main():
pages = {}
class_names = []
# Get all pages and class names
for mod in ui.__dict__.values():
if isinstance(mod, ModuleType):
classes = []
for w in mod.__dict__.values():
if isinstance(w, type) and issubclass(w, ui.Widget):
if w.__module__ == mod.__name__:
classes.append(w)
if classes:
classes.sort(key=lambda x: len(x.mro()))
class_names.extend([w.__name__ for w in classes])
pages[mod.__name__] = classes
# Create page for each module
for module_name, classes in sorted(pages.items()):
page_name = module_name.split('.')[-1].strip('_').capitalize()
docs = '%s\n%s\n\n' % (page_name, '-' * len(page_name))
docs += '.. automodule:: %s\n\n' % module_name
docs += '----\n\n'
for cls in classes:
name = cls.__name__
# Insert info on base clases
if 'Inherits from' not in cls.__doc__:
bases = [':class:`%s <flexx.ui.%s>`' % (bcls.__name__, bcls.__name__)
for bcls in cls.__bases__]
line = 'Inherits from: ' + ', '.join(bases)
cls.__doc__ = line + '\n\n' + (cls.__doc__ or '')
# Create doc for class
docs += '.. autoclass:: flexx.ui.%s\n' % name
docs += ' :members:\n\n'
# Write doc page
filename = os.path.join(OUTPUT_DIR, page_name.lower() + '.rst')
created_files.append(filename)
open(filename, 'wt').write(docs)
# Create overview doc page
docs = 'Ui API'
docs += '\n' + '=' * len(docs) + '\n\n'
docs += 'This is a list of all widget classes provided by ``flexx.ui``. '
docs += ':class:`Widget <flexx.ui.Widget>` is the base class of all widgets. '
docs += 'There is one document per widget type. Each document contains '
docs += 'examples with the widget(s) defined within.\n\n'
for name in sorted(class_names):
docs += '* :class:`%s <flexx.ui.%s>`\n' % (name, name)
docs += '\n.. toctree::\n :maxdepth: 1\n :hidden:\n\n'
for module_name in sorted(pages.keys()):
docs += ' %s\n' % module_name.split('.')[-1].strip('_').lower()
# Write overview doc page
filename = os.path.join(OUTPUT_DIR, 'api.rst')
created_files.append(filename)
open(filename, 'wt').write(docs)
print(' generated widget docs with %i pages and %i widgets' % (len(pages), len(class_names)))
def clean():
while created_files:
filename = created_files.pop()
if os.path.isfile(filename):
os.remove(filename)
| bsd-2-clause |
eeshangarg/oh-mainline | vendor/packages/django-extensions/django_extensions/management/commands/sqldiff.py | 17 | 30783 | """
sqldiff.py - Prints the (approximated) difference between models and database
TODO:
- better support for relations
- better support for constraints (mainly postgresql?)
- support for table spaces with postgresql
- when a table is not managed (meta.managed==False) then only do a one-way
   sqldiff: show differences from db->table but not the other way around since
it's not managed.
KNOWN ISSUES:
- MySQL has by far the most problems with introspection. Please be
   careful when using MySQL with sqldiff.
 - Booleans are reported back as Integers, so there's no way to know if
there was a real change.
- Varchar sizes are reported back without unicode support so their size
may change in comparison to the real length of the varchar.
- Some of the 'fixes' to counter these problems might create false
positives or false negatives.
"""
from django.core.management.base import BaseCommand
from django.core.management import sql as _sql
from django.core.management import CommandError
from django.core.management.color import no_style
from django.db import transaction, connection
from django.db.models.fields import IntegerField
from optparse import make_option
ORDERING_FIELD = IntegerField('_order', null=True)
def flatten(l, ltypes=(list, tuple)):
ltype = type(l)
l = list(l)
i = 0
while i < len(l):
while isinstance(l[i], ltypes):
if not l[i]:
l.pop(i)
i -= 1
break
else:
l[i:i + 1] = l[i]
i += 1
return ltype(l)
def all_local_fields(meta):
all_fields = meta.local_fields[:]
for parent in meta.parents:
all_fields.extend(all_local_fields(parent._meta))
return all_fields
class SQLDiff(object):
DATA_TYPES_REVERSE_OVERRIDE = {}
DIFF_TYPES = [
'error',
'comment',
'table-missing-in-db',
'field-missing-in-db',
'field-missing-in-model',
'index-missing-in-db',
'index-missing-in-model',
'unique-missing-in-db',
'unique-missing-in-model',
'field-type-differ',
'field-parameter-differ',
]
DIFF_TEXTS = {
'error': 'error: %(0)s',
'comment': 'comment: %(0)s',
'table-missing-in-db': "table '%(0)s' missing in database",
'field-missing-in-db': "field '%(1)s' defined in model but missing in database",
'field-missing-in-model': "field '%(1)s' defined in database but missing in model",
'index-missing-in-db': "field '%(1)s' INDEX defined in model but missing in database",
'index-missing-in-model': "field '%(1)s' INDEX defined in database schema but missing in model",
'unique-missing-in-db': "field '%(1)s' UNIQUE defined in model but missing in database",
'unique-missing-in-model': "field '%(1)s' UNIQUE defined in database schema but missing in model",
'field-type-differ': "field '%(1)s' not of same type: db='%(3)s', model='%(2)s'",
'field-parameter-differ': "field '%(1)s' parameters differ: db='%(3)s', model='%(2)s'",
}
SQL_FIELD_MISSING_IN_DB = lambda self, style, qn, args: "%s %s\n\t%s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ADD'), style.SQL_FIELD(qn(args[1])), style.SQL_COLTYPE(args[2]))
SQL_FIELD_MISSING_IN_MODEL = lambda self, style, qn, args: "%s %s\n\t%s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('DROP COLUMN'), style.SQL_FIELD(qn(args[1])))
SQL_INDEX_MISSING_IN_DB = lambda self, style, qn, args: "%s %s\n\t%s %s (%s);" % (style.SQL_KEYWORD('CREATE INDEX'), style.SQL_TABLE(qn("%s_idx" % '_'.join(args[0:2]))), style.SQL_KEYWORD('ON'), style.SQL_TABLE(qn(args[0])), style.SQL_FIELD(qn(args[1])))
# FIXME: need to lookup index name instead of just appending _idx to table + fieldname
SQL_INDEX_MISSING_IN_MODEL = lambda self, style, qn, args: "%s %s;" % (style.SQL_KEYWORD('DROP INDEX'), style.SQL_TABLE(qn("%s_idx" % '_'.join(args[0:2]))))
SQL_UNIQUE_MISSING_IN_DB = lambda self, style, qn, args: "%s %s\n\t%s %s (%s);" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ADD'), style.SQL_KEYWORD('UNIQUE'), style.SQL_FIELD(qn(args[1])))
# FIXME: need to lookup unique constraint name instead of appending _key to table + fieldname
SQL_UNIQUE_MISSING_IN_MODEL = lambda self, style, qn, args: "%s %s\n\t%s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('DROP'), style.SQL_KEYWORD('CONSTRAINT'), style.SQL_TABLE(qn("%s_key" % ('_'.join(args[:2])))))
SQL_FIELD_TYPE_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD("MODIFY"), style.SQL_FIELD(qn(args[1])), style.SQL_COLTYPE(args[2]))
SQL_FIELD_PARAMETER_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD("MODIFY"), style.SQL_FIELD(qn(args[1])), style.SQL_COLTYPE(args[2]))
SQL_ERROR = lambda self, style, qn, args: style.NOTICE('-- Error: %s' % style.ERROR(args[0]))
SQL_COMMENT = lambda self, style, qn, args: style.NOTICE('-- Comment: %s' % style.SQL_TABLE(args[0]))
SQL_TABLE_MISSING_IN_DB = lambda self, style, qn, args: style.NOTICE('-- Table missing: %s' % args[0])
def __init__(self, app_models, options):
self.app_models = app_models
self.options = options
self.dense = options.get('dense_output', False)
try:
self.introspection = connection.introspection
except AttributeError:
from django.db import get_introspection_module
self.introspection = get_introspection_module()
self.cursor = connection.cursor()
self.django_tables = self.get_django_tables(options.get('only_existing', True))
self.db_tables = self.introspection.get_table_list(self.cursor)
self.differences = []
self.unknown_db_fields = {}
self.DIFF_SQL = {
'error': self.SQL_ERROR,
'comment': self.SQL_COMMENT,
'table-missing-in-db': self.SQL_TABLE_MISSING_IN_DB,
'field-missing-in-db': self.SQL_FIELD_MISSING_IN_DB,
'field-missing-in-model': self.SQL_FIELD_MISSING_IN_MODEL,
'index-missing-in-db': self.SQL_INDEX_MISSING_IN_DB,
'index-missing-in-model': self.SQL_INDEX_MISSING_IN_MODEL,
'unique-missing-in-db': self.SQL_UNIQUE_MISSING_IN_DB,
'unique-missing-in-model': self.SQL_UNIQUE_MISSING_IN_MODEL,
'field-type-differ': self.SQL_FIELD_TYPE_DIFFER,
'field-parameter-differ': self.SQL_FIELD_PARAMETER_DIFFER,
}
def add_app_model_marker(self, app_label, model_name):
self.differences.append((app_label, model_name, []))
def add_difference(self, diff_type, *args):
assert diff_type in self.DIFF_TYPES, 'Unknown difference type'
self.differences[-1][-1].append((diff_type, args))
def get_django_tables(self, only_existing):
try:
django_tables = self.introspection.django_table_names(only_existing=only_existing)
except AttributeError:
# backwards compatibility for before introspection refactoring (r8296)
try:
django_tables = _sql.django_table_names(only_existing=only_existing)
except AttributeError:
# backwards compatibility for before svn r7568
django_tables = _sql.django_table_list(only_existing=only_existing)
return django_tables
def sql_to_dict(self, query, param):
""" sql_to_dict(query, param) -> list of dicts
code from snippet at http://www.djangosnippets.org/snippets/1383/
"""
cursor = connection.cursor()
cursor.execute(query, param)
fieldnames = [name[0] for name in cursor.description]
result = []
for row in cursor.fetchall():
rowset = []
for field in zip(fieldnames, row):
rowset.append(field)
result.append(dict(rowset))
return result
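# Illustrative sketch of the return shape (the table and row values here are
# hypothetical, not part of the original command):
#   self.sql_to_dict("SELECT id, name FROM auth_group WHERE id = %s", [1])
#   -> [{'id': 1, 'name': u'editors'}]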
def get_field_model_type(self, field):
return field.db_type(connection=connection)
def get_field_db_type(self, description, field=None, table_name=None):
from django.db import models
# DB-API cursor.description
#(name, type_code, display_size, internal_size, precision, scale, null_ok) = description
type_code = description[1]
if type_code in self.DATA_TYPES_REVERSE_OVERRIDE:
reverse_type = self.DATA_TYPES_REVERSE_OVERRIDE[type_code]
else:
try:
try:
reverse_type = self.introspection.data_types_reverse[type_code]
except AttributeError:
# backwards compatibility for before introspection refactoring (r8296)
reverse_type = self.introspection.DATA_TYPES_REVERSE.get(type_code)
except KeyError:
# type_code not found in data_types_reverse map
key = (self.differences[-1][:2], description[:2])
if key not in self.unknown_db_fields:
self.unknown_db_fields[key] = 1
self.add_difference('comment', "Unknown database type for field '%s' (%s)" % (description[0], type_code))
return None
kwargs = {}
if isinstance(reverse_type, tuple):
kwargs.update(reverse_type[1])
reverse_type = reverse_type[0]
if reverse_type == "CharField" and description[3]:
kwargs['max_length'] = description[3]
if reverse_type == "DecimalField":
kwargs['max_digits'] = description[4]
kwargs['decimal_places'] = description[5] and abs(description[5]) or description[5]
if description[6]:
kwargs['blank'] = True
if not reverse_type in ('TextField', 'CharField'):
kwargs['null'] = True
if '.' in reverse_type:
from django.utils import importlib
# TODO: when was importlib added to django.utils? And do we
# need to add backwards-compatibility code?
module_path, package_name = reverse_type.rsplit('.', 1)
module = importlib.import_module(module_path)
field_db_type = getattr(module, package_name)(**kwargs).db_type(connection=connection)
else:
field_db_type = getattr(models, reverse_type)(**kwargs).db_type(connection=connection)
return field_db_type
def strip_parameters(self, field_type):
if field_type and field_type != 'double precision':
return field_type.split(" ")[0].split("(")[0]
return field_type
def find_unique_missing_in_db(self, meta, table_indexes, table_name):
for field in all_local_fields(meta):
if field.unique:
attname = field.db_column or field.attname
if attname in table_indexes and table_indexes[attname]['unique']:
continue
self.add_difference('unique-missing-in-db', table_name, attname)
def find_unique_missing_in_model(self, meta, table_indexes, table_name):
# TODO: Postgresql does not list unique_togethers in table_indexes
# MySQL does
fields = dict([(field.db_column or field.name, field.unique) for field in all_local_fields(meta)])
for att_name, att_opts in table_indexes.iteritems():
if att_opts['unique'] and att_name in fields and not fields[att_name]:
if att_name in flatten(meta.unique_together):
continue
self.add_difference('unique-missing-in-model', table_name, att_name)
def find_index_missing_in_db(self, meta, table_indexes, table_name):
for field in all_local_fields(meta):
if field.db_index:
attname = field.db_column or field.attname
if not attname in table_indexes:
self.add_difference('index-missing-in-db', table_name, attname)
def find_index_missing_in_model(self, meta, table_indexes, table_name):
fields = dict([(field.name, field) for field in all_local_fields(meta)])
for att_name, att_opts in table_indexes.iteritems():
if att_name in fields:
field = fields[att_name]
if field.db_index:
continue
if att_opts['primary_key'] and field.primary_key:
continue
if att_opts['unique'] and field.unique:
continue
if att_opts['unique'] and att_name in flatten(meta.unique_together):
continue
self.add_difference('index-missing-in-model', table_name, att_name)
def find_field_missing_in_model(self, fieldmap, table_description, table_name):
for row in table_description:
if row[0] not in fieldmap:
self.add_difference('field-missing-in-model', table_name, row[0])
def find_field_missing_in_db(self, fieldmap, table_description, table_name):
db_fields = [row[0] for row in table_description]
for field_name, field in fieldmap.iteritems():
if field_name not in db_fields:
self.add_difference('field-missing-in-db', table_name, field_name,
field.db_type(connection=connection))
def find_field_type_differ(self, meta, table_description, table_name, func=None):
db_fields = dict([(row[0], row) for row in table_description])
for field in all_local_fields(meta):
if field.name not in db_fields:
continue
description = db_fields[field.name]
model_type = self.strip_parameters(self.get_field_model_type(field))
db_type = self.strip_parameters(self.get_field_db_type(description, field))
# use callback function if defined
if func:
model_type, db_type = func(field, description, model_type, db_type)
if not model_type == db_type:
self.add_difference('field-type-differ', table_name, field.name, model_type, db_type)
def find_field_parameter_differ(self, meta, table_description, table_name, func=None):
db_fields = dict([(row[0], row) for row in table_description])
for field in all_local_fields(meta):
if field.name not in db_fields:
continue
description = db_fields[field.name]
model_type = self.get_field_model_type(field)
db_type = self.get_field_db_type(description, field, table_name)
if not self.strip_parameters(model_type) == self.strip_parameters(db_type):
continue
# use callback function if defined
if func:
model_type, db_type = func(field, description, model_type, db_type)
if not model_type == db_type:
self.add_difference('field-parameter-differ', table_name, field.name, model_type, db_type)
@transaction.commit_manually
def find_differences(self):
cur_app_label = None
for app_model in self.app_models:
meta = app_model._meta
table_name = meta.db_table
app_label = meta.app_label
if cur_app_label != app_label:
# Marker indicating start of difference scan for this table_name
self.add_app_model_marker(app_label, app_model.__name__)
#if not table_name in self.django_tables:
if not table_name in self.db_tables:
# Table is missing from database
self.add_difference('table-missing-in-db', table_name)
continue
table_indexes = self.introspection.get_indexes(self.cursor, table_name)
fieldmap = dict([(field.db_column or field.get_attname(), field) for field in all_local_fields(meta)])
# add ordering field if model uses order_with_respect_to
if meta.order_with_respect_to:
fieldmap['_order'] = ORDERING_FIELD
try:
table_description = self.introspection.get_table_description(self.cursor, table_name)
except Exception, e:
self.add_difference('error', 'unable to introspect table: %s' % str(e).strip())
transaction.rollback() # reset transaction
continue
else:
transaction.commit()
# Fields which are defined in database but not in model
# 1) find: 'unique-missing-in-model'
self.find_unique_missing_in_model(meta, table_indexes, table_name)
# 2) find: 'index-missing-in-model'
self.find_index_missing_in_model(meta, table_indexes, table_name)
# 3) find: 'field-missing-in-model'
self.find_field_missing_in_model(fieldmap, table_description, table_name)
# Fields which are defined in models but not in database
# 4) find: 'field-missing-in-db'
self.find_field_missing_in_db(fieldmap, table_description, table_name)
# 5) find: 'unique-missing-in-db'
self.find_unique_missing_in_db(meta, table_indexes, table_name)
# 6) find: 'index-missing-in-db'
self.find_index_missing_in_db(meta, table_indexes, table_name)
# Fields which have a different type or parameters
# 7) find: 'type-differs'
self.find_field_type_differ(meta, table_description, table_name)
# 8) find: 'type-parameter-differs'
self.find_field_parameter_differ(meta, table_description, table_name)
def print_diff(self, style=no_style()):
""" print differences to stdout """
if self.options.get('sql', True):
self.print_diff_sql(style)
else:
self.print_diff_text(style)
def print_diff_text(self, style):
cur_app_label = None
for app_label, model_name, diffs in self.differences:
if not diffs:
continue
if not self.dense and cur_app_label != app_label:
print style.NOTICE("+ Application:"), style.SQL_TABLE(app_label)
cur_app_label = app_label
if not self.dense:
print style.NOTICE("|-+ Differences for model:"), style.SQL_TABLE(model_name)
for diff in diffs:
diff_type, diff_args = diff
text = self.DIFF_TEXTS[diff_type] % dict((str(i), style.SQL_TABLE(e)) for i, e in enumerate(diff_args))
text = "'".join(i % 2 == 0 and style.ERROR(e) or e for i, e in enumerate(text.split("'")))
if not self.dense:
print style.NOTICE("|--+"), text
else:
print style.NOTICE("App"), style.SQL_TABLE(app_label), style.NOTICE('Model'), style.SQL_TABLE(model_name), text
def print_diff_sql(self, style):
cur_app_label = None
qn = connection.ops.quote_name
has_differences = max([len(diffs) for app_label, model_name, diffs in self.differences])
if not has_differences:
if not self.dense:
print style.SQL_KEYWORD("-- No differences")
else:
print style.SQL_KEYWORD("BEGIN;")
for app_label, model_name, diffs in self.differences:
if not diffs:
continue
if not self.dense and cur_app_label != app_label:
print style.NOTICE("-- Application: %s" % style.SQL_TABLE(app_label))
cur_app_label = app_label
if not self.dense:
print style.NOTICE("-- Model: %s" % style.SQL_TABLE(model_name))
for diff in diffs:
diff_type, diff_args = diff
text = self.DIFF_SQL[diff_type](style, qn, diff_args)
if self.dense:
text = text.replace("\n\t", " ")
print text
print style.SQL_KEYWORD("COMMIT;")
class GenericSQLDiff(SQLDiff):
pass
class MySQLDiff(SQLDiff):
# All the MySQL hacks together create something of a problem
# Fixing one bug in MySQL creates another issue. So just keep in mind
# that this is way unreliable for MySQL atm.
def get_field_db_type(self, description, field=None, table_name=None):
from MySQLdb.constants import FIELD_TYPE
# weird bug? in the mysql db-api where it returns three times the correct value for field length
# if I remember correctly it had something to do with unicode strings
# TODO: fix this in a more meaningful and better understood manner
description = list(description)
if description[1] not in [FIELD_TYPE.TINY, FIELD_TYPE.SHORT]: # exclude tinyints from conversion.
description[3] = description[3] / 3
description[4] = description[4] / 3
db_type = super(MySQLDiff, self).get_field_db_type(description)
if not db_type:
return
if field:
if field.primary_key and (db_type == 'integer' or db_type == 'bigint'):
db_type += ' AUTO_INCREMENT'
# MySQL isn't really sure about chars and varchars, like sqlite
field_type = self.get_field_model_type(field)
# Fix char/varchar inconsistencies
if self.strip_parameters(field_type) == 'char' and self.strip_parameters(db_type) == 'varchar':
db_type = db_type.lstrip("var")
# They like to call 'bool's 'tinyint(1)' and introspection makes that an integer;
# just convert it back to its proper type, a bool is a bool and nothing else.
if db_type == 'integer' and description[1] == FIELD_TYPE.TINY and description[4] == 1:
db_type = 'bool'
if db_type == 'integer' and description[1] == FIELD_TYPE.SHORT:
db_type = 'smallint UNSIGNED' # FIXME: what about if it's not UNSIGNED ?
return db_type
class SqliteSQLDiff(SQLDiff):
# Unique does not seem to be implied on Sqlite for primary keys;
# if this is more generic among databases it might be useful
# to add this to the superclass's find_unique_missing_in_db method
def find_unique_missing_in_db(self, meta, table_indexes, table_name):
for field in all_local_fields(meta):
if field.unique:
attname = field.db_column or field.attname
if attname in table_indexes and table_indexes[attname]['unique']:
continue
if attname in table_indexes and table_indexes[attname]['primary_key']:
continue
self.add_difference('unique-missing-in-db', table_name, attname)
# Finding Indexes by using the get_indexes dictionary doesn't seem to work
# for sqlite.
def find_index_missing_in_db(self, meta, table_indexes, table_name):
pass
def find_index_missing_in_model(self, meta, table_indexes, table_name):
pass
def get_field_db_type(self, description, field=None, table_name=None):
db_type = super(SqliteSQLDiff, self).get_field_db_type(description)
if not db_type:
return
if field:
field_type = self.get_field_model_type(field)
# Fix char/varchar inconsistencies
if self.strip_parameters(field_type) == 'char' and self.strip_parameters(db_type) == 'varchar':
db_type = db_type.lstrip("var")
return db_type
class PostgresqlSQLDiff(SQLDiff):
DATA_TYPES_REVERSE_OVERRIDE = {
1042: 'CharField',
# postgis types (TODO: support is very incomplete)
17506: 'django.contrib.gis.db.models.fields.PointField',
55902: 'django.contrib.gis.db.models.fields.MultiPolygonField',
}
# Hopefully in the future we can add constraint checking and other more
# advanced checks based on this database.
SQL_LOAD_CONSTRAINTS = """
SELECT nspname, relname, conname, attname, pg_get_constraintdef(pg_constraint.oid)
FROM pg_constraint
INNER JOIN pg_attribute ON pg_constraint.conrelid = pg_attribute.attrelid AND pg_attribute.attnum = any(pg_constraint.conkey)
INNER JOIN pg_class ON conrelid=pg_class.oid
INNER JOIN pg_namespace ON pg_namespace.oid=pg_class.relnamespace
ORDER BY CASE WHEN contype='f' THEN 0 ELSE 1 END,contype,nspname,relname,conname;
"""
SQL_FIELD_TYPE_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ALTER'), style.SQL_FIELD(qn(args[1])), style.SQL_KEYWORD("TYPE"), style.SQL_COLTYPE(args[2]))
SQL_FIELD_PARAMETER_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ALTER'), style.SQL_FIELD(qn(args[1])), style.SQL_KEYWORD("TYPE"), style.SQL_COLTYPE(args[2]))
def __init__(self, app_models, options):
SQLDiff.__init__(self, app_models, options)
self.check_constraints = {}
self.load_constraints()
def load_constraints(self):
for dct in self.sql_to_dict(self.SQL_LOAD_CONSTRAINTS, []):
key = (dct['nspname'], dct['relname'], dct['attname'])
if 'CHECK' in dct['pg_get_constraintdef']:
self.check_constraints[key] = dct
def get_field_db_type(self, description, field=None, table_name=None):
db_type = super(PostgresqlSQLDiff, self).get_field_db_type(description)
if not db_type:
return
if field:
if field.primary_key and db_type == 'integer':
db_type = 'serial'
if table_name:
tablespace = field.db_tablespace
if tablespace == "":
tablespace = "public"
check_constraint = self.check_constraints.get((tablespace, table_name, field.attname), {}).get('pg_get_constraintdef', None)
if check_constraint:
check_constraint = check_constraint.replace("((", "(")
check_constraint = check_constraint.replace("))", ")")
check_constraint = '("'.join([')' in e and '" '.join(e.split(" ", 1)) or e for e in check_constraint.split("(")])
# TODO: there might be more than one constraint in the definition?
db_type += ' ' + check_constraint
return db_type
"""
def find_field_type_differ(self, meta, table_description, table_name):
def callback(field, description, model_type, db_type):
if field.primary_key and db_type=='integer':
db_type = 'serial'
return model_type, db_type
super(PostgresqlSQLDiff, self).find_field_type_differs(meta, table_description, table_name, callback)
"""
DATABASE_SQLDIFF_CLASSES = {
'postgresql_psycopg2' : PostgresqlSQLDiff,
'postgresql': PostgresqlSQLDiff,
'mysql': MySQLDiff,
'sqlite3': SqliteSQLDiff,
'oracle': GenericSQLDiff
}
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--all-applications', '-a', action='store_true', dest='all_applications',
help="Automaticly include all application from INSTALLED_APPS."),
make_option('--not-only-existing', '-e', action='store_false', dest='only_existing',
help="Check all tables that exist in the database, not only tables that should exist based on models."),
make_option('--dense-output', '-d', action='store_true', dest='dense_output',
help="Shows the output in dense format, normally output is spreaded over multiple lines."),
make_option('--output_text', '-t', action='store_false', dest='sql', default=True,
help="Outputs the differences as descriptive text instead of SQL"),
)
help = """Prints the (approximated) difference between models and fields in the database for the given app name(s).
It indicates how columns in the database are different from the sql that would
be generated by Django. This command is not a database migration tool. (Though
it can certainly help) It's purpose is to show the current differences as a way
to check/debug ur models compared to the real database tables and columns."""
output_transaction = False
args = '<appname appname ...>'
def handle(self, *app_labels, **options):
from django import VERSION
if VERSION[:2] < (1, 0):
raise CommandError("SQLDiff only support Django 1.0 or higher!")
from django.db import models
from django.conf import settings
if settings.DATABASE_ENGINE == 'dummy':
# This must be the "dummy" database backend, which means the user
# hasn't set DATABASE_ENGINE.
raise CommandError("Django doesn't know which syntax to use for your SQL statements,\n" +
"because you haven't specified the DATABASE_ENGINE setting.\n" +
"Edit your settings file and change DATABASE_ENGINE to something like 'postgresql' or 'mysql'.")
if options.get('all_applications', False):
app_models = models.get_models()
else:
if not app_labels:
raise CommandError('Enter at least one appname.')
try:
app_list = [models.get_app(app_label) for app_label in app_labels]
except (models.ImproperlyConfigured, ImportError), e:
raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e)
app_models = []
for app in app_list:
app_models.extend(models.get_models(app))
## remove all models that are not managed by Django
#app_models = [model for model in app_models if getattr(model._meta, 'managed', True)]
if not app_models:
raise CommandError('Unable to execute sqldiff: no models found.')
engine = settings.DATABASE_ENGINE
if not engine:
engine = connection.__module__.split('.')[-2]
cls = DATABASE_SQLDIFF_CLASSES.get(engine, GenericSQLDiff)
sqldiff_instance = cls(app_models, options)
sqldiff_instance.find_differences()
sqldiff_instance.print_diff(self.style)
return
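# Hedged usage sketch (not part of the original command). A typical invocation,
# assuming this file is installed as a management command, would be:
#   ./manage.py sqldiff -a -t
# The helper below only illustrates how a single difference tuple is rendered
# through the DIFF_TEXTS templates; the table and field names are assumptions.
def _demo_render_diff_text():
    diff_type, diff_args = 'field-missing-in-db', ('blog_post', 'subtitle')
    text = SQLDiff.DIFF_TEXTS[diff_type] % dict((str(i), e) for i, e in enumerate(diff_args))
    # -> "field 'subtitle' defined in model but missing in database"
    return text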
| agpl-3.0 |
jjs0sbw/CSPLN | apps/scaffolding/linux/web2py/gluon/contrib/fpdf/php.py | 13 | 1256 | #!/usr/bin/env python
# -*- coding: latin-1 -*-
# fpdf php helpers:
def substr(s, start, length=-1):
if length < 0:
length=len(s)-start
return s[start:start+length]
def sprintf(fmt, *args): return fmt % args
def print_r(array):
if not isinstance(array, dict):
array = dict([(k, k) for k in array])
for k, v in array.items():
print "[%s] => %s" % (k, v),
def UTF8ToUTF16BE(instr, setbom=True):
"Converts UTF-8 strings to UTF16-BE."
outstr = ""
if (setbom):
outstr += "\xFE\xFF";
if not isinstance(instr, unicode):
instr = instr.decode('UTF-8')
outstr += instr.encode('UTF-16BE')
return outstr
def UTF8StringToArray(instr):
"Converts UTF-8 strings to codepoints array"
return [ord(c) for c in instr]
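# Illustrative usage sketch (not part of the original fpdf helpers); the
# expected values are derived from the implementations above.
def _demo_php_helpers():
    assert substr("abcdef", 1, 3) == "bcd"
    assert sprintf("%s-%d", "page", 2) == "page-2"
    assert UTF8StringToArray("AB") == [65, 66]
    assert UTF8ToUTF16BE("A", setbom=False) == "\x00A"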
# ttfints php helpers:
def die(msg):
raise RuntimeError(msg)
def str_repeat(s, count):
return s * count
def str_pad(s, pad_length=0, pad_char= " ", pad_type= +1 ):
if pad_type<0: # pad left
return s.rjust(pad_length, pad_char)
elif pad_type>0: # pad right
return s.ljust(pad_length, pad_char)
else: # pad both
return s.center(pad_length, pad_char)
strlen = count = lambda s: len(s) | gpl-3.0 |
rismalrv/edx-platform | common/djangoapps/course_modes/views.py | 62 | 10994 | """
Views for the course_mode module
"""
import decimal
from ipware.ip import get_ip
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseBadRequest
from django.shortcuts import redirect
from django.views.generic.base import View
from django.utils.translation import ugettext as _
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from edxmako.shortcuts import render_to_response
from course_modes.models import CourseMode
from courseware.access import has_access
from student.models import CourseEnrollment
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.keys import CourseKey
from util.db import commit_on_success_with_read_committed
from xmodule.modulestore.django import modulestore
from embargo import api as embargo_api
class ChooseModeView(View):
"""View used when the user is asked to pick a mode.
When a get request is used, shows the selection page.
When a post request is used, assumes that it is a form submission
from the selection page, parses the response, and then sends user
to the next step in the flow.
"""
@method_decorator(login_required)
def get(self, request, course_id, error=None):
"""Displays the course mode choice page.
Args:
request (`Request`): The Django Request object.
course_id (unicode): The slash-separated course key.
Keyword Args:
error (unicode): If provided, display this error message
on the page.
Returns:
Response
"""
course_key = CourseKey.from_string(course_id)
# Check whether the user has access to this course
# based on country access rules.
embargo_redirect = embargo_api.redirect_if_blocked(
course_key,
user=request.user,
ip_address=get_ip(request),
url=request.path
)
if embargo_redirect:
return redirect(embargo_redirect)
enrollment_mode, is_active = CourseEnrollment.enrollment_mode_for_user(request.user, course_key)
modes = CourseMode.modes_for_course_dict(course_key)
# We assume that, if 'professional' is one of the modes, it is the *only* mode.
# If we offer more modes alongside 'professional' in the future, this will need to route
# to the usual "choose your track" page same is true for no-id-professional mode.
has_enrolled_professional = (CourseMode.is_professional_slug(enrollment_mode) and is_active)
if CourseMode.has_professional_mode(modes) and not has_enrolled_professional:
return redirect(
reverse(
'verify_student_start_flow',
kwargs={'course_id': unicode(course_key)}
)
)
# If there isn't a verified mode available, then there's nothing
# to do on this page. The user has almost certainly been auto-registered
# in the "honor" track by this point, so we send the user
# to the dashboard.
if not CourseMode.has_verified_mode(modes):
return redirect(reverse('dashboard'))
# If a user has already paid, redirect them to the dashboard.
if is_active and (enrollment_mode in CourseMode.VERIFIED_MODES + [CourseMode.NO_ID_PROFESSIONAL_MODE]):
return redirect(reverse('dashboard'))
donation_for_course = request.session.get("donation_for_course", {})
chosen_price = donation_for_course.get(unicode(course_key), None)
course = modulestore().get_course(course_key)
# When a credit mode is available, students will be given the option
# to upgrade from a verified mode to a credit mode at the end of the course.
# This allows students who have completed photo verification to be eligible
# for university credit.
# Since credit isn't one of the selectable options on the track selection page,
# we need to check *all* available course modes in order to determine whether
# a credit mode is available. If so, then we show slightly different messaging
# for the verified track.
has_credit_upsell = any(
CourseMode.is_credit_mode(mode) for mode
in CourseMode.modes_for_course(course_key, only_selectable=False)
)
context = {
"course_modes_choose_url": reverse("course_modes_choose", kwargs={'course_id': course_key.to_deprecated_string()}),
"modes": modes,
"has_credit_upsell": has_credit_upsell,
"course_name": course.display_name_with_default,
"course_org": course.display_org_with_default,
"course_num": course.display_number_with_default,
"chosen_price": chosen_price,
"error": error,
"responsive": True,
"nav_hidden": True,
}
if "verified" in modes:
context["suggested_prices"] = [
decimal.Decimal(x.strip())
for x in modes["verified"].suggested_prices.split(",")
if x.strip()
]
context["currency"] = modes["verified"].currency.upper()
context["min_price"] = modes["verified"].min_price
context["verified_name"] = modes["verified"].name
context["verified_description"] = modes["verified"].description
return render_to_response("course_modes/choose.html", context)
@method_decorator(login_required)
@method_decorator(commit_on_success_with_read_committed)
def post(self, request, course_id):
"""Takes the form submission from the page and parses it.
Args:
request (`Request`): The Django Request object.
course_id (unicode): The slash-separated course key.
Returns:
Status code 400 when the requested mode is unsupported. When the honor mode
is selected, redirects to the dashboard. When the verified mode is selected,
returns error messages if the indicated contribution amount is invalid or
below the minimum, otherwise redirects to the verification flow.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
user = request.user
# This is a bit redundant with logic in student.views.change_enrollment,
# but I don't really have the time to refactor it more nicely and test.
course = modulestore().get_course(course_key)
if not has_access(user, 'enroll', course):
error_msg = _("Enrollment is closed")
return self.get(request, course_id, error=error_msg)
requested_mode = self._get_requested_mode(request.POST)
allowed_modes = CourseMode.modes_for_course_dict(course_key)
if requested_mode not in allowed_modes:
return HttpResponseBadRequest(_("Enrollment mode not supported"))
if requested_mode == 'honor':
# The user will have already been enrolled in the honor mode at this
# point, so we just redirect them to the dashboard, thereby avoiding
# hitting the database a second time attempting to enroll them.
return redirect(reverse('dashboard'))
mode_info = allowed_modes[requested_mode]
if requested_mode == 'verified':
amount = request.POST.get("contribution") or \
request.POST.get("contribution-other-amt") or 0
try:
# Validate the amount passed in and force it into two digits
amount_value = decimal.Decimal(amount).quantize(decimal.Decimal('.01'), rounding=decimal.ROUND_DOWN)
except decimal.InvalidOperation:
error_msg = _("Invalid amount selected.")
return self.get(request, course_id, error=error_msg)
# Check for minimum pricing
if amount_value < mode_info.min_price:
error_msg = _("No selected price or selected price is too low.")
return self.get(request, course_id, error=error_msg)
donation_for_course = request.session.get("donation_for_course", {})
donation_for_course[unicode(course_key)] = amount_value
request.session["donation_for_course"] = donation_for_course
return redirect(
reverse(
'verify_student_start_flow',
kwargs={'course_id': unicode(course_key)}
)
)
def _get_requested_mode(self, request_dict):
"""Get the user's requested mode
Args:
request_dict (`QueryDict`): A dictionary-like object containing all given HTTP POST parameters.
Returns:
The course mode slug corresponding to the choice in the POST parameters,
None if the choice in the POST parameters is missing or is an unsupported mode.
"""
if 'verified_mode' in request_dict:
return 'verified'
if 'honor_mode' in request_dict:
return 'honor'
else:
return None
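# Hedged sketch (not part of the original module) showing the mapping that
# _get_requested_mode implements; the plain dicts below stand in for request.POST.
def _demo_requested_mode():
    view = ChooseModeView()
    assert view._get_requested_mode({'verified_mode': '1'}) == 'verified'
    assert view._get_requested_mode({'honor_mode': '1'}) == 'honor'
    assert view._get_requested_mode({}) is None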
def create_mode(request, course_id):
"""Add a mode to the course corresponding to the given course ID.
Only available when settings.FEATURES['MODE_CREATION_FOR_TESTING'] is True.
Attempts to use the following querystring parameters from the request:
`mode_slug` (str): The mode to add, either 'honor', 'verified', or 'professional'
`mode_display_name` (str): Describes the new course mode
`min_price` (int): The minimum price a user must pay to enroll in the new course mode
`suggested_prices` (str): Comma-separated prices to suggest to the user.
`currency` (str): The currency in which to list prices.
By default, this endpoint will create an 'honor' mode for the given course with display name
'Honor Code', a minimum price of 0, no suggested prices, and using USD as the currency.
Args:
request (`Request`): The Django Request object.
course_id (unicode): A course ID.
Returns:
Response
"""
PARAMETERS = {
'mode_slug': u'honor',
'mode_display_name': u'Honor Code Certificate',
'min_price': 0,
'suggested_prices': u'',
'currency': u'usd',
}
# Try pulling querystring parameters out of the request
for parameter, default in PARAMETERS.iteritems():
PARAMETERS[parameter] = request.GET.get(parameter, default)
# Attempt to create the new mode for the given course
course_key = CourseKey.from_string(course_id)
CourseMode.objects.get_or_create(course_id=course_key, **PARAMETERS)
# Return a success message and a 200 response
return HttpResponse("Mode '{mode_slug}' created for '{course}'.".format(
mode_slug=PARAMETERS['mode_slug'],
course=course_id
))
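# Illustrative request sketch (the URL pattern and course id are assumptions;
# the endpoint is only exposed when FEATURES['MODE_CREATION_FOR_TESTING'] is True):
#   GET /course_modes/create_mode/course-v1:edX+DemoX+2015?mode_slug=verified&min_price=10
#   -> "Mode 'verified' created for 'course-v1:edX+DemoX+2015'."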
| agpl-3.0 |
stevenwudi/Kernelized_Correlation_Filter | CNN_training.py | 1 | 3640 | import numpy as np
from keras.optimizers import SGD
from models.CNN_CIFAR import cnn_cifar_batchnormalisation, cnn_cifar_small, cnn_cifar_nodropout, \
cnn_cifar_small_batchnormalisation
from models.DataLoader import DataLoader
from scripts.progress_bar import printProgress
from time import time, localtime
# this is a predefined dataloader
loader = DataLoader(batch_size=32)
# construct the model here (pre-defined model)
model = cnn_cifar_small_batchnormalisation(loader.image_shape)
print(model.name)
nb_epoch = 200
early_stopping = True
early_stopping_count = 0
early_stopping_wait = 3
train_loss = []
valid_loss = []
learning_rate = [0.0001, 0.001, 0.01]
# let's train the model using SGD + momentum (how original).
sgd = SGD(lr=learning_rate[-1], decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='mean_squared_error', optimizer=sgd)
# load validation data from the h5py file (heavy lifting here)
x_valid, y_valid = loader.get_valid()
best_valid = np.inf
for e in range(nb_epoch):
print("epoch %d" % e)
loss_list = []
time_list = []
time_start = time()
for i in range(loader.n_iter_train):
time_start_batch = time()
X_batch, Y_batch = loader.next_train_batch()
loss_list.append(model.train_on_batch(X_batch, Y_batch))
# calculate some time information
time_list.append(time() - time_start_batch)
eta = (loader.n_iter_train - i) * np.array(time_list).mean()
printProgress(i, loader.n_iter_train-1, prefix='Progress:', suffix='batch error: %0.5f, ETA: %0.2f sec.'%(np.array(loss_list).mean(), eta), barLength=50)
printProgress(i, loader.n_iter_train - 1, prefix='Progress:', suffix='batch error: %0.5f' % (np.array(loss_list).mean()), barLength=50)
train_loss.append(np.asarray(loss_list).mean())
print('training loss is %f, one epoch uses: %0.2f sec' % (train_loss[-1], time() - time_start))
valid_loss.append(model.evaluate(x_valid, y_valid))
print('valid loss is %f' % valid_loss[-1])
if best_valid > valid_loss[-1]:
early_stopping_count = 0
print('saving best valid result...')
best_valid = valid_loss[-1]
model.save('./models/CNN_Model_OBT100_multi_cnn_best_valid_'+model.name+'.h5')
else:
# wait in the early-stopping loop until the patience limit is reached
early_stopping_count += 1
if early_stopping_count > early_stopping_wait:
early_stopping_count = 0
if len(learning_rate) > 1:
learning_rate.pop()
print('decreasing the learning rate to: %f'%learning_rate[-1])
model.optimizer.lr.set_value(learning_rate[-1])
else:
break
lt = localtime()
lt_str = str(lt.tm_year)+"."+str(lt.tm_mon).zfill(2)+"." \
+str(lt.tm_mday).zfill(2)+"."+str(lt.tm_hour).zfill(2)+"."\
+str(lt.tm_min).zfill(2)+"."+str(lt.tm_sec).zfill(2)
np.savetxt('./models/train_loss_'+model.name+'_'+lt_str+'.txt', train_loss)
np.savetxt('./models/valid_loss_'+model.name+'_'+lt_str+'.txt', valid_loss)
model.save('./models/CNN_Model_OBT100_multi_cnn_'+model.name+'_final.h5')
print("done")
#### we show some visualisation here
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
train_loss = np.loadtxt('./models/train_loss_'+model.name+'_'+lt_str+'.txt')
valid_loss = np.loadtxt('./models/valid_loss_'+model.name+'_'+lt_str+'.txt')
plt.plot(train_loss, 'b')
plt.plot(valid_loss, 'r')
blue_label = mpatches.Patch(color='blue', label='train_loss')
red_label = mpatches.Patch(color='red', label='valid_loss')
plt.legend(handles=[blue_label, red_label])
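# Hedged sketch (not part of the original script) isolating the patience /
# learning-rate-decay rule used in the training loop above, so it can be
# exercised without Keras or the data loader.
def lr_schedule_step(valid_loss, best_valid, wait, patience, rates):
    """Return (best_valid, wait, new_rate_or_None, stop)."""
    if valid_loss < best_valid:
        return valid_loss, 0, None, False          # improvement: reset patience
    wait += 1
    if wait <= patience:
        return best_valid, wait, None, False       # keep waiting
    if len(rates) > 1:
        rates.pop()
        return best_valid, 0, rates[-1], False     # decay the learning rate
    return best_valid, wait, None, True            # schedule exhausted: stop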
| gpl-3.0 |
wakatime/wakatime | wakatime/packages/py27/pygments/lexers/sas.py | 4 | 9449 | # -*- coding: utf-8 -*-
"""
pygments.lexers.sas
~~~~~~~~~~~~~~~~~~~
Lexer for SAS.
:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, words
from pygments.token import Comment, Keyword, Name, Number, String, Text, \
Other, Generic
__all__ = ['SASLexer']
class SASLexer(RegexLexer):
"""
For `SAS <http://www.sas.com/>`_ files.
.. versionadded:: 2.2
"""
# Syntax from syntax/sas.vim by James Kidd <[email protected]>
name = 'SAS'
aliases = ['sas']
filenames = ['*.SAS', '*.sas']
mimetypes = ['text/x-sas', 'text/sas', 'application/x-sas']
flags = re.IGNORECASE | re.MULTILINE
builtins_macros = (
"bquote", "nrbquote", "cmpres", "qcmpres", "compstor", "datatyp",
"display", "do", "else", "end", "eval", "global", "goto", "if",
"index", "input", "keydef", "label", "left", "length", "let",
"local", "lowcase", "macro", "mend", "nrquote",
"nrstr", "put", "qleft", "qlowcase", "qscan",
"qsubstr", "qsysfunc", "qtrim", "quote", "qupcase", "scan",
"str", "substr", "superq", "syscall", "sysevalf", "sysexec",
"sysfunc", "sysget", "syslput", "sysprod", "sysrc", "sysrput",
"then", "to", "trim", "unquote", "until", "upcase", "verify",
"while", "window"
)
builtins_conditionals = (
"do", "if", "then", "else", "end", "until", "while"
)
builtins_statements = (
"abort", "array", "attrib", "by", "call", "cards", "cards4",
"catname", "continue", "datalines", "datalines4", "delete", "delim",
"delimiter", "display", "dm", "drop", "endsas", "error", "file",
"filename", "footnote", "format", "goto", "in", "infile", "informat",
"input", "keep", "label", "leave", "length", "libname", "link",
"list", "lostcard", "merge", "missing", "modify", "options", "output",
"out", "page", "put", "redirect", "remove", "rename", "replace",
"retain", "return", "select", "set", "skip", "startsas", "stop",
"title", "update", "waitsas", "where", "window", "x", "systask"
)
builtins_sql = (
"add", "and", "alter", "as", "cascade", "check", "create",
"delete", "describe", "distinct", "drop", "foreign", "from",
"group", "having", "index", "insert", "into", "in", "key", "like",
"message", "modify", "msgtype", "not", "null", "on", "or",
"order", "primary", "references", "reset", "restrict", "select",
"set", "table", "unique", "update", "validate", "view", "where"
)
builtins_functions = (
"abs", "addr", "airy", "arcos", "arsin", "atan", "attrc",
"attrn", "band", "betainv", "blshift", "bnot", "bor",
"brshift", "bxor", "byte", "cdf", "ceil", "cexist", "cinv",
"close", "cnonct", "collate", "compbl", "compound",
"compress", "cos", "cosh", "css", "curobs", "cv", "daccdb",
"daccdbsl", "daccsl", "daccsyd", "dacctab", "dairy", "date",
"datejul", "datepart", "datetime", "day", "dclose", "depdb",
"depdbsl", "depsl", "depsyd",
"deptab", "dequote", "dhms", "dif", "digamma",
"dim", "dinfo", "dnum", "dopen", "doptname", "doptnum",
"dread", "dropnote", "dsname", "erf", "erfc", "exist", "exp",
"fappend", "fclose", "fcol", "fdelete", "fetch", "fetchobs",
"fexist", "fget", "fileexist", "filename", "fileref",
"finfo", "finv", "fipname", "fipnamel", "fipstate", "floor",
"fnonct", "fnote", "fopen", "foptname", "foptnum", "fpoint",
"fpos", "fput", "fread", "frewind", "frlen", "fsep", "fuzz",
"fwrite", "gaminv", "gamma", "getoption", "getvarc", "getvarn",
"hbound", "hms", "hosthelp", "hour", "ibessel", "index",
"indexc", "indexw", "input", "inputc", "inputn", "int",
"intck", "intnx", "intrr", "irr", "jbessel", "juldate",
"kurtosis", "lag", "lbound", "left", "length", "lgamma",
"libname", "libref", "log", "log10", "log2", "logpdf", "logpmf",
"logsdf", "lowcase", "max", "mdy", "mean", "min", "minute",
"mod", "month", "mopen", "mort", "n", "netpv", "nmiss",
"normal", "note", "npv", "open", "ordinal", "pathname",
"pdf", "peek", "peekc", "pmf", "point", "poisson", "poke",
"probbeta", "probbnml", "probchi", "probf", "probgam",
"probhypr", "probit", "probnegb", "probnorm", "probt",
"put", "putc", "putn", "qtr", "quote", "ranbin", "rancau",
"ranexp", "rangam", "range", "rank", "rannor", "ranpoi",
"rantbl", "rantri", "ranuni", "repeat", "resolve", "reverse",
"rewind", "right", "round", "saving", "scan", "sdf", "second",
"sign", "sin", "sinh", "skewness", "soundex", "spedis",
"sqrt", "std", "stderr", "stfips", "stname", "stnamel",
"substr", "sum", "symget", "sysget", "sysmsg", "sysprod",
"sysrc", "system", "tan", "tanh", "time", "timepart", "tinv",
"tnonct", "today", "translate", "tranwrd", "trigamma",
"trim", "trimn", "trunc", "uniform", "upcase", "uss", "var",
"varfmt", "varinfmt", "varlabel", "varlen", "varname",
"varnum", "varray", "varrayx", "vartype", "verify", "vformat",
"vformatd", "vformatdx", "vformatn", "vformatnx", "vformatw",
"vformatwx", "vformatx", "vinarray", "vinarrayx", "vinformat",
"vinformatd", "vinformatdx", "vinformatn", "vinformatnx",
"vinformatw", "vinformatwx", "vinformatx", "vlabel",
"vlabelx", "vlength", "vlengthx", "vname", "vnamex", "vtype",
"vtypex", "weekday", "year", "yyq", "zipfips", "zipname",
"zipnamel", "zipstate"
)
tokens = {
'root': [
include('comments'),
include('proc-data'),
include('cards-datalines'),
include('logs'),
include('general'),
(r'.', Text),
],
# SAS is multi-line regardless, but * is ended by ;
'comments': [
(r'^\s*\*.*?;', Comment),
(r'/\*.*?\*/', Comment),
(r'^\s*\*(.|\n)*?;', Comment.Multiline),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
],
# Special highlight for proc, data, quit, run
'proc-data': [
(r'(^|;)\s*(proc \w+|data|run|quit)[\s;]',
Keyword.Reserved),
],
# Special highlight cards and datalines
'cards-datalines': [
(r'^\s*(datalines|cards)\s*;\s*$', Keyword, 'data'),
],
'data': [
(r'(.|\n)*^\s*;\s*$', Other, '#pop'),
],
# Special highlight for put NOTE|ERROR|WARNING (order matters)
'logs': [
(r'\n?^\s*%?put ', Keyword, 'log-messages'),
],
'log-messages': [
(r'NOTE(:|-).*', Generic, '#pop'),
(r'WARNING(:|-).*', Generic.Emph, '#pop'),
(r'ERROR(:|-).*', Generic.Error, '#pop'),
include('general'),
],
'general': [
include('keywords'),
include('vars-strings'),
include('special'),
include('numbers'),
],
# Keywords, statements, functions, macros
'keywords': [
(words(builtins_statements,
prefix = r'\b',
suffix = r'\b'),
Keyword),
(words(builtins_sql,
prefix = r'\b',
suffix = r'\b'),
Keyword),
(words(builtins_conditionals,
prefix = r'\b',
suffix = r'\b'),
Keyword),
(words(builtins_macros,
prefix = r'%',
suffix = r'\b'),
Name.Builtin),
(words(builtins_functions,
prefix = r'\b',
suffix = r'\('),
Name.Builtin),
],
# Strings and user-defined variables and macros (order matters)
'vars-strings': [
(r'&[a-z_]\w{0,31}\.?', Name.Variable),
(r'%[a-z_]\w{0,31}', Name.Function),
(r'\'', String, 'string_squote'),
(r'"', String, 'string_dquote'),
],
'string_squote': [
('\'', String, '#pop'),
(r'\\\\|\\"|\\\n', String.Escape),
# AFAIK, macro variables are not evaluated in single quotes
# (r'&', Name.Variable, 'validvar'),
(r'[^$\'\\]+', String),
(r'[$\'\\]', String),
],
'string_dquote': [
(r'"', String, '#pop'),
(r'\\\\|\\"|\\\n', String.Escape),
(r'&', Name.Variable, 'validvar'),
(r'[^$&"\\]+', String),
(r'[$"\\]', String),
],
'validvar': [
(r'[a-z_]\w{0,31}\.?', Name.Variable, '#pop'),
],
# SAS numbers and special variables
'numbers': [
(r'\b[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+|\.)(E[+-]?[0-9]+)?i?\b',
Number),
],
'special': [
(r'(null|missing|_all_|_automatic_|_character_|_n_|'
r'_infile_|_name_|_null_|_numeric_|_user_|_webout_)',
Keyword.Constant),
],
# 'operators': [
# (r'(-|=|<=|>=|<|>|<>|&|!=|'
# r'\||\*|\+|\^|/|!|~|~=)', Operator)
# ],
}
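# Hedged usage sketch (not part of the lexer module): tokenize a small SAS
# program with the standard pygments API; the sample text is an assumption.
def _demo_sas_lexer():
    code = u"data work.demo;\n    set sashelp.class;\nrun;\n"
    return list(SASLexer().get_tokens(code))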
| bsd-3-clause |
Cubitect/ASMModSuit | ASMVillageMarker.py | 1 | 5318 | import SRenderLib
from asmutils import *
def create_mod(util):
print '\nSearching for mappings for ASMVillageMarker...'
SRenderLib.setup_lib(util)
lines = util.readj('World')
pos = findOps(lines,0,[['.field','protected',';'],['.field','protected','Z'],['.field','protected',';'],['.field','protected',';']])
util.setmap('VillageCollection',betweenr(lines[pos],'L',';'))
util.setmap('World.villageCollectionObj',endw(lines[pos],2))
pos = findOps(lines,pos+1,[['.method','public','()L'+util.getmap('VillageCollection')]])
if pos is not None:
util.setmap('World.getVillageCollection',endw(lines[pos],3))
lines = util.readj('VillageCollection')
pos = findOps(lines,0,[['.method','public','()Ljava/util/List']])
util.setmap('VillageCollection.getVillageList',endw(lines[pos],3))
pos = findOps(lines,pos+1,[['.method','public',')L']])
util.setmap('Village',betweenr(lines[pos],')L',';'))
lines = util.readj('Village')
pos = findOps(lines,0,[['.method','public','()L']])
util.setmap('Village.getCenter',endw(lines[pos],3))
util.setmap('BlockPos',betweenr(lines[pos],')L',';'))
pos = findOps(lines,pos+1,[['.method','public','()I']])
util.setmap('Village.getVillageRadius',endw(lines[pos],3))
pos = findOps(lines,pos+1,[['.method','public','()Ljava/util/List']])
util.setmap('Village.getVillageDoorInfoList',endw(lines[pos],3))
pos = findOps(lines,pos+1,[['.method','public',')L']])
util.setmap('VillageDoorInfo',betweenr(lines[pos],')L',';'))
lines = util.readj('VillageDoorInfo')
pos = findOps(lines,0,[['.method','public','()L']])
util.setmap('VillageDoorInfo.getDoorBlockPos',endw(lines[pos],3))
lines = util.readj('BlockPos')
pos = findOps(lines,0,[['.super']])
util.setmap('Vec3i',endw(lines[pos],1))
lines = util.readj('Vec3i')
pos = findOps(lines,0, [['.method','public','()I'],['stack 1 locals 1']])
util.setmap('Vec3i.getX',endw(lines[pos-1],3))
pos = findOps(lines,pos+1,[['.method','public','()I'],['stack 1 locals 1']])
util.setmap('Vec3i.getY',endw(lines[pos-1],3))
pos = findOps(lines,pos+1,[['.method','public','()I'],['stack 1 locals 1']])
util.setmap('Vec3i.getZ',endw(lines[pos-1],3))
print 'Applying ASMVillageMarker patch...'
util.setmap('ASMVillageMarker','villagemarker/ASMVillageMarker')
lines = util.readt('ASMVillageMarker')
lines = '\1'.join(lines)
lines = lines.replace('net/minecraft/server/integrated/IntegratedServer', util.getmap('IntegratedServer'))
lines = lines.replace('net/minecraft/client/entity/EntityPlayerSP', util.getmap('EntityPlayerSP'))
lines = lines.replace('net/minecraft/client/Minecraft', util.getmap('Minecraft'))
lines = lines.replace('net/minecraft/world/WorldServer', util.getmap('WorldServer'))
lines = lines.replace('net/minecraft/util/math/BlockPos', util.getmap('BlockPos'))
lines = lines.replace('net/minecraft/village/VillageCollection', util.getmap('VillageCollection'))
lines = lines.replace('net/minecraft/village/VillageDoorInfo', util.getmap('VillageDoorInfo'))
lines = lines.replace('net/minecraft/village/Village', util.getmap('Village'))
lines = lines.replace('thePlayer', util.getmap('Minecraft.thePlayer'))
lines = lines.replace('dimension', util.getmap('Entity.dimension'))
lines = lines.replace('isSingleplayer', util.getmap('Minecraft.isSingleplayer'))
lines = lines.replace('worldServerForDimension', util.getmap('MinecraftServer.worldServerForDimension'))
lines = lines.replace('getVillageDoorInfoList', util.getmap('Village.getVillageDoorInfoList'))
lines = lines.replace('getVillageCollection', util.getmap('World.getVillageCollection'))
lines = lines.replace('getVillageRadius', util.getmap('Village.getVillageRadius'))
lines = lines.replace('getVillageList', util.getmap('VillageCollection.getVillageList'))
lines = lines.replace('getDoorBlockPos', util.getmap('VillageDoorInfo.getDoorBlockPos'))
lines = lines.replace('getIntegratedServer', util.getmap('Minecraft.getIntegratedServer'))
lines = lines.replace('getMinecraft', util.getmap('Minecraft.getMinecraft'))
lines = lines.replace('getCenter', util.getmap('Village.getCenter'))
lines = lines.replace('getX', util.getmap('Vec3i.getX'))
lines = lines.replace('getY', util.getmap('Vec3i.getY'))
lines = lines.replace('getZ', util.getmap('Vec3i.getZ'))
lines = lines.split('\1')
util.write2mod('ASMVillageMarker',lines)
print 'Injecting render call...'
lines = util.readj('EntityRenderer')
pos = 0
while True:
pos = findOps(lines,pos+1,[['ldc','culling']])
if pos is None:
break
pos = findOps(lines,pos+1,[['dload'],['dload'],['dload']])
playerX = endw(lines[pos-2],1)
playerY = endw(lines[pos-1],1)
playerZ = endw(lines[pos ],1)
pos = findOps(lines,pos+1,[['ldc','aboveClouds']])
pos = goBackTo(lines,pos,['invokevirtual'])
lines.insert(pos+1,'dload '+playerX+'\n')
lines.insert(pos+2,'dload '+playerY+'\n')
lines.insert(pos+3,'dload '+playerZ+'\n')
lines.insert(pos+4,'invokestatic Method '+util.getmap('ASMVillageMarker')+' render (DDD)V\n')
util.write2mod('EntityRenderer',lines)
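# For reference, the four instructions spliced into EntityRenderer above expand
# to the following Jasmin-style assembly (the local-variable slots depend on the
# surrounding method, so they are shown symbolically):
#   dload <playerX>
#   dload <playerY>
#   dload <playerZ>
#   invokestatic Method villagemarker/ASMVillageMarker render (DDD)V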
| gpl-3.0 |
dyn888/youtube-dl | youtube_dl/extractor/behindkink.py | 145 | 1645 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import url_basename
class BehindKinkIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?behindkink\.com/(?P<year>[0-9]{4})/(?P<month>[0-9]{2})/(?P<day>[0-9]{2})/(?P<id>[^/#?_]+)'
_TEST = {
'url': 'http://www.behindkink.com/2014/12/05/what-are-you-passionate-about-marley-blaze/',
'md5': '507b57d8fdcd75a41a9a7bdb7989c762',
'info_dict': {
'id': '37127',
'ext': 'mp4',
'title': 'What are you passionate about – Marley Blaze',
'description': 'md5:aee8e9611b4ff70186f752975d9b94b4',
'upload_date': '20141205',
'thumbnail': 'http://www.behindkink.com/wp-content/uploads/2014/12/blaze-1.jpg',
'age_limit': 18,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('id')
webpage = self._download_webpage(url, display_id)
video_url = self._search_regex(
r'<source src="([^"]+)"', webpage, 'video URL')
video_id = url_basename(video_url).split('_')[0]
upload_date = mobj.group('year') + mobj.group('month') + mobj.group('day')
return {
'id': video_id,
'display_id': display_id,
'url': video_url,
'title': self._og_search_title(webpage),
'thumbnail': self._og_search_thumbnail(webpage),
'description': self._og_search_description(webpage),
'upload_date': upload_date,
'age_limit': 18,
}
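# Illustrative sketch (not part of the extractor) of how _VALID_URL decomposes
# a post URL; the values mirror the _TEST entry above.
#   >>> import re
#   >>> m = re.match(BehindKinkIE._VALID_URL,
#   ...     'http://www.behindkink.com/2014/12/05/what-are-you-passionate-about-marley-blaze/')
#   >>> m.group('year'), m.group('month'), m.group('day'), m.group('id')
#   ('2014', '12', '05', 'what-are-you-passionate-about-marley-blaze')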
| unlicense |
shadowmint/nwidget | lib/cocos2d-0.5.5/test/test_menu_items.py | 1 | 2268 | # This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
testinfo = "s, q"
tags = "menu items, ToggleMenuItem, MultipleMenuItem, MenuItem, EntryMenuItem, ImageMenuItem, ColorMenuItem"
from pyglet import image
from pyglet.gl import *
from pyglet import font
from cocos.director import *
from cocos.menu import *
from cocos.scene import *
from cocos.layer import *
from operator import setslice
def printf(*args):
sys.stdout.write(''.join([str(x) for x in args])+'\n')
class MainMenu(Menu):
def __init__( self ):
super( MainMenu, self ).__init__("Test Menu Items")
# then add the items
item1= ToggleMenuItem('ToggleMenuItem: ', self.on_toggle_callback, True )
resolutions = ['320x200','640x480','800x600', '1024x768', '1200x1024']
item2= MultipleMenuItem('MultipleMenuItem: ',
self.on_multiple_callback,
resolutions)
item3 = MenuItem('MenuItem', self.on_callback )
item4 = EntryMenuItem('EntryMenuItem:', self.on_entry_callback, 'value',
max_length=8)
item5 = ImageMenuItem('imagemenuitem.png', self.on_image_callback)
colors = [(255, 255, 255), (129, 255, 100), (50, 50, 100), (255, 200, 150)]
item6 = ColorMenuItem('ColorMenuItem:', self.on_color_callback, colors)
self.create_menu( [item1,item2,item3,item4,item5,item6] )
def on_quit( self ):
pyglet.app.exit()
def on_multiple_callback(self, idx ):
print 'multiple item callback', idx
def on_toggle_callback(self, b ):
print 'toggle item callback', b
def on_callback(self ):
print 'item callback'
def on_entry_callback (self, value):
print 'entry item callback', value
def on_image_callback (self):
print 'image item callback'
def on_color_callback(self, value):
print 'color item callback:', value
def main():
pyglet.font.add_directory('.')
director.init( resizable=True)
director.run( Scene( MainMenu() ) )
if __name__ == '__main__':
main()
| apache-2.0 |
tangfeng1/flask | tests/test_blueprints.py | 143 | 18147 | # -*- coding: utf-8 -*-
"""
tests.blueprints
~~~~~~~~~~~~~~~~
Blueprints (and currently modules)
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import pytest
import flask
from flask._compat import text_type
from werkzeug.http import parse_cache_control_header
from jinja2 import TemplateNotFound
def test_blueprint_specific_error_handling():
frontend = flask.Blueprint('frontend', __name__)
backend = flask.Blueprint('backend', __name__)
sideend = flask.Blueprint('sideend', __name__)
@frontend.errorhandler(403)
def frontend_forbidden(e):
return 'frontend says no', 403
@frontend.route('/frontend-no')
def frontend_no():
flask.abort(403)
@backend.errorhandler(403)
def backend_forbidden(e):
return 'backend says no', 403
@backend.route('/backend-no')
def backend_no():
flask.abort(403)
@sideend.route('/what-is-a-sideend')
def sideend_no():
flask.abort(403)
app = flask.Flask(__name__)
app.register_blueprint(frontend)
app.register_blueprint(backend)
app.register_blueprint(sideend)
@app.errorhandler(403)
def app_forbidden(e):
return 'application itself says no', 403
c = app.test_client()
assert c.get('/frontend-no').data == b'frontend says no'
assert c.get('/backend-no').data == b'backend says no'
assert c.get('/what-is-a-sideend').data == b'application itself says no'
def test_blueprint_specific_user_error_handling():
class MyDecoratorException(Exception):
pass
class MyFunctionException(Exception):
pass
blue = flask.Blueprint('blue', __name__)
@blue.errorhandler(MyDecoratorException)
def my_decorator_exception_handler(e):
assert isinstance(e, MyDecoratorException)
return 'boom'
def my_function_exception_handler(e):
assert isinstance(e, MyFunctionException)
return 'bam'
blue.register_error_handler(MyFunctionException, my_function_exception_handler)
@blue.route('/decorator')
def blue_deco_test():
raise MyDecoratorException()
@blue.route('/function')
def blue_func_test():
raise MyFunctionException()
app = flask.Flask(__name__)
app.register_blueprint(blue)
c = app.test_client()
assert c.get('/decorator').data == b'boom'
assert c.get('/function').data == b'bam'
def test_blueprint_url_definitions():
bp = flask.Blueprint('test', __name__)
@bp.route('/foo', defaults={'baz': 42})
def foo(bar, baz):
return '%s/%d' % (bar, baz)
@bp.route('/bar')
def bar(bar):
return text_type(bar)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/1', url_defaults={'bar': 23})
app.register_blueprint(bp, url_prefix='/2', url_defaults={'bar': 19})
c = app.test_client()
assert c.get('/1/foo').data == b'23/42'
assert c.get('/2/foo').data == b'19/42'
assert c.get('/1/bar').data == b'23'
assert c.get('/2/bar').data == b'19'
def test_blueprint_url_processors():
bp = flask.Blueprint('frontend', __name__, url_prefix='/<lang_code>')
@bp.url_defaults
def add_language_code(endpoint, values):
values.setdefault('lang_code', flask.g.lang_code)
@bp.url_value_preprocessor
def pull_lang_code(endpoint, values):
flask.g.lang_code = values.pop('lang_code')
@bp.route('/')
def index():
return flask.url_for('.about')
@bp.route('/about')
def about():
return flask.url_for('.index')
app = flask.Flask(__name__)
app.register_blueprint(bp)
c = app.test_client()
assert c.get('/de/').data == b'/de/about'
assert c.get('/de/about').data == b'/de/'
def test_templates_and_static(test_apps):
from blueprintapp import app
c = app.test_client()
rv = c.get('/')
assert rv.data == b'Hello from the Frontend'
rv = c.get('/admin/')
assert rv.data == b'Hello from the Admin'
rv = c.get('/admin/index2')
assert rv.data == b'Hello from the Admin'
rv = c.get('/admin/static/test.txt')
assert rv.data.strip() == b'Admin File'
rv.close()
rv = c.get('/admin/static/css/test.css')
assert rv.data.strip() == b'/* nested file */'
rv.close()
# try/finally, in case other tests use this app for Blueprint tests.
max_age_default = app.config['SEND_FILE_MAX_AGE_DEFAULT']
try:
expected_max_age = 3600
if app.config['SEND_FILE_MAX_AGE_DEFAULT'] == expected_max_age:
expected_max_age = 7200
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = expected_max_age
rv = c.get('/admin/static/css/test.css')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
assert cc.max_age == expected_max_age
rv.close()
finally:
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = max_age_default
with app.test_request_context():
assert flask.url_for('admin.static', filename='test.txt') == '/admin/static/test.txt'
with app.test_request_context():
try:
flask.render_template('missing.html')
except TemplateNotFound as e:
assert e.name == 'missing.html'
else:
assert 0, 'expected exception'
with flask.Flask(__name__).test_request_context():
assert flask.render_template('nested/nested.txt') == 'I\'m nested'
def test_default_static_cache_timeout():
app = flask.Flask(__name__)
class MyBlueprint(flask.Blueprint):
def get_send_file_max_age(self, filename):
return 100
blueprint = MyBlueprint('blueprint', __name__, static_folder='static')
app.register_blueprint(blueprint)
# try/finally, in case other tests use this app for Blueprint tests.
max_age_default = app.config['SEND_FILE_MAX_AGE_DEFAULT']
try:
with app.test_request_context():
unexpected_max_age = 3600
if app.config['SEND_FILE_MAX_AGE_DEFAULT'] == unexpected_max_age:
unexpected_max_age = 7200
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = unexpected_max_age
rv = blueprint.send_static_file('index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
assert cc.max_age == 100
rv.close()
finally:
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = max_age_default
def test_templates_list(test_apps):
from blueprintapp import app
templates = sorted(app.jinja_env.list_templates())
assert templates == ['admin/index.html', 'frontend/index.html']
def test_dotted_names():
frontend = flask.Blueprint('myapp.frontend', __name__)
backend = flask.Blueprint('myapp.backend', __name__)
@frontend.route('/fe')
def frontend_index():
return flask.url_for('myapp.backend.backend_index')
@frontend.route('/fe2')
def frontend_page2():
return flask.url_for('.frontend_index')
@backend.route('/be')
def backend_index():
return flask.url_for('myapp.frontend.frontend_index')
app = flask.Flask(__name__)
app.register_blueprint(frontend)
app.register_blueprint(backend)
c = app.test_client()
assert c.get('/fe').data.strip() == b'/be'
assert c.get('/fe2').data.strip() == b'/fe'
assert c.get('/be').data.strip() == b'/fe'
def test_dotted_names_from_app():
app = flask.Flask(__name__)
app.testing = True
test = flask.Blueprint('test', __name__)
@app.route('/')
def app_index():
return flask.url_for('test.index')
@test.route('/test/')
def index():
return flask.url_for('app_index')
app.register_blueprint(test)
with app.test_client() as c:
rv = c.get('/')
assert rv.data == b'/test/'
def test_empty_url_defaults():
bp = flask.Blueprint('bp', __name__)
@bp.route('/', defaults={'page': 1})
@bp.route('/page/<int:page>')
def something(page):
return str(page)
app = flask.Flask(__name__)
app.register_blueprint(bp)
c = app.test_client()
assert c.get('/').data == b'1'
assert c.get('/page/2').data == b'2'
def test_route_decorator_custom_endpoint():
bp = flask.Blueprint('bp', __name__)
@bp.route('/foo')
def foo():
return flask.request.endpoint
@bp.route('/bar', endpoint='bar')
def foo_bar():
return flask.request.endpoint
@bp.route('/bar/123', endpoint='123')
def foo_bar_foo():
return flask.request.endpoint
@bp.route('/bar/foo')
def bar_foo():
return flask.request.endpoint
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.request.endpoint
c = app.test_client()
assert c.get('/').data == b'index'
assert c.get('/py/foo').data == b'bp.foo'
assert c.get('/py/bar').data == b'bp.bar'
assert c.get('/py/bar/123').data == b'bp.123'
assert c.get('/py/bar/foo').data == b'bp.bar_foo'
def test_route_decorator_custom_endpoint_with_dots():
bp = flask.Blueprint('bp', __name__)
@bp.route('/foo')
def foo():
return flask.request.endpoint
try:
@bp.route('/bar', endpoint='bar.bar')
def foo_bar():
return flask.request.endpoint
except AssertionError:
pass
else:
raise AssertionError('expected AssertionError not raised')
try:
@bp.route('/bar/123', endpoint='bar.123')
def foo_bar_foo():
return flask.request.endpoint
except AssertionError:
pass
else:
raise AssertionError('expected AssertionError not raised')
def foo_foo_foo():
pass
pytest.raises(
AssertionError,
lambda: bp.add_url_rule(
'/bar/123', endpoint='bar.123', view_func=foo_foo_foo
)
)
pytest.raises(
AssertionError,
bp.route('/bar/123', endpoint='bar.123'),
lambda: None
)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
c = app.test_client()
assert c.get('/py/foo').data == b'bp.foo'
    # The rules with dotted endpoints didn't actually make it through.
rv = c.get('/py/bar')
assert rv.status_code == 404
rv = c.get('/py/bar/123')
assert rv.status_code == 404
def test_template_filter():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter()
def my_reverse(s):
return s[::-1]
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'my_reverse' in app.jinja_env.filters.keys()
assert app.jinja_env.filters['my_reverse'] == my_reverse
assert app.jinja_env.filters['my_reverse']('abcd') == 'dcba'
def test_add_template_filter():
bp = flask.Blueprint('bp', __name__)
def my_reverse(s):
return s[::-1]
bp.add_app_template_filter(my_reverse)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'my_reverse' in app.jinja_env.filters.keys()
assert app.jinja_env.filters['my_reverse'] == my_reverse
assert app.jinja_env.filters['my_reverse']('abcd') == 'dcba'
def test_template_filter_with_name():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter('strrev')
def my_reverse(s):
return s[::-1]
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'strrev' in app.jinja_env.filters.keys()
assert app.jinja_env.filters['strrev'] == my_reverse
assert app.jinja_env.filters['strrev']('abcd') == 'dcba'
def test_add_template_filter_with_name():
bp = flask.Blueprint('bp', __name__)
def my_reverse(s):
return s[::-1]
bp.add_app_template_filter(my_reverse, 'strrev')
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'strrev' in app.jinja_env.filters.keys()
assert app.jinja_env.filters['strrev'] == my_reverse
assert app.jinja_env.filters['strrev']('abcd') == 'dcba'
def test_template_filter_with_template():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter()
def super_reverse(s):
return s[::-1]
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
assert rv.data == b'dcba'
def test_template_filter_after_route_with_template():
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter()
def super_reverse(s):
return s[::-1]
app.register_blueprint(bp, url_prefix='/py')
rv = app.test_client().get('/')
assert rv.data == b'dcba'
def test_add_template_filter_with_template():
bp = flask.Blueprint('bp', __name__)
def super_reverse(s):
return s[::-1]
bp.add_app_template_filter(super_reverse)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
assert rv.data == b'dcba'
def test_template_filter_with_name_and_template():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter('super_reverse')
def my_reverse(s):
return s[::-1]
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
assert rv.data == b'dcba'
def test_add_template_filter_with_name_and_template():
bp = flask.Blueprint('bp', __name__)
def my_reverse(s):
return s[::-1]
bp.add_app_template_filter(my_reverse, 'super_reverse')
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
assert rv.data == b'dcba'
def test_template_test():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test()
def is_boolean(value):
return isinstance(value, bool)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'is_boolean' in app.jinja_env.tests.keys()
assert app.jinja_env.tests['is_boolean'] == is_boolean
assert app.jinja_env.tests['is_boolean'](False)
def test_add_template_test():
bp = flask.Blueprint('bp', __name__)
def is_boolean(value):
return isinstance(value, bool)
bp.add_app_template_test(is_boolean)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'is_boolean' in app.jinja_env.tests.keys()
assert app.jinja_env.tests['is_boolean'] == is_boolean
assert app.jinja_env.tests['is_boolean'](False)
def test_template_test_with_name():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test('boolean')
def is_boolean(value):
return isinstance(value, bool)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'boolean' in app.jinja_env.tests.keys()
assert app.jinja_env.tests['boolean'] == is_boolean
assert app.jinja_env.tests['boolean'](False)
def test_add_template_test_with_name():
bp = flask.Blueprint('bp', __name__)
def is_boolean(value):
return isinstance(value, bool)
bp.add_app_template_test(is_boolean, 'boolean')
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'boolean' in app.jinja_env.tests.keys()
assert app.jinja_env.tests['boolean'] == is_boolean
assert app.jinja_env.tests['boolean'](False)
def test_template_test_with_template():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test()
def boolean(value):
return isinstance(value, bool)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
assert b'Success!' in rv.data
def test_template_test_after_route_with_template():
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test()
def boolean(value):
return isinstance(value, bool)
app.register_blueprint(bp, url_prefix='/py')
rv = app.test_client().get('/')
assert b'Success!' in rv.data
def test_add_template_test_with_template():
bp = flask.Blueprint('bp', __name__)
def boolean(value):
return isinstance(value, bool)
bp.add_app_template_test(boolean)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
assert b'Success!' in rv.data
def test_template_test_with_name_and_template():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test('boolean')
def is_boolean(value):
return isinstance(value, bool)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
assert b'Success!' in rv.data
def test_add_template_test_with_name_and_template():
bp = flask.Blueprint('bp', __name__)
def is_boolean(value):
return isinstance(value, bool)
bp.add_app_template_test(is_boolean, 'boolean')
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
assert b'Success!' in rv.data
| bsd-3-clause |
iparanza/earthenterprise | earth_enterprise/src/server/wsgi/serve/snippets/util/dbroot_v2_pb2.py | 4 | 184394 | #!/usr/bin/python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by the protocol buffer compiler. DO NOT EDIT!
from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf import reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
DESCRIPTOR = descriptor.FileDescriptor(
name='dbroot_v2.proto',
package='',
serialized_pb='\n\x0f\x64\x62root_v2.proto\";\n\x10StringEntryProto\x12\x11\n\tstring_id\x18\x01 \x02(\x07\x12\x14\n\x0cstring_value\x18\x02 \x02(\t\"8\n\x14StringIdOrValueProto\x12\x11\n\tstring_id\x18\x01 \x01(\x07\x12\r\n\x05value\x18\x02 \x01(\t\"\xc6\x01\n\x10PlanetModelProto\x12\x18\n\x06radius\x18\x01 \x01(\x01:\x08\x36\x33\x37\x38.137\x12\'\n\nflattening\x18\x02 \x01(\x01:\x13\x30.00335281066474748\x12\x16\n\x0e\x65levation_bias\x18\x04 \x01(\x01\x12\'\n\x1fnegative_altitude_exponent_bias\x18\x05 \x01(\x05\x12.\n&compressed_negative_altitude_threshold\x18\x06 \x01(\x01\"|\n\x11ProviderInfoProto\x12\x13\n\x0bprovider_id\x18\x01 \x02(\x05\x12/\n\x10\x63opyright_string\x18\x02 \x01(\x0b\x32\x15.StringIdOrValueProto\x12!\n\x15vertical_pixel_offset\x18\x03 \x01(\x05:\x02-1\"\xa2\x01\n\nPopUpProto\x12\x1f\n\x10is_balloon_style\x18\x01 \x01(\x08:\x05\x66\x61lse\x12#\n\x04text\x18\x02 \x01(\x0b\x32\x15.StringIdOrValueProto\x12)\n\x15\x62\x61\x63kground_color_abgr\x18\x03 \x01(\x07:\n4294967295\x12#\n\x0ftext_color_abgr\x18\x04 \x01(\x07:\n4278190080\"\x9e\x04\n\x13StyleAttributeProto\x12\x10\n\x08style_id\x18\x01 \x02(\t\x12\x13\n\x0bprovider_id\x18\x03 \x01(\x05\x12#\n\x0fpoly_color_abgr\x18\x04 \x01(\x07:\n4294967295\x12#\n\x0fline_color_abgr\x18\x05 \x01(\x07:\n4294967295\x12\x15\n\nline_width\x18\x06 \x01(\x02:\x01\x31\x12$\n\x10label_color_abgr\x18\x07 \x01(\x07:\n4294967295\x12\x16\n\x0blabel_scale\x18\x08 \x01(\x02:\x01\x31\x12-\n\x19placemark_icon_color_abgr\x18\t \x01(\x07:\n4294967295\x12\x1f\n\x14placemark_icon_scale\x18\n \x01(\x02:\x01\x31\x12\x32\n\x13placemark_icon_path\x18\x0b \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x1b\n\x10placemark_icon_x\x18\x0c \x01(\x05:\x01\x30\x12\x1b\n\x10placemark_icon_y\x18\r \x01(\x05:\x01\x30\x12 \n\x14placemark_icon_width\x18\x0e \x01(\x05:\x02\x33\x32\x12!\n\x15placemark_icon_height\x18\x0f \x01(\x05:\x02\x33\x32\x12\x1b\n\x06pop_up\x18\x10 \x01(\x0b\x32\x0b.PopUpProto\x12!\n\tdraw_flag\x18\x11 \x03(\x0b\x32\x0e.DrawFlagProto\"|\n\rStyleMapProto\x12\x14\n\x0cstyle_map_id\x18\x01 \x02(\x05\x12\x12\n\nchannel_id\x18\x02 \x03(\x05\x12\x1e\n\x16normal_style_attribute\x18\x03 \x01(\x05\x12!\n\x19highlight_style_attribute\x18\x04 \x01(\x05\"4\n\x0eZoomRangeProto\x12\x10\n\x08min_zoom\x18\x01 \x02(\x05\x12\x10\n\x08max_zoom\x18\x02 \x02(\x05\"\xc9\x01\n\rDrawFlagProto\x12\x33\n\x0e\x64raw_flag_type\x18\x01 \x02(\x0e\x32\x1b.DrawFlagProto.DrawFlagType\"\x82\x01\n\x0c\x44rawFlagType\x12\x12\n\x0eTYPE_FILL_ONLY\x10\x01\x12\x15\n\x11TYPE_OUTLINE_ONLY\x10\x02\x12\x19\n\x15TYPE_FILL_AND_OUTLINE\x10\x03\x12\x15\n\x11TYPE_ANTIALIASING\x10\x04\x12\x15\n\x11TYPE_CENTER_LABEL\x10\x05\"\x8c\x01\n\nLayerProto\x12#\n\nzoom_range\x18\x01 \x03(\x0b\x32\x0f.ZoomRangeProto\x12\x1f\n\x13preserve_text_level\x18\x02 \x01(\x05:\x02\x33\x30\x12\x1c\n\x14lod_begin_transition\x18\x04 \x01(\x08\x12\x1a\n\x12lod_end_transition\x18\x05 \x01(\x08\"*\n\x0b\x46olderProto\x12\x1b\n\ris_expandable\x18\x01 \x01(\x08:\x04true\"\x9e\x01\n\x10RequirementProto\x12\x15\n\rrequired_vram\x18\x03 \x01(\t\x12\x1b\n\x13required_client_ver\x18\x04 \x01(\t\x12\x13\n\x0bprobability\x18\x05 \x01(\t\x12\x1b\n\x13required_user_agent\x18\x06 \x01(\t\x12$\n\x1crequired_client_capabilities\x18\x07 \x01(\t\"`\n\x0bLookAtProto\x12\x11\n\tlongitude\x18\x01 \x02(\x02\x12\x10\n\x08latitude\x18\x02 \x02(\x02\x12\r\n\x05range\x18\x03 \x01(\x02\x12\x0c\n\x04tilt\x18\x04 \x01(\x02\x12\x0f\n\x07heading\x18\x05 \x01(\x02\"\x97\x06\n\x12NestedFeatureProto\x12\x35\n\x0c\x66\x65\x61ture_type\x18\x01 
\x01(\x0e\x32\x1f.NestedFeatureProto.FeatureType\x12&\n\x07kml_url\x18\x02 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x14\n\x0c\x64\x61tabase_url\x18\x15 \x01(\t\x12\x1a\n\x05layer\x18\x03 \x01(\x0b\x32\x0b.LayerProto\x12\x1c\n\x06\x66older\x18\x04 \x01(\x0b\x32\x0c.FolderProto\x12&\n\x0brequirement\x18\x05 \x01(\x0b\x32\x11.RequirementProto\x12\x12\n\nchannel_id\x18\x06 \x02(\x05\x12+\n\x0c\x64isplay_name\x18\x07 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x18\n\nis_visible\x18\x08 \x01(\x08:\x04true\x12\x18\n\nis_enabled\x18\t \x01(\x08:\x04true\x12\x19\n\nis_checked\x18\n \x01(\x08:\x05\x66\x61lse\x12-\n\x14layer_menu_icon_path\x18\x0b \x01(\t:\x0ficons/773_l.png\x12*\n\x0b\x64\x65scription\x18\x0c \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x1d\n\x07look_at\x18\r \x01(\x0b\x32\x0c.LookAtProto\x12\x12\n\nasset_uuid\x18\x0f \x01(\t\x12\x1c\n\x0eis_save_locked\x18\x10 \x01(\x08:\x04true\x12%\n\x08\x63hildren\x18\x11 \x03(\x0b\x32\x13.NestedFeatureProto\x12!\n\x19\x63lient_config_script_name\x18\x12 \x01(\t\x12%\n\x19\x64iorama_data_channel_base\x18\x13 \x01(\x05:\x02-1\x12%\n\x19replica_data_channel_base\x18\x14 \x01(\x05:\x02-1\"V\n\x0b\x46\x65\x61tureType\x12\x10\n\x0cTYPE_POINT_Z\x10\x01\x12\x12\n\x0eTYPE_POLYGON_Z\x10\x02\x12\x0f\n\x0bTYPE_LINE_Z\x10\x03\x12\x10\n\x0cTYPE_TERRAIN\x10\x04\"\xd6\x01\n\x16MfeDomainFeaturesProto\x12\x14\n\x0c\x63ountry_code\x18\x01 \x02(\t\x12\x13\n\x0b\x64omain_name\x18\x02 \x02(\t\x12\x44\n\x12supported_features\x18\x03 \x03(\x0e\x32(.MfeDomainFeaturesProto.SupportedFeature\"K\n\x10SupportedFeature\x12\r\n\tGEOCODING\x10\x00\x12\x10\n\x0cLOCAL_SEARCH\x10\x01\x12\x16\n\x12\x44RIVING_DIRECTIONS\x10\x02\"\xb5\x0c\n\x12\x43lientOptionsProto\x12\x1a\n\x12\x64isable_disk_cache\x18\x01 \x01(\x08\x12&\n\x1e\x64isable_embedded_browser_vista\x18\x02 \x01(\x08\x12\x1d\n\x0f\x64raw_atmosphere\x18\x03 \x01(\x08:\x04true\x12\x18\n\ndraw_stars\x18\x04 \x01(\x08:\x04true\x12\x1a\n\x12shader_file_prefix\x18\x05 \x01(\t\x12%\n\x1duse_protobuf_quadtree_packets\x18\x06 \x01(\x08\x12(\n\x1ause_extended_copyright_ids\x18\x07 \x01(\x08:\x04true\x12I\n\x16precipitations_options\x18\x08 \x01(\x0b\x32).ClientOptionsProto.PrecipitationsOptions\x12;\n\x0f\x63\x61pture_options\x18\t \x01(\x0b\x32\".ClientOptionsProto.CaptureOptions\x12\x1f\n\x11show_2d_maps_icon\x18\n \x01(\x08:\x04true\x12 \n\x18\x64isable_internal_browser\x18\x0b \x01(\x08\x12\"\n\x1ainternal_browser_blacklist\x18\x0c \x01(\t\x12,\n!internal_browser_origin_whitelist\x18\r \x01(\t:\x01*\x12 \n\x18polar_tile_merging_level\x18\x0e \x01(\x05\x12:\n\x1bjs_bridge_request_whitelist\x18\x0f \x01(\t:\x15http://*.google.com/*\x12\x35\n\x0cmaps_options\x18\x10 \x01(\x0b\x32\x1f.ClientOptionsProto.MapsOptions\x1a\xd2\x04\n\x15PrecipitationsOptions\x12\x11\n\timage_url\x18\x01 \x01(\t\x12\x1e\n\x11image_expire_time\x18\x02 \x01(\x05:\x03\x39\x30\x30\x12\x1e\n\x12max_color_distance\x18\x03 \x01(\x05:\x02\x32\x30\x12\x16\n\x0bimage_level\x18\x04 \x01(\x05:\x01\x35\x12Q\n\x0fweather_mapping\x18\x05 \x03(\x0b\x32\x38.ClientOptionsProto.PrecipitationsOptions.WeatherMapping\x12\x18\n\x10\x63louds_layer_url\x18\x06 \x01(\t\x12(\n\x1c\x61nimation_deceleration_delay\x18\x07 \x01(\x02:\x02\x32\x30\x1a\xb6\x02\n\x0eWeatherMapping\x12\x12\n\ncolor_abgr\x18\x01 \x02(\r\x12Z\n\x0cweather_type\x18\x02 \x02(\x0e\x32\x44.ClientOptionsProto.PrecipitationsOptions.WeatherMapping.WeatherType\x12\x15\n\nelongation\x18\x03 \x01(\x02:\x01\x31\x12\x0f\n\x07opacity\x18\x04 \x01(\x02\x12\x13\n\x0b\x66og_density\x18\x05 
\x01(\x02\x12\x0e\n\x06speed0\x18\x06 \x01(\x02\x12\x0e\n\x06speed1\x18\x07 \x01(\x02\x12\x0e\n\x06speed2\x18\x08 \x01(\x02\x12\x0e\n\x06speed3\x18\t \x01(\x02\"7\n\x0bWeatherType\x12\x14\n\x10NO_PRECIPITATION\x10\x00\x12\x08\n\x04RAIN\x10\x01\x12\x08\n\x04SNOW\x10\x02\x1a~\n\x0e\x43\x61ptureOptions\x12!\n\x13\x61llow_save_as_image\x18\x01 \x01(\x08:\x04true\x12\"\n\x14max_free_capture_res\x18\x02 \x01(\x05:\x04\x32\x34\x30\x30\x12%\n\x17max_premium_capture_res\x18\x03 \x01(\x05:\x04\x34\x38\x30\x30\x1a\xad\x01\n\x0bMapsOptions\x12\x13\n\x0b\x65nable_maps\x18\x01 \x01(\x08\x12\"\n\x1a\x64ocs_auto_download_enabled\x18\x02 \x01(\x08\x12#\n\x1b\x64ocs_auto_download_interval\x18\x03 \x01(\x05\x12 \n\x18\x64ocs_auto_upload_enabled\x18\x04 \x01(\x08\x12\x1e\n\x16\x64ocs_auto_upload_delay\x18\x05 \x01(\x05\"\xdc\x03\n\x14\x46\x65tchingOptionsProto\x12!\n\x16max_requests_per_query\x18\x01 \x01(\x05:\x01\x31\x12$\n\x1c\x66orce_max_requests_per_query\x18\x0c \x01(\x08\x12\x14\n\x0csort_batches\x18\r \x01(\x08\x12\x17\n\x0cmax_drawable\x18\x02 \x01(\x05:\x01\x32\x12\x16\n\x0bmax_imagery\x18\x03 \x01(\x05:\x01\x32\x12\x16\n\x0bmax_terrain\x18\x04 \x01(\x05:\x01\x35\x12\x17\n\x0cmax_quadtree\x18\x05 \x01(\x05:\x01\x35\x12\x1f\n\x14max_diorama_metadata\x18\x06 \x01(\x05:\x01\x31\x12\x1b\n\x10max_diorama_data\x18\x07 \x01(\x05:\x01\x30\x12#\n\x18max_consumer_fetch_ratio\x18\x08 \x01(\x02:\x01\x31\x12!\n\x16max_pro_ec_fetch_ratio\x18\t \x01(\x02:\x01\x30\x12\x18\n\x10safe_overall_qps\x18\n \x01(\x02\x12\x18\n\x10safe_imagery_qps\x18\x0b \x01(\x02\x12\x31\n\x11\x64omains_for_https\x18\x0e \x01(\t:\x16google.com gstatic.com\x12\x16\n\x0ehosts_for_http\x18\x0f \x01(\t\"\x91\x01\n\x17TimeMachineOptionsProto\x12\x12\n\nserver_url\x18\x01 \x01(\t\x12\x16\n\x0eis_timemachine\x18\x02 \x01(\x08\x12\x1a\n\rdwell_time_ms\x18\x03 \x01(\x05:\x03\x35\x30\x30\x12.\n\x1f\x64iscoverability_altitude_meters\x18\x04 \x01(\x05:\x05\x31\x35\x30\x30\x30\"\xe3\x01\n\x13\x41utopiaOptionsProto\x12\x37\n\x13metadata_server_url\x18\x01 \x01(\t:\x1ahttp://cbk0.google.com/cbk\x12\x37\n\x13\x64\x65pthmap_server_url\x18\x02 \x01(\t:\x1ahttp://cbk0.google.com/cbk\x12\x1e\n\x14\x63overage_overlay_url\x18\x03 \x01(\t:\x00\x12\x17\n\x0fmax_imagery_qps\x18\x04 \x01(\x02\x12!\n\x19max_metadata_depthmap_qps\x18\x05 \x01(\x02\"E\n\x0f\x43SIOptionsProto\x12\x1b\n\x13sampling_percentage\x18\x01 \x01(\x05\x12\x15\n\rexperiment_id\x18\x02 \x01(\t\"\xb3\x02\n\x0eSearchTabProto\x12\x12\n\nis_visible\x18\x01 \x02(\x08\x12(\n\ttab_label\x18\x02 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x10\n\x08\x62\x61se_url\x18\x03 \x01(\t\x12\x17\n\x0fviewport_prefix\x18\x04 \x01(\t\x12/\n\tinput_box\x18\x05 \x03(\x0b\x32\x1c.SearchTabProto.InputBoxInfo\x12&\n\x0brequirement\x18\x06 \x01(\x0b\x32\x11.RequirementProto\x1a_\n\x0cInputBoxInfo\x12$\n\x05label\x18\x01 \x02(\x0b\x32\x15.StringIdOrValueProto\x12\x12\n\nquery_verb\x18\x02 \x02(\t\x12\x15\n\rquery_prepend\x18\x03 \x01(\t\"\x90\x03\n\x0c\x43obrandProto\x12\x10\n\x08logo_url\x18\x01 \x02(\t\x12$\n\x07x_coord\x18\x02 \x01(\x0b\x32\x13.CobrandProto.Coord\x12$\n\x07y_coord\x18\x03 \x01(\x0b\x32\x13.CobrandProto.Coord\x12\x36\n\ttie_point\x18\x04 \x01(\x0e\x32\x16.CobrandProto.TiePoint:\x0b\x42OTTOM_LEFT\x12\x16\n\x0bscreen_size\x18\x05 \x01(\x01:\x01\x30\x1a\x35\n\x05\x43oord\x12\x10\n\x05value\x18\x01 \x02(\x01:\x01\x30\x12\x1a\n\x0bis_relative\x18\x02 
\x01(\x08:\x05\x66\x61lse\"\x9a\x01\n\x08TiePoint\x12\x0c\n\x08TOP_LEFT\x10\x00\x12\x0e\n\nTOP_CENTER\x10\x01\x12\r\n\tTOP_RIGHT\x10\x02\x12\x0c\n\x08MID_LEFT\x10\x03\x12\x0e\n\nMID_CENTER\x10\x04\x12\r\n\tMID_RIGHT\x10\x05\x12\x0f\n\x0b\x42OTTOM_LEFT\x10\x06\x12\x11\n\rBOTTOM_CENTER\x10\x07\x12\x10\n\x0c\x42OTTOM_RIGHT\x10\x08\"^\n\x18\x44\x61tabaseDescriptionProto\x12,\n\rdatabase_name\x18\x01 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x14\n\x0c\x64\x61tabase_url\x18\x02 \x02(\t\"=\n\x11\x43onfigScriptProto\x12\x13\n\x0bscript_name\x18\x01 \x02(\t\x12\x13\n\x0bscript_data\x18\x02 \x02(\t\"0\n\x10SwoopParamsProto\x12\x1c\n\x14start_dist_in_meters\x18\x01 \x01(\x01\"\xc4\x01\n\x12PostingServerProto\x12#\n\x04name\x18\x01 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\'\n\x08\x62\x61se_url\x18\x02 \x01(\x0b\x32\x15.StringIdOrValueProto\x12/\n\x10post_wizard_path\x18\x03 \x01(\x0b\x32\x15.StringIdOrValueProto\x12/\n\x10\x66ile_submit_path\x18\x04 \x01(\x0b\x32\x15.StringIdOrValueProto\"a\n\x16PlanetaryDatabaseProto\x12\"\n\x03url\x18\x01 \x02(\x0b\x32\x15.StringIdOrValueProto\x12#\n\x04name\x18\x02 \x02(\x0b\x32\x15.StringIdOrValueProto\"b\n\x0eLogServerProto\x12\"\n\x03url\x18\x01 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x0e\n\x06\x65nable\x18\x02 \x01(\x08\x12\x1c\n\x11throttling_factor\x18\x03 \x01(\x05:\x01\x31\"\xff+\n\x0f\x45ndSnippetProto\x12 \n\x05model\x18\x01 \x01(\x0b\x32\x11.PlanetModelProto\x12.\n\x0f\x61uth_server_url\x18\x02 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x1e\n\x16\x64isable_authentication\x18\x03 \x01(\x08\x12,\n\x0bmfe_domains\x18\x04 \x03(\x0b\x32\x17.MfeDomainFeaturesProto\x12 \n\x0emfe_lang_param\x18\x05 \x01(\t:\x08hl=$[hl]\x12\x18\n\x10\x61\x64s_url_patterns\x18\x06 \x01(\t\x12\x33\n\x14reverse_geocoder_url\x18\x07 \x01(\x0b\x32\x15.StringIdOrValueProto\x12,\n!reverse_geocoder_protocol_version\x18\x08 \x01(\x05:\x01\x33\x12\'\n\x19sky_database_is_available\x18\t \x01(\x08:\x04true\x12/\n\x10sky_database_url\x18\n \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x38\n\x19\x64\x65\x66\x61ult_web_page_intl_url\x18\x0b \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x1d\n\x11num_start_up_tips\x18\x0c \x01(\x05:\x02\x31\x37\x12\x30\n\x11start_up_tips_url\x18\r \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x1d\n\x15num_pro_start_up_tips\x18\x33 \x01(\x05\x12\x34\n\x15pro_start_up_tips_url\x18\x34 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x34\n\x15startup_tips_intl_url\x18@ \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x32\n\x13user_guide_intl_url\x18\x0e \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x36\n\x17support_center_intl_url\x18\x0f \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x38\n\x19\x62usiness_listing_intl_url\x18\x10 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x36\n\x17support_answer_intl_url\x18\x11 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x35\n\x16support_topic_intl_url\x18\x12 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x37\n\x18support_request_intl_url\x18\x13 \x01(\x0b\x32\x15.StringIdOrValueProto\x12-\n\x0e\x65\x61rth_intl_url\x18\x14 \x01(\x0b\x32\x15.StringIdOrValueProto\x12.\n\x0f\x61\x64\x64_content_url\x18\x15 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x39\n\x1asketchup_not_installed_url\x18\x16 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x31\n\x12sketchup_error_url\x18\x17 \x01(\x0b\x32\x15.StringIdOrValueProto\x12/\n\x10\x66ree_license_url\x18\x18 \x01(\x0b\x32\x15.StringIdOrValueProto\x12.\n\x0fpro_license_url\x18\x19 \x01(\x0b\x32\x15.StringIdOrValueProto\x12+\n\x0ctutorial_url\x18\x30 
\x01(\x0b\x32\x15.StringIdOrValueProto\x12\x35\n\x16keyboard_shortcuts_url\x18\x31 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x30\n\x11release_notes_url\x18\x32 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x1d\n\x0ehide_user_data\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x19\n\x0buse_ge_logo\x18\x1b \x01(\x08:\x04true\x12;\n\x1c\x64iorama_description_url_base\x18\x1c \x01(\x0b\x32\x15.StringIdOrValueProto\x12)\n\x15\x64iorama_default_color\x18\x1d \x01(\r:\n4291281607\x12\x34\n\x15\x64iorama_blacklist_url\x18\x35 \x01(\x0b\x32\x15.StringIdOrValueProto\x12+\n\x0e\x63lient_options\x18\x1e \x01(\x0b\x32\x13.ClientOptionsProto\x12/\n\x10\x66\x65tching_options\x18\x1f \x01(\x0b\x32\x15.FetchingOptionsProto\x12\x36\n\x14time_machine_options\x18 \x01(\x0b\x32\x18.TimeMachineOptionsProto\x12%\n\x0b\x63si_options\x18! \x01(\x0b\x32\x10.CSIOptionsProto\x12#\n\nsearch_tab\x18\" \x03(\x0b\x32\x0f.SearchTabProto\x12#\n\x0c\x63obrand_info\x18# \x03(\x0b\x32\r.CobrandProto\x12\x31\n\x0evalid_database\x18$ \x03(\x0b\x32\x19.DatabaseDescriptionProto\x12)\n\rconfig_script\x18% \x03(\x0b\x32\x12.ConfigScriptProto\x12\x30\n\x11\x64\x65\x61uth_server_url\x18& \x01(\x0b\x32\x15.StringIdOrValueProto\x12+\n\x10swoop_parameters\x18\' \x01(\x0b\x32\x11.SwoopParamsProto\x12,\n\x0f\x62\x62s_server_info\x18( \x01(\x0b\x32\x13.PostingServerProto\x12\x33\n\x16\x64\x61ta_error_server_info\x18) \x01(\x0b\x32\x13.PostingServerProto\x12\x33\n\x12planetary_database\x18* \x03(\x0b\x32\x17.PlanetaryDatabaseProto\x12#\n\nlog_server\x18+ \x01(\x0b\x32\x0f.LogServerProto\x12-\n\x0f\x61utopia_options\x18, \x01(\x0b\x32\x14.AutopiaOptionsProto\x12\x39\n\rsearch_config\x18\x36 \x01(\x0b\x32\".EndSnippetProto.SearchConfigProto\x12\x35\n\x0bsearch_info\x18- \x01(\x0b\x32 .EndSnippetProto.SearchInfoProto\x12N\n\x1a\x65levation_service_base_url\x18. \x01(\t:*http://maps.google.com/maps/api/elevation/\x12*\n\x1d\x65levation_profile_query_delay\x18/ \x01(\x05:\x03\x35\x30\x30\x12.\n\x0fpro_upgrade_url\x18\x37 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x32\n\x13\x65\x61rth_community_url\x18\x38 \x01(\x0b\x32\x15.StringIdOrValueProto\x12.\n\x0fgoogle_maps_url\x18\x39 \x01(\x0b\x32\x15.StringIdOrValueProto\x12*\n\x0bsharing_url\x18: \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x31\n\x12privacy_policy_url\x18; \x01(\x0b\x32\x15.StringIdOrValueProto\x12\"\n\x13\x64o_gplus_user_check\x18< \x01(\x08:\x05\x66\x61lse\x12?\n\x13rocktree_data_proto\x18= \x01(\x0b\x32\".EndSnippetProto.RockTreeDataProto\x12?\n\x10\x66ilmstrip_config\x18> \x03(\x0b\x32%.EndSnippetProto.FilmstripConfigProto\x12\x1a\n\x12show_signin_button\x18? 
\x01(\x08\x12\x35\n\x16pro_measure_upsell_url\x18\x41 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x33\n\x14pro_print_upsell_url\x18\x42 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x37\n\x0fstar_data_proto\x18\x43 \x01(\x0b\x32\x1e.EndSnippetProto.StarDataProto\x12+\n\x0c\x66\x65\x65\x64\x62\x61\x63k_url\x18\x44 \x01(\x0b\x32\x15.StringIdOrValueProto\x1a\xcc\n\n\x11SearchConfigProto\x12\x46\n\rsearch_server\x18\x01 \x03(\x0b\x32/.EndSnippetProto.SearchConfigProto.SearchServer\x12M\n\x0eonebox_service\x18\x02 \x03(\x0b\x32\x35.EndSnippetProto.SearchConfigProto.OneboxServiceProto\x12-\n\x0ekml_search_url\x18\x03 \x01(\x0b\x32\x15.StringIdOrValueProto\x12-\n\x0ekml_render_url\x18\x04 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x31\n\x12search_history_url\x18\x06 \x01(\x0b\x32\x15.StringIdOrValueProto\x12-\n\x0e\x65rror_page_url\x18\x05 \x01(\x0b\x32\x15.StringIdOrValueProto\x1a\xf4\x06\n\x0cSearchServer\x12#\n\x04name\x18\x01 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\"\n\x03url\x18\x02 \x01(\x0b\x32\x15.StringIdOrValueProto\x12Y\n\x04type\x18\x03 \x01(\x0e\x32:.EndSnippetProto.SearchConfigProto.SearchServer.ResultType:\x0fRESULT_TYPE_KML\x12\x31\n\x12html_transform_url\x18\x04 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x30\n\x11kml_transform_url\x18\x05 \x01(\x0b\x32\x15.StringIdOrValueProto\x12W\n\x0fsupplemental_ui\x18\x06 \x01(\x0b\x32>.EndSnippetProto.SearchConfigProto.SearchServer.SupplementalUi\x12)\n\nsuggestion\x18\t \x03(\x0b\x32\x15.StringIdOrValueProto\x12Q\n\tsearchlet\x18\x07 \x03(\x0b\x32>.EndSnippetProto.SearchConfigProto.SearchServer.SearchletProto\x12\'\n\x0crequirements\x18\x08 \x01(\x0b\x32\x11.RequirementProto\x12-\n\x0esuggest_server\x18\n \x01(\x0b\x32\x15.StringIdOrValueProto\x1ao\n\x0eSupplementalUi\x12\"\n\x03url\x18\x01 \x01(\x0b\x32\x15.StringIdOrValueProto\x12$\n\x05label\x18\x02 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x13\n\x06height\x18\x03 \x01(\x05:\x03\x31\x36\x30\x1a\x82\x01\n\x0eSearchletProto\x12\"\n\x03url\x18\x01 \x01(\x0b\x32\x15.StringIdOrValueProto\x12#\n\x04name\x18\x02 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\'\n\x0crequirements\x18\x03 \x01(\x0b\x32\x11.RequirementProto\"6\n\nResultType\x12\x13\n\x0fRESULT_TYPE_KML\x10\x00\x12\x13\n\x0fRESULT_TYPE_XML\x10\x01\x1ai\n\x12OneboxServiceProto\x12*\n\x0bservice_url\x18\x01 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\'\n\x0crequirements\x18\x02 \x01(\x0b\x32\x11.RequirementProto\x1a]\n\x0fSearchInfoProto\x12\x30\n\x0b\x64\x65\x66\x61ult_url\x18\x01 \x01(\t:\x1bhttp://maps.google.com/maps\x12\x18\n\rgeocode_param\x18\x02 \x01(\t:\x01q\x1a\x37\n\x11RockTreeDataProto\x12\"\n\x03url\x18\x01 \x01(\x0b\x32\x15.StringIdOrValueProto\x1a\x91\x06\n\x14\x46ilmstripConfigProto\x12\'\n\x0crequirements\x18\x01 \x01(\x0b\x32\x11.RequirementProto\x12\x34\n\x15\x61lleycat_url_template\x18\x02 \x01(\x0b\x32\x15.StringIdOrValueProto\x12=\n\x1e\x66\x61llback_alleycat_url_template\x18\t \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x34\n\x15metadata_url_template\x18\x03 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x35\n\x16thumbnail_url_template\x18\x04 \x01(\x0b\x32\x15.StringIdOrValueProto\x12/\n\x10kml_url_template\x18\x05 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x31\n\x12\x66\x65\x61tured_tours_url\x18\x06 \x01(\x0b\x32\x15.StringIdOrValueProto\x12 \n\x18\x65nable_viewport_fallback\x18\x07 \x01(\x08\x12\"\n\x1aviewport_fallback_distance\x18\x08 \x01(\r\x12T\n\x0cimagery_type\x18\n 
\x03(\x0b\x32>.EndSnippetProto.FilmstripConfigProto.AlleycatImageryTypeProto\x1a\xed\x01\n\x18\x41lleycatImageryTypeProto\x12\x17\n\x0fimagery_type_id\x18\x01 \x01(\x05\x12\x1a\n\x12imagery_type_label\x18\x02 \x01(\t\x12\x34\n\x15metadata_url_template\x18\x03 \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x35\n\x16thumbnail_url_template\x18\x04 \x01(\x0b\x32\x15.StringIdOrValueProto\x12/\n\x10kml_url_template\x18\x05 \x01(\x0b\x32\x15.StringIdOrValueProto\x1a\x33\n\rStarDataProto\x12\"\n\x03url\x18\x01 \x01(\x0b\x32\x15.StringIdOrValueProto\"b\n\x0e\x44\x62RootRefProto\x12\x0b\n\x03url\x18\x02 \x02(\t\x12\x1a\n\x0bis_critical\x18\x01 \x01(\x08:\x05\x66\x61lse\x12\'\n\x0crequirements\x18\x03 \x01(\x0b\x32\x11.RequirementProto\"0\n\x14\x44\x61tabaseVersionProto\x12\x18\n\x10quadtree_version\x18\x01 \x02(\r\"\xb6\x04\n\x0b\x44\x62RootProto\x12,\n\rdatabase_name\x18\x0f \x01(\x0b\x32\x15.StringIdOrValueProto\x12\x1d\n\x0fimagery_present\x18\x01 \x01(\x08:\x04true\x12\x1c\n\rproto_imagery\x18\x0e \x01(\x08:\x05\x66\x61lse\x12\x1e\n\x0fterrain_present\x18\x02 \x01(\x08:\x05\x66\x61lse\x12)\n\rprovider_info\x18\x03 \x03(\x0b\x32\x12.ProviderInfoProto\x12+\n\x0enested_feature\x18\x04 \x03(\x0b\x32\x13.NestedFeatureProto\x12-\n\x0fstyle_attribute\x18\x05 \x03(\x0b\x32\x14.StyleAttributeProto\x12!\n\tstyle_map\x18\x06 \x03(\x0b\x32\x0e.StyleMapProto\x12%\n\x0b\x65nd_snippet\x18\x07 \x01(\x0b\x32\x10.EndSnippetProto\x12,\n\x11translation_entry\x18\x08 \x03(\x0b\x32\x11.StringEntryProto\x12\x14\n\x08language\x18\t \x01(\t:\x02\x65n\x12\x12\n\x07version\x18\n \x01(\x05:\x01\x35\x12)\n\x10\x64\x62root_reference\x18\x0b \x03(\x0b\x32\x0f.DbRootRefProto\x12/\n\x10\x64\x61tabase_version\x18\r \x01(\x0b\x32\x15.DatabaseVersionProto\x12\x17\n\x0frefresh_timeout\x18\x10 \x01(\x05\"\xa9\x01\n\x14\x45ncryptedDbRootProto\x12=\n\x0f\x65ncryption_type\x18\x01 \x01(\x0e\x32$.EncryptedDbRootProto.EncryptionType\x12\x17\n\x0f\x65ncryption_data\x18\x02 \x01(\x0c\x12\x13\n\x0b\x64\x62root_data\x18\x03 \x01(\x0c\"$\n\x0e\x45ncryptionType\x12\x12\n\x0e\x45NCRYPTION_XOR\x10\x00')
_DRAWFLAGPROTO_DRAWFLAGTYPE = descriptor.EnumDescriptor(
name='DrawFlagType',
full_name='DrawFlagProto.DrawFlagType',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='TYPE_FILL_ONLY', index=0, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_OUTLINE_ONLY', index=1, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_FILL_AND_OUTLINE', index=2, number=3,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_ANTIALIASING', index=3, number=4,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_CENTER_LABEL', index=4, number=5,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1427,
serialized_end=1557,
)
_NESTEDFEATUREPROTO_FEATURETYPE = descriptor.EnumDescriptor(
name='FeatureType',
full_name='NestedFeatureProto.FeatureType',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='TYPE_POINT_Z', index=0, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_POLYGON_Z', index=1, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_LINE_Z', index=2, number=3,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_TERRAIN', index=3, number=4,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2711,
serialized_end=2797,
)
_MFEDOMAINFEATURESPROTO_SUPPORTEDFEATURE = descriptor.EnumDescriptor(
name='SupportedFeature',
full_name='MfeDomainFeaturesProto.SupportedFeature',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='GEOCODING', index=0, number=0,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='LOCAL_SEARCH', index=1, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DRIVING_DIRECTIONS', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2939,
serialized_end=3014,
)
_CLIENTOPTIONSPROTO_PRECIPITATIONSOPTIONS_WEATHERMAPPING_WEATHERTYPE = descriptor.EnumDescriptor(
name='WeatherType',
full_name='ClientOptionsProto.PrecipitationsOptions.WeatherMapping.WeatherType',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='NO_PRECIPITATION', index=0, number=0,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='RAIN', index=1, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='SNOW', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=4247,
serialized_end=4302,
)
_COBRANDPROTO_TIEPOINT = descriptor.EnumDescriptor(
name='TiePoint',
full_name='CobrandProto.TiePoint',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='TOP_LEFT', index=0, number=0,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TOP_CENTER', index=1, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TOP_RIGHT', index=2, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='MID_LEFT', index=3, number=3,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='MID_CENTER', index=4, number=4,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='MID_RIGHT', index=5, number=5,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='BOTTOM_LEFT', index=6, number=6,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='BOTTOM_CENTER', index=7, number=7,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='BOTTOM_RIGHT', index=8, number=8,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=6093,
serialized_end=6247,
)
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_RESULTTYPE = descriptor.EnumDescriptor(
name='ResultType',
full_name='EndSnippetProto.SearchConfigProto.SearchServer.ResultType',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='RESULT_TYPE_KML', index=0, number=0,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='RESULT_TYPE_XML', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=11334,
serialized_end=11388,
)
_ENCRYPTEDDBROOTPROTO_ENCRYPTIONTYPE = descriptor.EnumDescriptor(
name='EncryptionType',
full_name='EncryptedDbRootProto.EncryptionType',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='ENCRYPTION_XOR', index=0, number=0,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=13343,
serialized_end=13379,
)
_STRINGENTRYPROTO = descriptor.Descriptor(
name='StringEntryProto',
full_name='StringEntryProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='string_id', full_name='StringEntryProto.string_id', index=0,
number=1, type=7, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='string_value', full_name='StringEntryProto.string_value', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=19,
serialized_end=78,
)
_STRINGIDORVALUEPROTO = descriptor.Descriptor(
name='StringIdOrValueProto',
full_name='StringIdOrValueProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='string_id', full_name='StringIdOrValueProto.string_id', index=0,
number=1, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='value', full_name='StringIdOrValueProto.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=80,
serialized_end=136,
)
_PLANETMODELPROTO = descriptor.Descriptor(
name='PlanetModelProto',
full_name='PlanetModelProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='radius', full_name='PlanetModelProto.radius', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=True, default_value=6378.137,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='flattening', full_name='PlanetModelProto.flattening', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=True, default_value=0.00335281066474748,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='elevation_bias', full_name='PlanetModelProto.elevation_bias', index=2,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='negative_altitude_exponent_bias', full_name='PlanetModelProto.negative_altitude_exponent_bias', index=3,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='compressed_negative_altitude_threshold', full_name='PlanetModelProto.compressed_negative_altitude_threshold', index=4,
number=6, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=139,
serialized_end=337,
)
_PROVIDERINFOPROTO = descriptor.Descriptor(
name='ProviderInfoProto',
full_name='ProviderInfoProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='provider_id', full_name='ProviderInfoProto.provider_id', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='copyright_string', full_name='ProviderInfoProto.copyright_string', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='vertical_pixel_offset', full_name='ProviderInfoProto.vertical_pixel_offset', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=339,
serialized_end=463,
)
_POPUPPROTO = descriptor.Descriptor(
name='PopUpProto',
full_name='PopUpProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='is_balloon_style', full_name='PopUpProto.is_balloon_style', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='text', full_name='PopUpProto.text', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='background_color_abgr', full_name='PopUpProto.background_color_abgr', index=2,
number=3, type=7, cpp_type=3, label=1,
has_default_value=True, default_value=4294967295,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='text_color_abgr', full_name='PopUpProto.text_color_abgr', index=3,
number=4, type=7, cpp_type=3, label=1,
has_default_value=True, default_value=4278190080,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=466,
serialized_end=628,
)
_STYLEATTRIBUTEPROTO = descriptor.Descriptor(
name='StyleAttributeProto',
full_name='StyleAttributeProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='style_id', full_name='StyleAttributeProto.style_id', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='provider_id', full_name='StyleAttributeProto.provider_id', index=1,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='poly_color_abgr', full_name='StyleAttributeProto.poly_color_abgr', index=2,
number=4, type=7, cpp_type=3, label=1,
has_default_value=True, default_value=4294967295,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='line_color_abgr', full_name='StyleAttributeProto.line_color_abgr', index=3,
number=5, type=7, cpp_type=3, label=1,
has_default_value=True, default_value=4294967295,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='line_width', full_name='StyleAttributeProto.line_width', index=4,
number=6, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='label_color_abgr', full_name='StyleAttributeProto.label_color_abgr', index=5,
number=7, type=7, cpp_type=3, label=1,
has_default_value=True, default_value=4294967295,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='label_scale', full_name='StyleAttributeProto.label_scale', index=6,
number=8, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='placemark_icon_color_abgr', full_name='StyleAttributeProto.placemark_icon_color_abgr', index=7,
number=9, type=7, cpp_type=3, label=1,
has_default_value=True, default_value=4294967295,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='placemark_icon_scale', full_name='StyleAttributeProto.placemark_icon_scale', index=8,
number=10, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='placemark_icon_path', full_name='StyleAttributeProto.placemark_icon_path', index=9,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='placemark_icon_x', full_name='StyleAttributeProto.placemark_icon_x', index=10,
number=12, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='placemark_icon_y', full_name='StyleAttributeProto.placemark_icon_y', index=11,
number=13, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='placemark_icon_width', full_name='StyleAttributeProto.placemark_icon_width', index=12,
number=14, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=32,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='placemark_icon_height', full_name='StyleAttributeProto.placemark_icon_height', index=13,
number=15, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=32,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='pop_up', full_name='StyleAttributeProto.pop_up', index=14,
number=16, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='draw_flag', full_name='StyleAttributeProto.draw_flag', index=15,
number=17, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=631,
serialized_end=1173,
)
_STYLEMAPPROTO = descriptor.Descriptor(
name='StyleMapProto',
full_name='StyleMapProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='style_map_id', full_name='StyleMapProto.style_map_id', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='channel_id', full_name='StyleMapProto.channel_id', index=1,
number=2, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='normal_style_attribute', full_name='StyleMapProto.normal_style_attribute', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='highlight_style_attribute', full_name='StyleMapProto.highlight_style_attribute', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1175,
serialized_end=1299,
)
_ZOOMRANGEPROTO = descriptor.Descriptor(
name='ZoomRangeProto',
full_name='ZoomRangeProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='min_zoom', full_name='ZoomRangeProto.min_zoom', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='max_zoom', full_name='ZoomRangeProto.max_zoom', index=1,
number=2, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1301,
serialized_end=1353,
)
_DRAWFLAGPROTO = descriptor.Descriptor(
name='DrawFlagProto',
full_name='DrawFlagProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='draw_flag_type', full_name='DrawFlagProto.draw_flag_type', index=0,
number=1, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_DRAWFLAGPROTO_DRAWFLAGTYPE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1356,
serialized_end=1557,
)
_LAYERPROTO = descriptor.Descriptor(
name='LayerProto',
full_name='LayerProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='zoom_range', full_name='LayerProto.zoom_range', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='preserve_text_level', full_name='LayerProto.preserve_text_level', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=30,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='lod_begin_transition', full_name='LayerProto.lod_begin_transition', index=2,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='lod_end_transition', full_name='LayerProto.lod_end_transition', index=3,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1560,
serialized_end=1700,
)
_FOLDERPROTO = descriptor.Descriptor(
name='FolderProto',
full_name='FolderProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='is_expandable', full_name='FolderProto.is_expandable', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1702,
serialized_end=1744,
)
_REQUIREMENTPROTO = descriptor.Descriptor(
name='RequirementProto',
full_name='RequirementProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='required_vram', full_name='RequirementProto.required_vram', index=0,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='required_client_ver', full_name='RequirementProto.required_client_ver', index=1,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='probability', full_name='RequirementProto.probability', index=2,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='required_user_agent', full_name='RequirementProto.required_user_agent', index=3,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='required_client_capabilities', full_name='RequirementProto.required_client_capabilities', index=4,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1747,
serialized_end=1905,
)
_LOOKATPROTO = descriptor.Descriptor(
name='LookAtProto',
full_name='LookAtProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='longitude', full_name='LookAtProto.longitude', index=0,
number=1, type=2, cpp_type=6, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='latitude', full_name='LookAtProto.latitude', index=1,
number=2, type=2, cpp_type=6, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='range', full_name='LookAtProto.range', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='tilt', full_name='LookAtProto.tilt', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='heading', full_name='LookAtProto.heading', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1907,
serialized_end=2003,
)
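# NestedFeatureProto (below): one node of the layer/folder tree. It carries a
# feature_type enum, a required channel_id, optional kml_url/layer/folder/
# requirement/display_name/description/look_at sub-messages, UI flags that
# default to visible/enabled/unchecked, a layer_menu_icon_path defaulting to
# "icons/773_l.png", and repeated children for arbitrary nesting.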
_NESTEDFEATUREPROTO = descriptor.Descriptor(
name='NestedFeatureProto',
full_name='NestedFeatureProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='feature_type', full_name='NestedFeatureProto.feature_type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='kml_url', full_name='NestedFeatureProto.kml_url', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='database_url', full_name='NestedFeatureProto.database_url', index=2,
number=21, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='layer', full_name='NestedFeatureProto.layer', index=3,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='folder', full_name='NestedFeatureProto.folder', index=4,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='requirement', full_name='NestedFeatureProto.requirement', index=5,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='channel_id', full_name='NestedFeatureProto.channel_id', index=6,
number=6, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='display_name', full_name='NestedFeatureProto.display_name', index=7,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='is_visible', full_name='NestedFeatureProto.is_visible', index=8,
number=8, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='is_enabled', full_name='NestedFeatureProto.is_enabled', index=9,
number=9, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='is_checked', full_name='NestedFeatureProto.is_checked', index=10,
number=10, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='layer_menu_icon_path', full_name='NestedFeatureProto.layer_menu_icon_path', index=11,
number=11, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=unicode("icons/773_l.png", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='description', full_name='NestedFeatureProto.description', index=12,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='look_at', full_name='NestedFeatureProto.look_at', index=13,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='asset_uuid', full_name='NestedFeatureProto.asset_uuid', index=14,
number=15, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='is_save_locked', full_name='NestedFeatureProto.is_save_locked', index=15,
number=16, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='children', full_name='NestedFeatureProto.children', index=16,
number=17, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='client_config_script_name', full_name='NestedFeatureProto.client_config_script_name', index=17,
number=18, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='diorama_data_channel_base', full_name='NestedFeatureProto.diorama_data_channel_base', index=18,
number=19, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='replica_data_channel_base', full_name='NestedFeatureProto.replica_data_channel_base', index=19,
number=20, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_NESTEDFEATUREPROTO_FEATURETYPE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2006,
serialized_end=2797,
)
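# MfeDomainFeaturesProto (below): required country_code and domain_name strings
# plus a repeated supported_features enum, presumably describing which Maps
# (MFE) features a given domain supports.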
_MFEDOMAINFEATURESPROTO = descriptor.Descriptor(
name='MfeDomainFeaturesProto',
full_name='MfeDomainFeaturesProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='country_code', full_name='MfeDomainFeaturesProto.country_code', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='domain_name', full_name='MfeDomainFeaturesProto.domain_name', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='supported_features', full_name='MfeDomainFeaturesProto.supported_features', index=2,
number=3, type=14, cpp_type=8, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_MFEDOMAINFEATURESPROTO_SUPPORTEDFEATURE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2800,
serialized_end=3014,
)
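# ClientOptionsProto and its nested messages follow: PrecipitationsOptions (with
# WeatherMapping entries mapping a color_abgr value to a weather_type,
# elongation, opacity, fog density and four speeds), CaptureOptions (capture
# resolution limits, defaults 2400/4800), MapsOptions (2D Maps integration and
# docs auto up/download switches), and the top-level rendering/browser options
# themselves (draw_atmosphere/draw_stars default True,
# internal_browser_origin_whitelist default "*", js_bridge_request_whitelist
# default "http://*.google.com/*", and so on).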
_CLIENTOPTIONSPROTO_PRECIPITATIONSOPTIONS_WEATHERMAPPING = descriptor.Descriptor(
name='WeatherMapping',
full_name='ClientOptionsProto.PrecipitationsOptions.WeatherMapping',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='color_abgr', full_name='ClientOptionsProto.PrecipitationsOptions.WeatherMapping.color_abgr', index=0,
number=1, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='weather_type', full_name='ClientOptionsProto.PrecipitationsOptions.WeatherMapping.weather_type', index=1,
number=2, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='elongation', full_name='ClientOptionsProto.PrecipitationsOptions.WeatherMapping.elongation', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='opacity', full_name='ClientOptionsProto.PrecipitationsOptions.WeatherMapping.opacity', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='fog_density', full_name='ClientOptionsProto.PrecipitationsOptions.WeatherMapping.fog_density', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='speed0', full_name='ClientOptionsProto.PrecipitationsOptions.WeatherMapping.speed0', index=5,
number=6, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='speed1', full_name='ClientOptionsProto.PrecipitationsOptions.WeatherMapping.speed1', index=6,
number=7, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='speed2', full_name='ClientOptionsProto.PrecipitationsOptions.WeatherMapping.speed2', index=7,
number=8, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='speed3', full_name='ClientOptionsProto.PrecipitationsOptions.WeatherMapping.speed3', index=8,
number=9, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_CLIENTOPTIONSPROTO_PRECIPITATIONSOPTIONS_WEATHERMAPPING_WEATHERTYPE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=3992,
serialized_end=4302,
)
_CLIENTOPTIONSPROTO_PRECIPITATIONSOPTIONS = descriptor.Descriptor(
name='PrecipitationsOptions',
full_name='ClientOptionsProto.PrecipitationsOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='image_url', full_name='ClientOptionsProto.PrecipitationsOptions.image_url', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='image_expire_time', full_name='ClientOptionsProto.PrecipitationsOptions.image_expire_time', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=900,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='max_color_distance', full_name='ClientOptionsProto.PrecipitationsOptions.max_color_distance', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=20,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='image_level', full_name='ClientOptionsProto.PrecipitationsOptions.image_level', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=5,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='weather_mapping', full_name='ClientOptionsProto.PrecipitationsOptions.weather_mapping', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='clouds_layer_url', full_name='ClientOptionsProto.PrecipitationsOptions.clouds_layer_url', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='animation_deceleration_delay', full_name='ClientOptionsProto.PrecipitationsOptions.animation_deceleration_delay', index=6,
number=7, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=20,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_CLIENTOPTIONSPROTO_PRECIPITATIONSOPTIONS_WEATHERMAPPING, ],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=3708,
serialized_end=4302,
)
_CLIENTOPTIONSPROTO_CAPTUREOPTIONS = descriptor.Descriptor(
name='CaptureOptions',
full_name='ClientOptionsProto.CaptureOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='allow_save_as_image', full_name='ClientOptionsProto.CaptureOptions.allow_save_as_image', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='max_free_capture_res', full_name='ClientOptionsProto.CaptureOptions.max_free_capture_res', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=2400,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='max_premium_capture_res', full_name='ClientOptionsProto.CaptureOptions.max_premium_capture_res', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=4800,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=4304,
serialized_end=4430,
)
_CLIENTOPTIONSPROTO_MAPSOPTIONS = descriptor.Descriptor(
name='MapsOptions',
full_name='ClientOptionsProto.MapsOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='enable_maps', full_name='ClientOptionsProto.MapsOptions.enable_maps', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='docs_auto_download_enabled', full_name='ClientOptionsProto.MapsOptions.docs_auto_download_enabled', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='docs_auto_download_interval', full_name='ClientOptionsProto.MapsOptions.docs_auto_download_interval', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='docs_auto_upload_enabled', full_name='ClientOptionsProto.MapsOptions.docs_auto_upload_enabled', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='docs_auto_upload_delay', full_name='ClientOptionsProto.MapsOptions.docs_auto_upload_delay', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=4433,
serialized_end=4606,
)
_CLIENTOPTIONSPROTO = descriptor.Descriptor(
name='ClientOptionsProto',
full_name='ClientOptionsProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='disable_disk_cache', full_name='ClientOptionsProto.disable_disk_cache', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='disable_embedded_browser_vista', full_name='ClientOptionsProto.disable_embedded_browser_vista', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='draw_atmosphere', full_name='ClientOptionsProto.draw_atmosphere', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='draw_stars', full_name='ClientOptionsProto.draw_stars', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='shader_file_prefix', full_name='ClientOptionsProto.shader_file_prefix', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='use_protobuf_quadtree_packets', full_name='ClientOptionsProto.use_protobuf_quadtree_packets', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='use_extended_copyright_ids', full_name='ClientOptionsProto.use_extended_copyright_ids', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='precipitations_options', full_name='ClientOptionsProto.precipitations_options', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='capture_options', full_name='ClientOptionsProto.capture_options', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='show_2d_maps_icon', full_name='ClientOptionsProto.show_2d_maps_icon', index=9,
number=10, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='disable_internal_browser', full_name='ClientOptionsProto.disable_internal_browser', index=10,
number=11, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='internal_browser_blacklist', full_name='ClientOptionsProto.internal_browser_blacklist', index=11,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='internal_browser_origin_whitelist', full_name='ClientOptionsProto.internal_browser_origin_whitelist', index=12,
number=13, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=unicode("*", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='polar_tile_merging_level', full_name='ClientOptionsProto.polar_tile_merging_level', index=13,
number=14, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='js_bridge_request_whitelist', full_name='ClientOptionsProto.js_bridge_request_whitelist', index=14,
number=15, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=unicode("http://*.google.com/*", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='maps_options', full_name='ClientOptionsProto.maps_options', index=15,
number=16, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_CLIENTOPTIONSPROTO_PRECIPITATIONSOPTIONS, _CLIENTOPTIONSPROTO_CAPTUREOPTIONS, _CLIENTOPTIONSPROTO_MAPSOPTIONS, ],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=3017,
serialized_end=4606,
)
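# FetchingOptionsProto (below): per-request-type fetch limits (max_drawable,
# max_imagery, max_terrain, max_quadtree, diorama counters), fetch-ratio and QPS
# safety caps, and the HTTPS/HTTP host lists (domains_for_https defaults to
# "google.com gstatic.com").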
_FETCHINGOPTIONSPROTO = descriptor.Descriptor(
name='FetchingOptionsProto',
full_name='FetchingOptionsProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='max_requests_per_query', full_name='FetchingOptionsProto.max_requests_per_query', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='force_max_requests_per_query', full_name='FetchingOptionsProto.force_max_requests_per_query', index=1,
number=12, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='sort_batches', full_name='FetchingOptionsProto.sort_batches', index=2,
number=13, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='max_drawable', full_name='FetchingOptionsProto.max_drawable', index=3,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=2,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='max_imagery', full_name='FetchingOptionsProto.max_imagery', index=4,
number=3, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=2,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='max_terrain', full_name='FetchingOptionsProto.max_terrain', index=5,
number=4, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=5,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='max_quadtree', full_name='FetchingOptionsProto.max_quadtree', index=6,
number=5, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=5,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='max_diorama_metadata', full_name='FetchingOptionsProto.max_diorama_metadata', index=7,
number=6, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='max_diorama_data', full_name='FetchingOptionsProto.max_diorama_data', index=8,
number=7, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='max_consumer_fetch_ratio', full_name='FetchingOptionsProto.max_consumer_fetch_ratio', index=9,
number=8, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='max_pro_ec_fetch_ratio', full_name='FetchingOptionsProto.max_pro_ec_fetch_ratio', index=10,
number=9, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='safe_overall_qps', full_name='FetchingOptionsProto.safe_overall_qps', index=11,
number=10, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='safe_imagery_qps', full_name='FetchingOptionsProto.safe_imagery_qps', index=12,
number=11, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='domains_for_https', full_name='FetchingOptionsProto.domains_for_https', index=13,
number=14, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=unicode("google.com gstatic.com", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='hosts_for_http', full_name='FetchingOptionsProto.hosts_for_http', index=14,
number=15, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=4609,
serialized_end=5085,
)
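# TimeMachineOptionsProto (below): historical-imagery ("time machine") settings:
# server_url, an is_timemachine flag, dwell_time_ms (default 500) and
# discoverability_altitude_meters (default 15000).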
_TIMEMACHINEOPTIONSPROTO = descriptor.Descriptor(
name='TimeMachineOptionsProto',
full_name='TimeMachineOptionsProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='server_url', full_name='TimeMachineOptionsProto.server_url', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='is_timemachine', full_name='TimeMachineOptionsProto.is_timemachine', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='dwell_time_ms', full_name='TimeMachineOptionsProto.dwell_time_ms', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=500,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='discoverability_altitude_meters', full_name='TimeMachineOptionsProto.discoverability_altitude_meters', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=15000,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=5088,
serialized_end=5233,
)
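# AutopiaOptionsProto (below): metadata/depthmap server URLs (both defaulting to
# "http://cbk0.google.com/cbk"), a coverage overlay URL and per-service QPS
# caps; this looks like the Street View ("Autopia") imagery configuration.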
_AUTOPIAOPTIONSPROTO = descriptor.Descriptor(
name='AutopiaOptionsProto',
full_name='AutopiaOptionsProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='metadata_server_url', full_name='AutopiaOptionsProto.metadata_server_url', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=unicode("http://cbk0.google.com/cbk", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='depthmap_server_url', full_name='AutopiaOptionsProto.depthmap_server_url', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=unicode("http://cbk0.google.com/cbk", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='coverage_overlay_url', full_name='AutopiaOptionsProto.coverage_overlay_url', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='max_imagery_qps', full_name='AutopiaOptionsProto.max_imagery_qps', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='max_metadata_depthmap_qps', full_name='AutopiaOptionsProto.max_metadata_depthmap_qps', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=5236,
serialized_end=5463,
)
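# CSIOptionsProto (below): sampling_percentage and experiment_id, presumably for
# client-side instrumentation (CSI) latency reporting.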
_CSIOPTIONSPROTO = descriptor.Descriptor(
name='CSIOptionsProto',
full_name='CSIOptionsProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='sampling_percentage', full_name='CSIOptionsProto.sampling_percentage', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='experiment_id', full_name='CSIOptionsProto.experiment_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=5465,
serialized_end=5534,
)
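# SearchTabProto and its nested InputBoxInfo (below): configuration for a search
# tab: a required is_visible flag, tab_label, base_url, viewport_prefix, a
# repeated list of input boxes (each with a required label and query_verb plus
# an optional query_prepend) and an optional requirement gate.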
_SEARCHTABPROTO_INPUTBOXINFO = descriptor.Descriptor(
name='InputBoxInfo',
full_name='SearchTabProto.InputBoxInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='label', full_name='SearchTabProto.InputBoxInfo.label', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='query_verb', full_name='SearchTabProto.InputBoxInfo.query_verb', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='query_prepend', full_name='SearchTabProto.InputBoxInfo.query_prepend', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=5749,
serialized_end=5844,
)
_SEARCHTABPROTO = descriptor.Descriptor(
name='SearchTabProto',
full_name='SearchTabProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='is_visible', full_name='SearchTabProto.is_visible', index=0,
number=1, type=8, cpp_type=7, label=2,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='tab_label', full_name='SearchTabProto.tab_label', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='base_url', full_name='SearchTabProto.base_url', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='viewport_prefix', full_name='SearchTabProto.viewport_prefix', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='input_box', full_name='SearchTabProto.input_box', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='requirement', full_name='SearchTabProto.requirement', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_SEARCHTABPROTO_INPUTBOXINFO, ],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=5537,
serialized_end=5844,
)
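# CobrandProto and its nested Coord (below): placement of a co-branding logo:
# a required logo_url, x/y coordinates that may be absolute or relative
# (is_relative), a tie_point enum (default 6) and a screen_size factor.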
_COBRANDPROTO_COORD = descriptor.Descriptor(
name='Coord',
full_name='CobrandProto.Coord',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='value', full_name='CobrandProto.Coord.value', index=0,
number=1, type=1, cpp_type=5, label=2,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='is_relative', full_name='CobrandProto.Coord.is_relative', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6037,
serialized_end=6090,
)
_COBRANDPROTO = descriptor.Descriptor(
name='CobrandProto',
full_name='CobrandProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='logo_url', full_name='CobrandProto.logo_url', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='x_coord', full_name='CobrandProto.x_coord', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='y_coord', full_name='CobrandProto.y_coord', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='tie_point', full_name='CobrandProto.tie_point', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=6,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='screen_size', full_name='CobrandProto.screen_size', index=4,
number=5, type=1, cpp_type=5, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_COBRANDPROTO_COORD, ],
enum_types=[
_COBRANDPROTO_TIEPOINT,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=5847,
serialized_end=6247,
)
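# DatabaseDescriptionProto (below): an optional database_name sub-message plus a
# required database_url string.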
_DATABASEDESCRIPTIONPROTO = descriptor.Descriptor(
name='DatabaseDescriptionProto',
full_name='DatabaseDescriptionProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='database_name', full_name='DatabaseDescriptionProto.database_name', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='database_url', full_name='DatabaseDescriptionProto.database_url', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6249,
serialized_end=6343,
)
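# ConfigScriptProto (below): a named script (required script_name and
# script_data strings), apparently referenced by name from
# NestedFeatureProto.client_config_script_name.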
_CONFIGSCRIPTPROTO = descriptor.Descriptor(
name='ConfigScriptProto',
full_name='ConfigScriptProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='script_name', full_name='ConfigScriptProto.script_name', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='script_data', full_name='ConfigScriptProto.script_data', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6345,
serialized_end=6406,
)
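# SwoopParamsProto (below): a single optional start_dist_in_meters double,
# presumably controlling when the swoop (fly-to) animation begins.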
_SWOOPPARAMSPROTO = descriptor.Descriptor(
name='SwoopParamsProto',
full_name='SwoopParamsProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='start_dist_in_meters', full_name='SwoopParamsProto.start_dist_in_meters', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6408,
serialized_end=6456,
)
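# PostingServerProto (below): name, base_url, post_wizard_path and
# file_submit_path sub-messages describing a content-posting server.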
_POSTINGSERVERPROTO = descriptor.Descriptor(
name='PostingServerProto',
full_name='PostingServerProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='name', full_name='PostingServerProto.name', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='base_url', full_name='PostingServerProto.base_url', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='post_wizard_path', full_name='PostingServerProto.post_wizard_path', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='file_submit_path', full_name='PostingServerProto.file_submit_path', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6459,
serialized_end=6655,
)
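# PlanetaryDatabaseProto (below): required url and name sub-messages,
# presumably describing an alternate planetary database the client can load.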
_PLANETARYDATABASEPROTO = descriptor.Descriptor(
name='PlanetaryDatabaseProto',
full_name='PlanetaryDatabaseProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='url', full_name='PlanetaryDatabaseProto.url', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='name', full_name='PlanetaryDatabaseProto.name', index=1,
number=2, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6657,
serialized_end=6754,
)
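# LogServerProto (below): an optional url sub-message, an enable flag and a
# throttling_factor (default 1), apparently governing client log uploads.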
_LOGSERVERPROTO = descriptor.Descriptor(
name='LogServerProto',
full_name='LogServerProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='url', full_name='LogServerProto.url', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='enable', full_name='LogServerProto.enable', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='throttling_factor', full_name='LogServerProto.throttling_factor', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6756,
serialized_end=6854,
)
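# EndSnippetProto.SearchConfigProto and its nested descriptors follow:
# SearchServer (name, url, result-type enum, HTML/KML transform URLs, an
# optional SupplementalUi with a default height of 160, repeated suggestion and
# SearchletProto entries, an optional requirements gate and a suggest_server),
# OneboxServiceProto (service_url plus requirements), and the SearchConfigProto
# container itself (repeated search_server/onebox_service, KML search/render
# URLs, search_history_url and error_page_url).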
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_SUPPLEMENTALUI = descriptor.Descriptor(
name='SupplementalUi',
full_name='EndSnippetProto.SearchConfigProto.SearchServer.SupplementalUi',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='url', full_name='EndSnippetProto.SearchConfigProto.SearchServer.SupplementalUi.url', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='label', full_name='EndSnippetProto.SearchConfigProto.SearchServer.SupplementalUi.label', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='height', full_name='EndSnippetProto.SearchConfigProto.SearchServer.SupplementalUi.height', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=160,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=11088,
serialized_end=11199,
)
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_SEARCHLETPROTO = descriptor.Descriptor(
name='SearchletProto',
full_name='EndSnippetProto.SearchConfigProto.SearchServer.SearchletProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='url', full_name='EndSnippetProto.SearchConfigProto.SearchServer.SearchletProto.url', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='name', full_name='EndSnippetProto.SearchConfigProto.SearchServer.SearchletProto.name', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='requirements', full_name='EndSnippetProto.SearchConfigProto.SearchServer.SearchletProto.requirements', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=11202,
serialized_end=11332,
)
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER = descriptor.Descriptor(
name='SearchServer',
full_name='EndSnippetProto.SearchConfigProto.SearchServer',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='name', full_name='EndSnippetProto.SearchConfigProto.SearchServer.name', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='url', full_name='EndSnippetProto.SearchConfigProto.SearchServer.url', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='type', full_name='EndSnippetProto.SearchConfigProto.SearchServer.type', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='html_transform_url', full_name='EndSnippetProto.SearchConfigProto.SearchServer.html_transform_url', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='kml_transform_url', full_name='EndSnippetProto.SearchConfigProto.SearchServer.kml_transform_url', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='supplemental_ui', full_name='EndSnippetProto.SearchConfigProto.SearchServer.supplemental_ui', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='suggestion', full_name='EndSnippetProto.SearchConfigProto.SearchServer.suggestion', index=6,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='searchlet', full_name='EndSnippetProto.SearchConfigProto.SearchServer.searchlet', index=7,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='requirements', full_name='EndSnippetProto.SearchConfigProto.SearchServer.requirements', index=8,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='suggest_server', full_name='EndSnippetProto.SearchConfigProto.SearchServer.suggest_server', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_SUPPLEMENTALUI, _ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_SEARCHLETPROTO, ],
enum_types=[
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_RESULTTYPE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=10504,
serialized_end=11388,
)
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_ONEBOXSERVICEPROTO = descriptor.Descriptor(
name='OneboxServiceProto',
full_name='EndSnippetProto.SearchConfigProto.OneboxServiceProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='service_url', full_name='EndSnippetProto.SearchConfigProto.OneboxServiceProto.service_url', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='requirements', full_name='EndSnippetProto.SearchConfigProto.OneboxServiceProto.requirements', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=11390,
serialized_end=11495,
)
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO = descriptor.Descriptor(
name='SearchConfigProto',
full_name='EndSnippetProto.SearchConfigProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='search_server', full_name='EndSnippetProto.SearchConfigProto.search_server', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='onebox_service', full_name='EndSnippetProto.SearchConfigProto.onebox_service', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='kml_search_url', full_name='EndSnippetProto.SearchConfigProto.kml_search_url', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='kml_render_url', full_name='EndSnippetProto.SearchConfigProto.kml_render_url', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='search_history_url', full_name='EndSnippetProto.SearchConfigProto.search_history_url', index=4,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='error_page_url', full_name='EndSnippetProto.SearchConfigProto.error_page_url', index=5,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER, _ENDSNIPPETPROTO_SEARCHCONFIGPROTO_ONEBOXSERVICEPROTO, ],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=10139,
serialized_end=11495,
)
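# EndSnippetProto.SearchInfoProto (below): search defaults (default_url
# "http://maps.google.com/maps", geocode_param "q"); RockTreeDataProto after it
# carries a single optional url sub-message.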
_ENDSNIPPETPROTO_SEARCHINFOPROTO = descriptor.Descriptor(
name='SearchInfoProto',
full_name='EndSnippetProto.SearchInfoProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='default_url', full_name='EndSnippetProto.SearchInfoProto.default_url', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=unicode("http://maps.google.com/maps", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='geocode_param', full_name='EndSnippetProto.SearchInfoProto.geocode_param', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=unicode("q", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=11497,
serialized_end=11590,
)
_ENDSNIPPETPROTO_ROCKTREEDATAPROTO = descriptor.Descriptor(
name='RockTreeDataProto',
full_name='EndSnippetProto.RockTreeDataProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='url', full_name='EndSnippetProto.RockTreeDataProto.url', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=11592,
serialized_end=11647,
)
_ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO_ALLEYCATIMAGERYTYPEPROTO = descriptor.Descriptor(
name='AlleycatImageryTypeProto',
full_name='EndSnippetProto.FilmstripConfigProto.AlleycatImageryTypeProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='imagery_type_id', full_name='EndSnippetProto.FilmstripConfigProto.AlleycatImageryTypeProto.imagery_type_id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='imagery_type_label', full_name='EndSnippetProto.FilmstripConfigProto.AlleycatImageryTypeProto.imagery_type_label', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='metadata_url_template', full_name='EndSnippetProto.FilmstripConfigProto.AlleycatImageryTypeProto.metadata_url_template', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='thumbnail_url_template', full_name='EndSnippetProto.FilmstripConfigProto.AlleycatImageryTypeProto.thumbnail_url_template', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='kml_url_template', full_name='EndSnippetProto.FilmstripConfigProto.AlleycatImageryTypeProto.kml_url_template', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=12198,
serialized_end=12435,
)
_ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO = descriptor.Descriptor(
name='FilmstripConfigProto',
full_name='EndSnippetProto.FilmstripConfigProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='requirements', full_name='EndSnippetProto.FilmstripConfigProto.requirements', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='alleycat_url_template', full_name='EndSnippetProto.FilmstripConfigProto.alleycat_url_template', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='fallback_alleycat_url_template', full_name='EndSnippetProto.FilmstripConfigProto.fallback_alleycat_url_template', index=2,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='metadata_url_template', full_name='EndSnippetProto.FilmstripConfigProto.metadata_url_template', index=3,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='thumbnail_url_template', full_name='EndSnippetProto.FilmstripConfigProto.thumbnail_url_template', index=4,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='kml_url_template', full_name='EndSnippetProto.FilmstripConfigProto.kml_url_template', index=5,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='featured_tours_url', full_name='EndSnippetProto.FilmstripConfigProto.featured_tours_url', index=6,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='enable_viewport_fallback', full_name='EndSnippetProto.FilmstripConfigProto.enable_viewport_fallback', index=7,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='viewport_fallback_distance', full_name='EndSnippetProto.FilmstripConfigProto.viewport_fallback_distance', index=8,
number=8, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='imagery_type', full_name='EndSnippetProto.FilmstripConfigProto.imagery_type', index=9,
number=10, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO_ALLEYCATIMAGERYTYPEPROTO, ],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=11650,
serialized_end=12435,
)
_ENDSNIPPETPROTO_STARDATAPROTO = descriptor.Descriptor(
name='StarDataProto',
full_name='EndSnippetProto.StarDataProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='url', full_name='EndSnippetProto.StarDataProto.url', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=12437,
serialized_end=12488,
)
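# Top-level descriptor for EndSnippetProto, the large client-configuration
# message that aggregates the nested SearchConfig/SearchInfo/RockTreeData/
# FilmstripConfig/StarData sub-messages declared above.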
_ENDSNIPPETPROTO = descriptor.Descriptor(
name='EndSnippetProto',
full_name='EndSnippetProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='model', full_name='EndSnippetProto.model', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='auth_server_url', full_name='EndSnippetProto.auth_server_url', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='disable_authentication', full_name='EndSnippetProto.disable_authentication', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='mfe_domains', full_name='EndSnippetProto.mfe_domains', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='mfe_lang_param', full_name='EndSnippetProto.mfe_lang_param', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=unicode("hl=$[hl]", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='ads_url_patterns', full_name='EndSnippetProto.ads_url_patterns', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='reverse_geocoder_url', full_name='EndSnippetProto.reverse_geocoder_url', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='reverse_geocoder_protocol_version', full_name='EndSnippetProto.reverse_geocoder_protocol_version', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=3,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='sky_database_is_available', full_name='EndSnippetProto.sky_database_is_available', index=8,
number=9, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='sky_database_url', full_name='EndSnippetProto.sky_database_url', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='default_web_page_intl_url', full_name='EndSnippetProto.default_web_page_intl_url', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='num_start_up_tips', full_name='EndSnippetProto.num_start_up_tips', index=11,
number=12, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=17,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='start_up_tips_url', full_name='EndSnippetProto.start_up_tips_url', index=12,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='num_pro_start_up_tips', full_name='EndSnippetProto.num_pro_start_up_tips', index=13,
number=51, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='pro_start_up_tips_url', full_name='EndSnippetProto.pro_start_up_tips_url', index=14,
number=52, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='startup_tips_intl_url', full_name='EndSnippetProto.startup_tips_intl_url', index=15,
number=64, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='user_guide_intl_url', full_name='EndSnippetProto.user_guide_intl_url', index=16,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='support_center_intl_url', full_name='EndSnippetProto.support_center_intl_url', index=17,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='business_listing_intl_url', full_name='EndSnippetProto.business_listing_intl_url', index=18,
number=16, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='support_answer_intl_url', full_name='EndSnippetProto.support_answer_intl_url', index=19,
number=17, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='support_topic_intl_url', full_name='EndSnippetProto.support_topic_intl_url', index=20,
number=18, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='support_request_intl_url', full_name='EndSnippetProto.support_request_intl_url', index=21,
number=19, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='earth_intl_url', full_name='EndSnippetProto.earth_intl_url', index=22,
number=20, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='add_content_url', full_name='EndSnippetProto.add_content_url', index=23,
number=21, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='sketchup_not_installed_url', full_name='EndSnippetProto.sketchup_not_installed_url', index=24,
number=22, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='sketchup_error_url', full_name='EndSnippetProto.sketchup_error_url', index=25,
number=23, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='free_license_url', full_name='EndSnippetProto.free_license_url', index=26,
number=24, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='pro_license_url', full_name='EndSnippetProto.pro_license_url', index=27,
number=25, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='tutorial_url', full_name='EndSnippetProto.tutorial_url', index=28,
number=48, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='keyboard_shortcuts_url', full_name='EndSnippetProto.keyboard_shortcuts_url', index=29,
number=49, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='release_notes_url', full_name='EndSnippetProto.release_notes_url', index=30,
number=50, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='hide_user_data', full_name='EndSnippetProto.hide_user_data', index=31,
number=26, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='use_ge_logo', full_name='EndSnippetProto.use_ge_logo', index=32,
number=27, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='diorama_description_url_base', full_name='EndSnippetProto.diorama_description_url_base', index=33,
number=28, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='diorama_default_color', full_name='EndSnippetProto.diorama_default_color', index=34,
number=29, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=4291281607,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='diorama_blacklist_url', full_name='EndSnippetProto.diorama_blacklist_url', index=35,
number=53, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='client_options', full_name='EndSnippetProto.client_options', index=36,
number=30, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='fetching_options', full_name='EndSnippetProto.fetching_options', index=37,
number=31, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='time_machine_options', full_name='EndSnippetProto.time_machine_options', index=38,
number=32, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='csi_options', full_name='EndSnippetProto.csi_options', index=39,
number=33, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='search_tab', full_name='EndSnippetProto.search_tab', index=40,
number=34, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='cobrand_info', full_name='EndSnippetProto.cobrand_info', index=41,
number=35, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='valid_database', full_name='EndSnippetProto.valid_database', index=42,
number=36, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='config_script', full_name='EndSnippetProto.config_script', index=43,
number=37, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='deauth_server_url', full_name='EndSnippetProto.deauth_server_url', index=44,
number=38, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='swoop_parameters', full_name='EndSnippetProto.swoop_parameters', index=45,
number=39, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='bbs_server_info', full_name='EndSnippetProto.bbs_server_info', index=46,
number=40, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='data_error_server_info', full_name='EndSnippetProto.data_error_server_info', index=47,
number=41, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='planetary_database', full_name='EndSnippetProto.planetary_database', index=48,
number=42, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='log_server', full_name='EndSnippetProto.log_server', index=49,
number=43, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='autopia_options', full_name='EndSnippetProto.autopia_options', index=50,
number=44, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='search_config', full_name='EndSnippetProto.search_config', index=51,
number=54, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='search_info', full_name='EndSnippetProto.search_info', index=52,
number=45, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='elevation_service_base_url', full_name='EndSnippetProto.elevation_service_base_url', index=53,
number=46, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=unicode("http://maps.google.com/maps/api/elevation/", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='elevation_profile_query_delay', full_name='EndSnippetProto.elevation_profile_query_delay', index=54,
number=47, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=500,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='pro_upgrade_url', full_name='EndSnippetProto.pro_upgrade_url', index=55,
number=55, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='earth_community_url', full_name='EndSnippetProto.earth_community_url', index=56,
number=56, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='google_maps_url', full_name='EndSnippetProto.google_maps_url', index=57,
number=57, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='sharing_url', full_name='EndSnippetProto.sharing_url', index=58,
number=58, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='privacy_policy_url', full_name='EndSnippetProto.privacy_policy_url', index=59,
number=59, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='do_gplus_user_check', full_name='EndSnippetProto.do_gplus_user_check', index=60,
number=60, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='rocktree_data_proto', full_name='EndSnippetProto.rocktree_data_proto', index=61,
number=61, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='filmstrip_config', full_name='EndSnippetProto.filmstrip_config', index=62,
number=62, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='show_signin_button', full_name='EndSnippetProto.show_signin_button', index=63,
number=63, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='pro_measure_upsell_url', full_name='EndSnippetProto.pro_measure_upsell_url', index=64,
number=65, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='pro_print_upsell_url', full_name='EndSnippetProto.pro_print_upsell_url', index=65,
number=66, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='star_data_proto', full_name='EndSnippetProto.star_data_proto', index=66,
number=67, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='feedback_url', full_name='EndSnippetProto.feedback_url', index=67,
number=68, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_ENDSNIPPETPROTO_SEARCHCONFIGPROTO, _ENDSNIPPETPROTO_SEARCHINFOPROTO, _ENDSNIPPETPROTO_ROCKTREEDATAPROTO, _ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO, _ENDSNIPPETPROTO_STARDATAPROTO, ],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6857,
serialized_end=12488,
)
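# Descriptors for the small top-level messages that reference and version a
# dbroot: DbRootRefProto and DatabaseVersionProto.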
_DBROOTREFPROTO = descriptor.Descriptor(
name='DbRootRefProto',
full_name='DbRootRefProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='url', full_name='DbRootRefProto.url', index=0,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='is_critical', full_name='DbRootRefProto.is_critical', index=1,
number=1, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='requirements', full_name='DbRootRefProto.requirements', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=12490,
serialized_end=12588,
)
_DATABASEVERSIONPROTO = descriptor.Descriptor(
name='DatabaseVersionProto',
full_name='DatabaseVersionProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='quadtree_version', full_name='DatabaseVersionProto.quadtree_version', index=0,
number=1, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=12590,
serialized_end=12638,
)
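# Top-level descriptor for DbRootProto, the root message that ties together
# provider info, nested features, styles, translations, and the
# EndSnippetProto defined above.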
_DBROOTPROTO = descriptor.Descriptor(
name='DbRootProto',
full_name='DbRootProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='database_name', full_name='DbRootProto.database_name', index=0,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='imagery_present', full_name='DbRootProto.imagery_present', index=1,
number=1, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='proto_imagery', full_name='DbRootProto.proto_imagery', index=2,
number=14, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='terrain_present', full_name='DbRootProto.terrain_present', index=3,
number=2, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='provider_info', full_name='DbRootProto.provider_info', index=4,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='nested_feature', full_name='DbRootProto.nested_feature', index=5,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='style_attribute', full_name='DbRootProto.style_attribute', index=6,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='style_map', full_name='DbRootProto.style_map', index=7,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='end_snippet', full_name='DbRootProto.end_snippet', index=8,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='translation_entry', full_name='DbRootProto.translation_entry', index=9,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='language', full_name='DbRootProto.language', index=10,
number=9, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=unicode("en", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='version', full_name='DbRootProto.version', index=11,
number=10, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=5,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='dbroot_reference', full_name='DbRootProto.dbroot_reference', index=12,
number=11, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='database_version', full_name='DbRootProto.database_version', index=13,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='refresh_timeout', full_name='DbRootProto.refresh_timeout', index=14,
number=16, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=12641,
serialized_end=13207,
)
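# Descriptor for EncryptedDbRootProto, the wrapper carrying an encryption
# type, the encryption data, and the encrypted dbroot payload.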
_ENCRYPTEDDBROOTPROTO = descriptor.Descriptor(
name='EncryptedDbRootProto',
full_name='EncryptedDbRootProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='encryption_type', full_name='EncryptedDbRootProto.encryption_type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='encryption_data', full_name='EncryptedDbRootProto.encryption_data', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value="",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='dbroot_data', full_name='EncryptedDbRootProto.dbroot_data', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value="",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_ENCRYPTEDDBROOTPROTO_ENCRYPTIONTYPE,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=13210,
serialized_end=13379,
)
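# Wire up cross-references between the descriptors defined above:
# message-typed fields get their message_type, enum-typed fields their
# enum_type, and nested messages/enums their containing_type.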
_PROVIDERINFOPROTO.fields_by_name['copyright_string'].message_type = _STRINGIDORVALUEPROTO
_POPUPPROTO.fields_by_name['text'].message_type = _STRINGIDORVALUEPROTO
_STYLEATTRIBUTEPROTO.fields_by_name['placemark_icon_path'].message_type = _STRINGIDORVALUEPROTO
_STYLEATTRIBUTEPROTO.fields_by_name['pop_up'].message_type = _POPUPPROTO
_STYLEATTRIBUTEPROTO.fields_by_name['draw_flag'].message_type = _DRAWFLAGPROTO
_DRAWFLAGPROTO.fields_by_name['draw_flag_type'].enum_type = _DRAWFLAGPROTO_DRAWFLAGTYPE
_DRAWFLAGPROTO_DRAWFLAGTYPE.containing_type = _DRAWFLAGPROTO;
_LAYERPROTO.fields_by_name['zoom_range'].message_type = _ZOOMRANGEPROTO
_NESTEDFEATUREPROTO.fields_by_name['feature_type'].enum_type = _NESTEDFEATUREPROTO_FEATURETYPE
_NESTEDFEATUREPROTO.fields_by_name['kml_url'].message_type = _STRINGIDORVALUEPROTO
_NESTEDFEATUREPROTO.fields_by_name['layer'].message_type = _LAYERPROTO
_NESTEDFEATUREPROTO.fields_by_name['folder'].message_type = _FOLDERPROTO
_NESTEDFEATUREPROTO.fields_by_name['requirement'].message_type = _REQUIREMENTPROTO
_NESTEDFEATUREPROTO.fields_by_name['display_name'].message_type = _STRINGIDORVALUEPROTO
_NESTEDFEATUREPROTO.fields_by_name['description'].message_type = _STRINGIDORVALUEPROTO
_NESTEDFEATUREPROTO.fields_by_name['look_at'].message_type = _LOOKATPROTO
_NESTEDFEATUREPROTO.fields_by_name['children'].message_type = _NESTEDFEATUREPROTO
_NESTEDFEATUREPROTO_FEATURETYPE.containing_type = _NESTEDFEATUREPROTO;
_MFEDOMAINFEATURESPROTO.fields_by_name['supported_features'].enum_type = _MFEDOMAINFEATURESPROTO_SUPPORTEDFEATURE
_MFEDOMAINFEATURESPROTO_SUPPORTEDFEATURE.containing_type = _MFEDOMAINFEATURESPROTO;
_CLIENTOPTIONSPROTO_PRECIPITATIONSOPTIONS_WEATHERMAPPING.fields_by_name['weather_type'].enum_type = _CLIENTOPTIONSPROTO_PRECIPITATIONSOPTIONS_WEATHERMAPPING_WEATHERTYPE
_CLIENTOPTIONSPROTO_PRECIPITATIONSOPTIONS_WEATHERMAPPING.containing_type = _CLIENTOPTIONSPROTO_PRECIPITATIONSOPTIONS;
_CLIENTOPTIONSPROTO_PRECIPITATIONSOPTIONS_WEATHERMAPPING_WEATHERTYPE.containing_type = _CLIENTOPTIONSPROTO_PRECIPITATIONSOPTIONS_WEATHERMAPPING;
_CLIENTOPTIONSPROTO_PRECIPITATIONSOPTIONS.fields_by_name['weather_mapping'].message_type = _CLIENTOPTIONSPROTO_PRECIPITATIONSOPTIONS_WEATHERMAPPING
_CLIENTOPTIONSPROTO_PRECIPITATIONSOPTIONS.containing_type = _CLIENTOPTIONSPROTO;
_CLIENTOPTIONSPROTO_CAPTUREOPTIONS.containing_type = _CLIENTOPTIONSPROTO;
_CLIENTOPTIONSPROTO_MAPSOPTIONS.containing_type = _CLIENTOPTIONSPROTO;
_CLIENTOPTIONSPROTO.fields_by_name['precipitations_options'].message_type = _CLIENTOPTIONSPROTO_PRECIPITATIONSOPTIONS
_CLIENTOPTIONSPROTO.fields_by_name['capture_options'].message_type = _CLIENTOPTIONSPROTO_CAPTUREOPTIONS
_CLIENTOPTIONSPROTO.fields_by_name['maps_options'].message_type = _CLIENTOPTIONSPROTO_MAPSOPTIONS
_SEARCHTABPROTO_INPUTBOXINFO.fields_by_name['label'].message_type = _STRINGIDORVALUEPROTO
_SEARCHTABPROTO_INPUTBOXINFO.containing_type = _SEARCHTABPROTO;
_SEARCHTABPROTO.fields_by_name['tab_label'].message_type = _STRINGIDORVALUEPROTO
_SEARCHTABPROTO.fields_by_name['input_box'].message_type = _SEARCHTABPROTO_INPUTBOXINFO
_SEARCHTABPROTO.fields_by_name['requirement'].message_type = _REQUIREMENTPROTO
_COBRANDPROTO_COORD.containing_type = _COBRANDPROTO;
_COBRANDPROTO.fields_by_name['x_coord'].message_type = _COBRANDPROTO_COORD
_COBRANDPROTO.fields_by_name['y_coord'].message_type = _COBRANDPROTO_COORD
_COBRANDPROTO.fields_by_name['tie_point'].enum_type = _COBRANDPROTO_TIEPOINT
_COBRANDPROTO_TIEPOINT.containing_type = _COBRANDPROTO;
_DATABASEDESCRIPTIONPROTO.fields_by_name['database_name'].message_type = _STRINGIDORVALUEPROTO
_POSTINGSERVERPROTO.fields_by_name['name'].message_type = _STRINGIDORVALUEPROTO
_POSTINGSERVERPROTO.fields_by_name['base_url'].message_type = _STRINGIDORVALUEPROTO
_POSTINGSERVERPROTO.fields_by_name['post_wizard_path'].message_type = _STRINGIDORVALUEPROTO
_POSTINGSERVERPROTO.fields_by_name['file_submit_path'].message_type = _STRINGIDORVALUEPROTO
_PLANETARYDATABASEPROTO.fields_by_name['url'].message_type = _STRINGIDORVALUEPROTO
_PLANETARYDATABASEPROTO.fields_by_name['name'].message_type = _STRINGIDORVALUEPROTO
_LOGSERVERPROTO.fields_by_name['url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_SUPPLEMENTALUI.fields_by_name['url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_SUPPLEMENTALUI.fields_by_name['label'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_SUPPLEMENTALUI.containing_type = _ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER;
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_SEARCHLETPROTO.fields_by_name['url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_SEARCHLETPROTO.fields_by_name['name'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_SEARCHLETPROTO.fields_by_name['requirements'].message_type = _REQUIREMENTPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_SEARCHLETPROTO.containing_type = _ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER;
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER.fields_by_name['name'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER.fields_by_name['url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER.fields_by_name['type'].enum_type = _ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_RESULTTYPE
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER.fields_by_name['html_transform_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER.fields_by_name['kml_transform_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER.fields_by_name['supplemental_ui'].message_type = _ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_SUPPLEMENTALUI
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER.fields_by_name['suggestion'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER.fields_by_name['searchlet'].message_type = _ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_SEARCHLETPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER.fields_by_name['requirements'].message_type = _REQUIREMENTPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER.fields_by_name['suggest_server'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER.containing_type = _ENDSNIPPETPROTO_SEARCHCONFIGPROTO;
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_RESULTTYPE.containing_type = _ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER;
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_ONEBOXSERVICEPROTO.fields_by_name['service_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_ONEBOXSERVICEPROTO.fields_by_name['requirements'].message_type = _REQUIREMENTPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO_ONEBOXSERVICEPROTO.containing_type = _ENDSNIPPETPROTO_SEARCHCONFIGPROTO;
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO.fields_by_name['search_server'].message_type = _ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO.fields_by_name['onebox_service'].message_type = _ENDSNIPPETPROTO_SEARCHCONFIGPROTO_ONEBOXSERVICEPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO.fields_by_name['kml_search_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO.fields_by_name['kml_render_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO.fields_by_name['search_history_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO.fields_by_name['error_page_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_SEARCHCONFIGPROTO.containing_type = _ENDSNIPPETPROTO;
_ENDSNIPPETPROTO_SEARCHINFOPROTO.containing_type = _ENDSNIPPETPROTO;
_ENDSNIPPETPROTO_ROCKTREEDATAPROTO.fields_by_name['url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_ROCKTREEDATAPROTO.containing_type = _ENDSNIPPETPROTO;
_ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO_ALLEYCATIMAGERYTYPEPROTO.fields_by_name['metadata_url_template'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO_ALLEYCATIMAGERYTYPEPROTO.fields_by_name['thumbnail_url_template'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO_ALLEYCATIMAGERYTYPEPROTO.fields_by_name['kml_url_template'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO_ALLEYCATIMAGERYTYPEPROTO.containing_type = _ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO;
_ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO.fields_by_name['requirements'].message_type = _REQUIREMENTPROTO
_ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO.fields_by_name['alleycat_url_template'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO.fields_by_name['fallback_alleycat_url_template'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO.fields_by_name['metadata_url_template'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO.fields_by_name['thumbnail_url_template'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO.fields_by_name['kml_url_template'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO.fields_by_name['featured_tours_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO.fields_by_name['imagery_type'].message_type = _ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO_ALLEYCATIMAGERYTYPEPROTO
_ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO.containing_type = _ENDSNIPPETPROTO;
_ENDSNIPPETPROTO_STARDATAPROTO.fields_by_name['url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO_STARDATAPROTO.containing_type = _ENDSNIPPETPROTO;
_ENDSNIPPETPROTO.fields_by_name['model'].message_type = _PLANETMODELPROTO
_ENDSNIPPETPROTO.fields_by_name['auth_server_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['mfe_domains'].message_type = _MFEDOMAINFEATURESPROTO
_ENDSNIPPETPROTO.fields_by_name['reverse_geocoder_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['sky_database_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['default_web_page_intl_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['start_up_tips_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['pro_start_up_tips_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['startup_tips_intl_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['user_guide_intl_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['support_center_intl_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['business_listing_intl_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['support_answer_intl_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['support_topic_intl_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['support_request_intl_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['earth_intl_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['add_content_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['sketchup_not_installed_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['sketchup_error_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['free_license_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['pro_license_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['tutorial_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['keyboard_shortcuts_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['release_notes_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['diorama_description_url_base'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['diorama_blacklist_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['client_options'].message_type = _CLIENTOPTIONSPROTO
_ENDSNIPPETPROTO.fields_by_name['fetching_options'].message_type = _FETCHINGOPTIONSPROTO
_ENDSNIPPETPROTO.fields_by_name['time_machine_options'].message_type = _TIMEMACHINEOPTIONSPROTO
_ENDSNIPPETPROTO.fields_by_name['csi_options'].message_type = _CSIOPTIONSPROTO
_ENDSNIPPETPROTO.fields_by_name['search_tab'].message_type = _SEARCHTABPROTO
_ENDSNIPPETPROTO.fields_by_name['cobrand_info'].message_type = _COBRANDPROTO
_ENDSNIPPETPROTO.fields_by_name['valid_database'].message_type = _DATABASEDESCRIPTIONPROTO
_ENDSNIPPETPROTO.fields_by_name['config_script'].message_type = _CONFIGSCRIPTPROTO
_ENDSNIPPETPROTO.fields_by_name['deauth_server_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['swoop_parameters'].message_type = _SWOOPPARAMSPROTO
_ENDSNIPPETPROTO.fields_by_name['bbs_server_info'].message_type = _POSTINGSERVERPROTO
_ENDSNIPPETPROTO.fields_by_name['data_error_server_info'].message_type = _POSTINGSERVERPROTO
_ENDSNIPPETPROTO.fields_by_name['planetary_database'].message_type = _PLANETARYDATABASEPROTO
_ENDSNIPPETPROTO.fields_by_name['log_server'].message_type = _LOGSERVERPROTO
_ENDSNIPPETPROTO.fields_by_name['autopia_options'].message_type = _AUTOPIAOPTIONSPROTO
_ENDSNIPPETPROTO.fields_by_name['search_config'].message_type = _ENDSNIPPETPROTO_SEARCHCONFIGPROTO
_ENDSNIPPETPROTO.fields_by_name['search_info'].message_type = _ENDSNIPPETPROTO_SEARCHINFOPROTO
_ENDSNIPPETPROTO.fields_by_name['pro_upgrade_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['earth_community_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['google_maps_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['sharing_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['privacy_policy_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['rocktree_data_proto'].message_type = _ENDSNIPPETPROTO_ROCKTREEDATAPROTO
_ENDSNIPPETPROTO.fields_by_name['filmstrip_config'].message_type = _ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO
_ENDSNIPPETPROTO.fields_by_name['pro_measure_upsell_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['pro_print_upsell_url'].message_type = _STRINGIDORVALUEPROTO
_ENDSNIPPETPROTO.fields_by_name['star_data_proto'].message_type = _ENDSNIPPETPROTO_STARDATAPROTO
_ENDSNIPPETPROTO.fields_by_name['feedback_url'].message_type = _STRINGIDORVALUEPROTO
_DBROOTREFPROTO.fields_by_name['requirements'].message_type = _REQUIREMENTPROTO
_DBROOTPROTO.fields_by_name['database_name'].message_type = _STRINGIDORVALUEPROTO
_DBROOTPROTO.fields_by_name['provider_info'].message_type = _PROVIDERINFOPROTO
_DBROOTPROTO.fields_by_name['nested_feature'].message_type = _NESTEDFEATUREPROTO
_DBROOTPROTO.fields_by_name['style_attribute'].message_type = _STYLEATTRIBUTEPROTO
_DBROOTPROTO.fields_by_name['style_map'].message_type = _STYLEMAPPROTO
_DBROOTPROTO.fields_by_name['end_snippet'].message_type = _ENDSNIPPETPROTO
_DBROOTPROTO.fields_by_name['translation_entry'].message_type = _STRINGENTRYPROTO
_DBROOTPROTO.fields_by_name['dbroot_reference'].message_type = _DBROOTREFPROTO
_DBROOTPROTO.fields_by_name['database_version'].message_type = _DATABASEVERSIONPROTO
_ENCRYPTEDDBROOTPROTO.fields_by_name['encryption_type'].enum_type = _ENCRYPTEDDBROOTPROTO_ENCRYPTIONTYPE
_ENCRYPTEDDBROOTPROTO_ENCRYPTIONTYPE.containing_type = _ENCRYPTEDDBROOTPROTO;
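# Register every top-level message descriptor on the file DESCRIPTOR so it
# can be looked up by name through reflection.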
DESCRIPTOR.message_types_by_name['StringEntryProto'] = _STRINGENTRYPROTO
DESCRIPTOR.message_types_by_name['StringIdOrValueProto'] = _STRINGIDORVALUEPROTO
DESCRIPTOR.message_types_by_name['PlanetModelProto'] = _PLANETMODELPROTO
DESCRIPTOR.message_types_by_name['ProviderInfoProto'] = _PROVIDERINFOPROTO
DESCRIPTOR.message_types_by_name['PopUpProto'] = _POPUPPROTO
DESCRIPTOR.message_types_by_name['StyleAttributeProto'] = _STYLEATTRIBUTEPROTO
DESCRIPTOR.message_types_by_name['StyleMapProto'] = _STYLEMAPPROTO
DESCRIPTOR.message_types_by_name['ZoomRangeProto'] = _ZOOMRANGEPROTO
DESCRIPTOR.message_types_by_name['DrawFlagProto'] = _DRAWFLAGPROTO
DESCRIPTOR.message_types_by_name['LayerProto'] = _LAYERPROTO
DESCRIPTOR.message_types_by_name['FolderProto'] = _FOLDERPROTO
DESCRIPTOR.message_types_by_name['RequirementProto'] = _REQUIREMENTPROTO
DESCRIPTOR.message_types_by_name['LookAtProto'] = _LOOKATPROTO
DESCRIPTOR.message_types_by_name['NestedFeatureProto'] = _NESTEDFEATUREPROTO
DESCRIPTOR.message_types_by_name['MfeDomainFeaturesProto'] = _MFEDOMAINFEATURESPROTO
DESCRIPTOR.message_types_by_name['ClientOptionsProto'] = _CLIENTOPTIONSPROTO
DESCRIPTOR.message_types_by_name['FetchingOptionsProto'] = _FETCHINGOPTIONSPROTO
DESCRIPTOR.message_types_by_name['TimeMachineOptionsProto'] = _TIMEMACHINEOPTIONSPROTO
DESCRIPTOR.message_types_by_name['AutopiaOptionsProto'] = _AUTOPIAOPTIONSPROTO
DESCRIPTOR.message_types_by_name['CSIOptionsProto'] = _CSIOPTIONSPROTO
DESCRIPTOR.message_types_by_name['SearchTabProto'] = _SEARCHTABPROTO
DESCRIPTOR.message_types_by_name['CobrandProto'] = _COBRANDPROTO
DESCRIPTOR.message_types_by_name['DatabaseDescriptionProto'] = _DATABASEDESCRIPTIONPROTO
DESCRIPTOR.message_types_by_name['ConfigScriptProto'] = _CONFIGSCRIPTPROTO
DESCRIPTOR.message_types_by_name['SwoopParamsProto'] = _SWOOPPARAMSPROTO
DESCRIPTOR.message_types_by_name['PostingServerProto'] = _POSTINGSERVERPROTO
DESCRIPTOR.message_types_by_name['PlanetaryDatabaseProto'] = _PLANETARYDATABASEPROTO
DESCRIPTOR.message_types_by_name['LogServerProto'] = _LOGSERVERPROTO
DESCRIPTOR.message_types_by_name['EndSnippetProto'] = _ENDSNIPPETPROTO
DESCRIPTOR.message_types_by_name['DbRootRefProto'] = _DBROOTREFPROTO
DESCRIPTOR.message_types_by_name['DatabaseVersionProto'] = _DATABASEVERSIONPROTO
DESCRIPTOR.message_types_by_name['DbRootProto'] = _DBROOTPROTO
DESCRIPTOR.message_types_by_name['EncryptedDbRootProto'] = _ENCRYPTEDDBROOTPROTO
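# Generated message classes: reflection.GeneratedProtocolMessageType reads the
# DESCRIPTOR attribute of each class and fills in the field accessors and
# (de)serialization methods.
#
# Example (sketch), assuming `data` holds the serialized bytes of a
# DbRootProto (the DbRootProto class is generated further below in this
# module):
#   dbroot = DbRootProto()
#   dbroot.ParseFromString(data)
#   snippet = dbroot.end_snippet  # EndSnippetProto with the client configuration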
class StringEntryProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _STRINGENTRYPROTO
# @@protoc_insertion_point(class_scope:StringEntryProto)
class StringIdOrValueProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _STRINGIDORVALUEPROTO
# @@protoc_insertion_point(class_scope:StringIdOrValueProto)
class PlanetModelProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _PLANETMODELPROTO
# @@protoc_insertion_point(class_scope:PlanetModelProto)
class ProviderInfoProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _PROVIDERINFOPROTO
# @@protoc_insertion_point(class_scope:ProviderInfoProto)
class PopUpProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _POPUPPROTO
# @@protoc_insertion_point(class_scope:PopUpProto)
class StyleAttributeProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _STYLEATTRIBUTEPROTO
# @@protoc_insertion_point(class_scope:StyleAttributeProto)
class StyleMapProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _STYLEMAPPROTO
# @@protoc_insertion_point(class_scope:StyleMapProto)
class ZoomRangeProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ZOOMRANGEPROTO
# @@protoc_insertion_point(class_scope:ZoomRangeProto)
class DrawFlagProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _DRAWFLAGPROTO
# @@protoc_insertion_point(class_scope:DrawFlagProto)
class LayerProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _LAYERPROTO
# @@protoc_insertion_point(class_scope:LayerProto)
class FolderProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _FOLDERPROTO
# @@protoc_insertion_point(class_scope:FolderProto)
class RequirementProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _REQUIREMENTPROTO
# @@protoc_insertion_point(class_scope:RequirementProto)
class LookAtProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _LOOKATPROTO
# @@protoc_insertion_point(class_scope:LookAtProto)
class NestedFeatureProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _NESTEDFEATUREPROTO
# @@protoc_insertion_point(class_scope:NestedFeatureProto)
class MfeDomainFeaturesProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _MFEDOMAINFEATURESPROTO
# @@protoc_insertion_point(class_scope:MfeDomainFeaturesProto)
class ClientOptionsProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
class PrecipitationsOptions(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
class WeatherMapping(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CLIENTOPTIONSPROTO_PRECIPITATIONSOPTIONS_WEATHERMAPPING
# @@protoc_insertion_point(class_scope:ClientOptionsProto.PrecipitationsOptions.WeatherMapping)
DESCRIPTOR = _CLIENTOPTIONSPROTO_PRECIPITATIONSOPTIONS
# @@protoc_insertion_point(class_scope:ClientOptionsProto.PrecipitationsOptions)
class CaptureOptions(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CLIENTOPTIONSPROTO_CAPTUREOPTIONS
# @@protoc_insertion_point(class_scope:ClientOptionsProto.CaptureOptions)
class MapsOptions(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CLIENTOPTIONSPROTO_MAPSOPTIONS
# @@protoc_insertion_point(class_scope:ClientOptionsProto.MapsOptions)
DESCRIPTOR = _CLIENTOPTIONSPROTO
# @@protoc_insertion_point(class_scope:ClientOptionsProto)
class FetchingOptionsProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _FETCHINGOPTIONSPROTO
# @@protoc_insertion_point(class_scope:FetchingOptionsProto)
class TimeMachineOptionsProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _TIMEMACHINEOPTIONSPROTO
# @@protoc_insertion_point(class_scope:TimeMachineOptionsProto)
class AutopiaOptionsProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _AUTOPIAOPTIONSPROTO
# @@protoc_insertion_point(class_scope:AutopiaOptionsProto)
class CSIOptionsProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CSIOPTIONSPROTO
# @@protoc_insertion_point(class_scope:CSIOptionsProto)
class SearchTabProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
class InputBoxInfo(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _SEARCHTABPROTO_INPUTBOXINFO
# @@protoc_insertion_point(class_scope:SearchTabProto.InputBoxInfo)
DESCRIPTOR = _SEARCHTABPROTO
# @@protoc_insertion_point(class_scope:SearchTabProto)
class CobrandProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
class Coord(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _COBRANDPROTO_COORD
# @@protoc_insertion_point(class_scope:CobrandProto.Coord)
DESCRIPTOR = _COBRANDPROTO
# @@protoc_insertion_point(class_scope:CobrandProto)
class DatabaseDescriptionProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _DATABASEDESCRIPTIONPROTO
# @@protoc_insertion_point(class_scope:DatabaseDescriptionProto)
class ConfigScriptProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _CONFIGSCRIPTPROTO
# @@protoc_insertion_point(class_scope:ConfigScriptProto)
class SwoopParamsProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _SWOOPPARAMSPROTO
# @@protoc_insertion_point(class_scope:SwoopParamsProto)
class PostingServerProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _POSTINGSERVERPROTO
# @@protoc_insertion_point(class_scope:PostingServerProto)
class PlanetaryDatabaseProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _PLANETARYDATABASEPROTO
# @@protoc_insertion_point(class_scope:PlanetaryDatabaseProto)
class LogServerProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _LOGSERVERPROTO
# @@protoc_insertion_point(class_scope:LogServerProto)
class EndSnippetProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
class SearchConfigProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
class SearchServer(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
class SupplementalUi(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_SUPPLEMENTALUI
# @@protoc_insertion_point(class_scope:EndSnippetProto.SearchConfigProto.SearchServer.SupplementalUi)
class SearchletProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER_SEARCHLETPROTO
# @@protoc_insertion_point(class_scope:EndSnippetProto.SearchConfigProto.SearchServer.SearchletProto)
DESCRIPTOR = _ENDSNIPPETPROTO_SEARCHCONFIGPROTO_SEARCHSERVER
# @@protoc_insertion_point(class_scope:EndSnippetProto.SearchConfigProto.SearchServer)
class OneboxServiceProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ENDSNIPPETPROTO_SEARCHCONFIGPROTO_ONEBOXSERVICEPROTO
# @@protoc_insertion_point(class_scope:EndSnippetProto.SearchConfigProto.OneboxServiceProto)
DESCRIPTOR = _ENDSNIPPETPROTO_SEARCHCONFIGPROTO
# @@protoc_insertion_point(class_scope:EndSnippetProto.SearchConfigProto)
class SearchInfoProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ENDSNIPPETPROTO_SEARCHINFOPROTO
# @@protoc_insertion_point(class_scope:EndSnippetProto.SearchInfoProto)
class RockTreeDataProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ENDSNIPPETPROTO_ROCKTREEDATAPROTO
# @@protoc_insertion_point(class_scope:EndSnippetProto.RockTreeDataProto)
class FilmstripConfigProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
class AlleycatImageryTypeProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO_ALLEYCATIMAGERYTYPEPROTO
# @@protoc_insertion_point(class_scope:EndSnippetProto.FilmstripConfigProto.AlleycatImageryTypeProto)
DESCRIPTOR = _ENDSNIPPETPROTO_FILMSTRIPCONFIGPROTO
# @@protoc_insertion_point(class_scope:EndSnippetProto.FilmstripConfigProto)
class StarDataProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ENDSNIPPETPROTO_STARDATAPROTO
# @@protoc_insertion_point(class_scope:EndSnippetProto.StarDataProto)
DESCRIPTOR = _ENDSNIPPETPROTO
# @@protoc_insertion_point(class_scope:EndSnippetProto)
class DbRootRefProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _DBROOTREFPROTO
# @@protoc_insertion_point(class_scope:DbRootRefProto)
class DatabaseVersionProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _DATABASEVERSIONPROTO
# @@protoc_insertion_point(class_scope:DatabaseVersionProto)
class DbRootProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _DBROOTPROTO
# @@protoc_insertion_point(class_scope:DbRootProto)
class EncryptedDbRootProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ENCRYPTEDDBROOTPROTO
# @@protoc_insertion_point(class_scope:EncryptedDbRootProto)
# @@protoc_insertion_point(module_scope)
| apache-2.0 |
ianyh/heroku-buildpack-python-opencv | vendor/.heroku/lib/python2.7/test/test_socket.py | 9 | 60282 | #!/usr/bin/env python
import unittest
from test import test_support
import errno
import socket
import select
import _testcapi
import time
import traceback
import Queue
import sys
import os
import array
import contextlib
from weakref import proxy
import signal
import math
def try_address(host, port=0, family=socket.AF_INET):
"""Try to bind a socket on the given host:port and return True
    if that was possible."""
try:
sock = socket.socket(family, socket.SOCK_STREAM)
sock.bind((host, port))
except (socket.error, socket.gaierror):
return False
else:
sock.close()
return True
HOST = test_support.HOST
MSG = b'Michael Gilfix was here\n'
SUPPORTS_IPV6 = socket.has_ipv6 and try_address('::1', family=socket.AF_INET6)
try:
import thread
import threading
except ImportError:
thread = None
threading = None
HOST = test_support.HOST
MSG = 'Michael Gilfix was here\n'
class SocketTCPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = test_support.bind_port(self.serv)
self.serv.listen(1)
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.port = test_support.bind_port(self.serv)
def tearDown(self):
self.serv.close()
self.serv = None
class ThreadableTest:
"""Threadable Test class
The ThreadableTest class makes it easy to create a threaded
client/server pair from an existing unit test. To create a
new threaded class from an existing unit test, use multiple
inheritance:
class NewClass (OldClass, ThreadableTest):
pass
This class defines two new fixture functions with obvious
purposes for overriding:
clientSetUp ()
clientTearDown ()
Any new test functions within the class must then define
    tests in pairs, where the test name is preceded with a
'_' to indicate the client portion of the test. Ex:
def testFoo(self):
# Server portion
def _testFoo(self):
# Client portion
Any exceptions raised by the clients during their tests
are caught and transferred to the main thread to alert
the testing framework.
Note, the server setup function cannot call any blocking
functions that rely on the client thread during setup,
unless serverExplicitReady() is called just before
the blocking call (such as in setting up a client/server
    connection and performing the accept() in setUp()).
"""
def __init__(self):
# Swap the true setup function
self.__setUp = self.setUp
self.__tearDown = self.tearDown
self.setUp = self._setUp
self.tearDown = self._tearDown
def serverExplicitReady(self):
"""This method allows the server to explicitly indicate that
it wants the client thread to proceed. This is useful if the
server is about to execute a blocking routine that is
dependent upon the client thread during its setup routine."""
self.server_ready.set()
def _setUp(self):
self.server_ready = threading.Event()
self.client_ready = threading.Event()
self.done = threading.Event()
self.queue = Queue.Queue(1)
# Do some munging to start the client test.
methodname = self.id()
i = methodname.rfind('.')
methodname = methodname[i+1:]
test_method = getattr(self, '_' + methodname)
self.client_thread = thread.start_new_thread(
self.clientRun, (test_method,))
self.__setUp()
if not self.server_ready.is_set():
self.server_ready.set()
self.client_ready.wait()
def _tearDown(self):
self.__tearDown()
self.done.wait()
if not self.queue.empty():
msg = self.queue.get()
self.fail(msg)
def clientRun(self, test_func):
self.server_ready.wait()
self.clientSetUp()
self.client_ready.set()
if not callable(test_func):
raise TypeError("test_func must be a callable function.")
try:
test_func()
except Exception, strerror:
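            # Hand any client-side exception to the main thread; _tearDown()
            # pulls it off the queue and reports it as a test failure.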
self.queue.put(strerror)
self.clientTearDown()
def clientSetUp(self):
raise NotImplementedError("clientSetUp must be implemented.")
def clientTearDown(self):
self.done.set()
thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class SocketConnectedTest(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def setUp(self):
ThreadedTCPSocketTest.setUp(self)
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
ThreadedTCPSocketTest.tearDown(self)
def clientSetUp(self):
ThreadedTCPSocketTest.clientSetUp(self)
self.cli.connect((HOST, self.port))
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv, self.cli = socket.socketpair()
def tearDown(self):
self.serv.close()
self.serv = None
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_weakref(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s.close()
s = None
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
def raise_error(*args, **kwargs):
raise socket.error
def raise_herror(*args, **kwargs):
raise socket.herror
def raise_gaierror(*args, **kwargs):
raise socket.gaierror
self.assertRaises(socket.error, raise_error,
"Error raising socket exception.")
self.assertRaises(socket.error, raise_herror,
"Error raising socket exception.")
self.assertRaises(socket.error, raise_gaierror,
"Error raising socket exception.")
def testSendtoErrors(self):
        # Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(UnicodeEncodeError):
s.sendto(u'\u2620', sockname)
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertIn('not complex', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto('foo', None)
self.assertIn('not NoneType', str(cm.exception))
# 3 args
with self.assertRaises(UnicodeEncodeError):
s.sendto(u'\u2620', 0, sockname)
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertIn('not complex', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto('foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto('foo', 'bar', sockname)
self.assertIn('an integer is required', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto('foo', None, None)
self.assertIn('an integer is required', str(cm.exception))
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto('foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto('foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except socket.error:
# Probably name lookup wasn't set up right; skip this test
return
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except socket.error:
# Probably a similar problem as above; skip this test
return
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
if not fqhn in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
if hasattr(sys, "getrefcount"):
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__,0)
except TypeError:
self.assertEqual(sys.getrefcount(__name__), orig,
"socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except socket.error:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1L<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1L<<34)
def testNtoHErrors(self):
good_values = [ 1, 2, 3, 1L, 2L, 3L ]
bad_values = [ -1, -2, -3, -1L, -2L, -3L ]
for k in good_values:
socket.ntohl(k)
socket.ntohs(k)
socket.htonl(k)
socket.htons(k)
for k in bad_values:
self.assertRaises(OverflowError, socket.ntohl, k)
self.assertRaises(OverflowError, socket.ntohs, k)
self.assertRaises(OverflowError, socket.htonl, k)
self.assertRaises(OverflowError, socket.htons, k)
def testGetServBy(self):
eq = self.assertEqual
# Find one service that exists, then check all the related interfaces.
# I've ordered this by protocols that have both a tcp and udp
# protocol, at least for modern Linuxes.
if (sys.platform.startswith('linux') or
sys.platform.startswith('freebsd') or
sys.platform.startswith('netbsd') or
sys.platform == 'darwin'):
# avoid the 'echo' service on this platform, as there is an
            # assumption-breaking non-standard port/protocol entry
services = ('daytime', 'qotd', 'domain')
else:
services = ('echo', 'daytime', 'domain')
for service in services:
try:
port = socket.getservbyname(service, 'tcp')
break
except socket.error:
pass
else:
raise socket.error
# Try same call with optional protocol omitted
port2 = socket.getservbyname(service)
eq(port, port2)
# Try udp, but don't barf if it doesn't exist
try:
udpport = socket.getservbyname(service, 'udp')
except socket.error:
udpport = None
else:
eq(udpport, port)
# Now make sure the lookup by port returns the same service name
eq(socket.getservbyport(port2), service)
eq(socket.getservbyport(port, 'tcp'), service)
if udpport is not None:
eq(socket.getservbyport(udpport, 'udp'), service)
# Make sure getservbyport does not accept out of range ports.
self.assertRaises(OverflowError, socket.getservbyport, -1)
self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Set the default timeout to 10, and see if it propagates
socket.setdefaulttimeout(10)
self.assertEqual(socket.getdefaulttimeout(), 10)
s = socket.socket()
self.assertEqual(s.gettimeout(), 10)
s.close()
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
def testIPv4_inet_aton_fourbytes(self):
if not hasattr(socket, 'inet_aton'):
return # No inet_aton, nothing to check
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual('\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual('\xff'*4, socket.inet_aton('255.255.255.255'))
def testIPv4toString(self):
if not hasattr(socket, 'inet_pton'):
return # No inet_pton() on this platform
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
self.assertEqual('\x00\x00\x00\x00', f('0.0.0.0'))
self.assertEqual('\xff\x00\xff\x00', f('255.0.255.0'))
self.assertEqual('\xaa\xaa\xaa\xaa', f('170.170.170.170'))
self.assertEqual('\x01\x02\x03\x04', f('1.2.3.4'))
self.assertEqual('\xff\xff\xff\xff', f('255.255.255.255'))
self.assertEqual('\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEqual('\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEqual('\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEqual('\xff\xff\xff\xff', g('255.255.255.255'))
def testIPv6toString(self):
if not hasattr(socket, 'inet_pton'):
return # No inet_pton() on this platform
try:
from socket import inet_pton, AF_INET6, has_ipv6
if not has_ipv6:
return
except ImportError:
return
f = lambda a: inet_pton(AF_INET6, a)
self.assertEqual('\x00' * 16, f('::'))
self.assertEqual('\x00' * 16, f('0::0'))
self.assertEqual('\x00\x01' + '\x00' * 14, f('1::'))
self.assertEqual(
'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
)
def testStringToIPv4(self):
if not hasattr(socket, 'inet_ntop'):
return # No inet_ntop() on this platform
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
self.assertEqual('1.0.1.0', f('\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', f('\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', f('\xff\xff\xff\xff'))
self.assertEqual('1.2.3.4', f('\x01\x02\x03\x04'))
self.assertEqual('1.0.1.0', g('\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', g('\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', g('\xff\xff\xff\xff'))
def testStringToIPv6(self):
if not hasattr(socket, 'inet_ntop'):
return # No inet_ntop() on this platform
try:
from socket import inet_ntop, AF_INET6, has_ipv6
if not has_ipv6:
return
except ImportError:
return
f = lambda a: inet_ntop(AF_INET6, a)
self.assertEqual('::', f('\x00' * 16))
self.assertEqual('::1', f('\x00' * 15 + '\x01'))
self.assertEqual(
'aef:b01:506:1001:ffff:9997:55:170',
f('\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
)
# XXX The following don't test module-level functionality...
def _get_unused_port(self, bind_address='0.0.0.0'):
"""Use a temporary socket to elicit an unused ephemeral port.
Args:
bind_address: Hostname or IP address to search for a port on.
        Returns: A port that is likely to be unused.
"""
tempsock = socket.socket()
tempsock.bind((bind_address, 0))
host, port = tempsock.getsockname()
tempsock.close()
return port
def testSockName(self):
# Testing getsockname()
port = self._get_unused_port()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.bind(("0.0.0.0", port))
name = sock.getsockname()
# XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
        # it is reasonable to get the host's addr in addition to 0.0.0.0.
# At least for eCos. This is required for the S/390 to pass.
try:
my_ip_addr = socket.gethostbyname(socket.gethostname())
except socket.error:
# Probably name lookup wasn't set up right; skip this test
return
self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
        # We know a socket should start with SO_REUSEADDR off (reuse == 0)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse == 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1)
sock.close()
self.assertRaises(socket.error, sock.send, "spam")
def testNewAttributes(self):
# testing .family, .type and .protocol
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
sock.close()
def test_getsockaddrarg(self):
host = '0.0.0.0'
port = self._get_unused_port(bind_address=host)
big_port = port + 65536
neg_port = port - 65536
sock = socket.socket()
try:
self.assertRaises(OverflowError, sock.bind, (host, big_port))
self.assertRaises(OverflowError, sock.bind, (host, neg_port))
sock.bind((host, port))
finally:
sock.close()
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
self.assertTrue(hasattr(socket.socket, 'ioctl'))
self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
self.assertTrue(hasattr(socket, 'RCVALL_ON'))
self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
def testGetaddrinfo(self):
try:
socket.getaddrinfo('localhost', 80)
except socket.gaierror as err:
if err.errno == socket.EAI_SERVICE:
# see http://bugs.python.org/issue1282647
self.skipTest("buggy libc version")
raise
# len of every sequence is supposed to be == 5
for info in socket.getaddrinfo(HOST, None):
self.assertEqual(len(info), 5)
# host can be a domain name, a string representation of an
# IPv4/v6 address or None
socket.getaddrinfo('localhost', 80)
socket.getaddrinfo('127.0.0.1', 80)
socket.getaddrinfo(None, 80)
if SUPPORTS_IPV6:
socket.getaddrinfo('::1', 80)
# port can be a string service name such as "http", a numeric
# port number (int or long), or None
socket.getaddrinfo(HOST, "http")
socket.getaddrinfo(HOST, 80)
socket.getaddrinfo(HOST, 80L)
socket.getaddrinfo(HOST, None)
# test family and socktype filters
infos = socket.getaddrinfo(HOST, None, socket.AF_INET)
for family, _, _, _, _ in infos:
self.assertEqual(family, socket.AF_INET)
infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
for _, socktype, _, _, _ in infos:
self.assertEqual(socktype, socket.SOCK_STREAM)
# test proto and flags arguments
socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
# a server willing to support both IPv4 and IPv6 will
# usually do this
socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
# Issue 17269
if hasattr(socket, 'AI_NUMERICSERV'):
socket.getaddrinfo("localhost", None, 0, 0, 0, socket.AI_NUMERICSERV)
def check_sendall_interrupted(self, with_timeout):
        # socketpair() is not strictly required, but it makes things easier.
if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
self.skipTest("signal.alarm and socket.socketpair required for this test")
# Our signal handlers clobber the C errno by calling a math function
# with an invalid domain value.
def ok_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
def raising_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
1 // 0
c, s = socket.socketpair()
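        # The SIGALRM set below interrupts the blocking sendall(), exercising
        # EINTR handling both with and without a socket timeout.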
old_alarm = signal.signal(signal.SIGALRM, raising_handler)
try:
if with_timeout:
# Just above the one second minimum for signal.alarm
c.settimeout(1.5)
with self.assertRaises(ZeroDivisionError):
signal.alarm(1)
c.sendall(b"x" * test_support.SOCK_MAX_SIZE)
if with_timeout:
signal.signal(signal.SIGALRM, ok_handler)
signal.alarm(1)
self.assertRaises(socket.timeout, c.sendall,
b"x" * test_support.SOCK_MAX_SIZE)
finally:
signal.signal(signal.SIGALRM, old_alarm)
c.close()
s.close()
def test_sendall_interrupted(self):
self.check_sendall_interrupted(False)
def test_sendall_interrupted_with_timeout(self):
self.check_sendall_interrupted(True)
def test_listen_backlog(self):
for backlog in 0, -1:
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind((HOST, 0))
srv.listen(backlog)
srv.close()
# Issue 15989
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind((HOST, 0))
self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
srv.close()
@unittest.skipUnless(SUPPORTS_IPV6, 'IPv6 required for this test.')
def test_flowinfo(self):
self.assertRaises(OverflowError, socket.getnameinfo,
('::1',0, 0xffffffff), 0)
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
try:
self.assertRaises(OverflowError, s.bind, ('::1', 0, -10))
finally:
s.close()
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicTCPTest(SocketConnectedTest):
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecv(self):
# Testing large receive over TCP
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.serv_conn.send(MSG)
def testOverFlowRecv(self):
# Testing receive in chunks over TCP
seg1 = self.cli_conn.recv(len(MSG) - 3)
seg2 = self.cli_conn.recv(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecv(self):
self.serv_conn.send(MSG)
def testRecvFrom(self):
# Testing large recvfrom() over TCP
msg, addr = self.cli_conn.recvfrom(1024)
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.serv_conn.send(MSG)
def testOverFlowRecvFrom(self):
# Testing recvfrom() in chunks over TCP
seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
seg2, addr = self.cli_conn.recvfrom(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecvFrom(self):
self.serv_conn.send(MSG)
def testSendAll(self):
# Testing sendall() with a 2048 byte string over TCP
msg = ''
while 1:
read = self.cli_conn.recv(1024)
if not read:
break
msg += read
self.assertEqual(msg, 'f' * 2048)
def _testSendAll(self):
big_chunk = 'f' * 2048
self.serv_conn.sendall(big_chunk)
def testFromFd(self):
# Testing fromfd()
if not hasattr(socket, "fromfd"):
return # On Windows, this doesn't exist
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testFromFd(self):
self.serv_conn.send(MSG)
def testDup(self):
# Testing dup()
sock = self.cli_conn.dup()
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDup(self):
self.serv_conn.send(MSG)
def testShutdown(self):
# Testing shutdown()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
# wait for _testShutdown to finish: on OS X, when the server
# closes the connection the client also becomes disconnected,
# and the client's shutdown call will fail. (Issue #4397.)
self.done.wait()
def _testShutdown(self):
self.serv_conn.send(MSG)
# Issue 15989
self.assertRaises(OverflowError, self.serv_conn.shutdown,
_testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, self.serv_conn.shutdown,
2 + (_testcapi.UINT_MAX + 1))
self.serv_conn.shutdown(2)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicUDPTest(ThreadedUDPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPSocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
# Testing sendto() and Recv() over UDP
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDP
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
@unittest.skipUnless(thread, 'Threading required for this test.')
class TCPCloserTest(ThreadedTCPSocketTest):
def testClose(self):
conn, addr = self.serv.accept()
conn.close()
sd = self.cli
read, write, err = select.select([sd], [], [], 1.0)
self.assertEqual(read, [sd])
self.assertEqual(sd.recv(1), '')
def _testClose(self):
self.cli.connect((HOST, self.port))
time.sleep(1.0)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicSocketPairTest(SocketPairTest):
def __init__(self, methodName='runTest'):
SocketPairTest.__init__(self, methodName=methodName)
def testRecv(self):
msg = self.serv.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.send(MSG)
def testSend(self):
self.serv.send(MSG)
def _testSend(self):
msg = self.cli.recv(1024)
self.assertEqual(msg, MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class NonBlockingTCPTests(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def testSetBlocking(self):
# Testing whether set blocking works
self.serv.setblocking(True)
self.assertIsNone(self.serv.gettimeout())
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
start = time.time()
try:
self.serv.accept()
except socket.error:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error setting non-blocking mode.")
# Issue 15989
if _testcapi.UINT_MAX < _testcapi.ULONG_MAX:
self.serv.setblocking(_testcapi.UINT_MAX + 1)
self.assertIsNone(self.serv.gettimeout())
def _testSetBlocking(self):
pass
def testAccept(self):
# Testing non-blocking accept
self.serv.setblocking(0)
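        # With no connection pending yet, a non-blocking accept() must raise
        # immediately instead of blocking.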
try:
conn, addr = self.serv.accept()
except socket.error:
pass
else:
self.fail("Error trying to do non-blocking accept.")
read, write, err = select.select([self.serv], [], [])
if self.serv in read:
conn, addr = self.serv.accept()
conn.close()
else:
self.fail("Error trying to do accept after select.")
def _testAccept(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
def testConnect(self):
# Testing non-blocking connect
conn, addr = self.serv.accept()
conn.close()
def _testConnect(self):
self.cli.settimeout(10)
self.cli.connect((HOST, self.port))
def testRecv(self):
# Testing non-blocking recv
conn, addr = self.serv.accept()
conn.setblocking(0)
try:
msg = conn.recv(len(MSG))
except socket.error:
pass
else:
self.fail("Error trying to do non-blocking recv.")
read, write, err = select.select([conn], [], [])
if conn in read:
msg = conn.recv(len(MSG))
conn.close()
self.assertEqual(msg, MSG)
else:
self.fail("Error during select call to non-blocking socket.")
def _testRecv(self):
self.cli.connect((HOST, self.port))
time.sleep(0.1)
self.cli.send(MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class FileObjectClassTestCase(SocketConnectedTest):
bufsize = -1 # Use default buffer size
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def setUp(self):
SocketConnectedTest.setUp(self)
self.serv_file = self.cli_conn.makefile('rb', self.bufsize)
def tearDown(self):
self.serv_file.close()
self.assertTrue(self.serv_file.closed)
SocketConnectedTest.tearDown(self)
self.serv_file = None
def clientSetUp(self):
SocketConnectedTest.clientSetUp(self)
self.cli_file = self.serv_conn.makefile('wb')
def clientTearDown(self):
self.cli_file.close()
self.assertTrue(self.cli_file.closed)
self.cli_file = None
SocketConnectedTest.clientTearDown(self)
def testSmallRead(self):
# Performing small file read test
first_seg = self.serv_file.read(len(MSG)-3)
second_seg = self.serv_file.read(3)
msg = first_seg + second_seg
self.assertEqual(msg, MSG)
def _testSmallRead(self):
self.cli_file.write(MSG)
self.cli_file.flush()
def testFullRead(self):
# read until EOF
msg = self.serv_file.read()
self.assertEqual(msg, MSG)
def _testFullRead(self):
self.cli_file.write(MSG)
self.cli_file.close()
def testUnbufferedRead(self):
# Performing unbuffered file read test
buf = ''
while 1:
char = self.serv_file.read(1)
if not char:
break
buf += char
self.assertEqual(buf, MSG)
def _testUnbufferedRead(self):
self.cli_file.write(MSG)
self.cli_file.flush()
def testReadline(self):
# Performing file readline test
line = self.serv_file.readline()
self.assertEqual(line, MSG)
def _testReadline(self):
self.cli_file.write(MSG)
self.cli_file.flush()
def testReadlineAfterRead(self):
a_baloo_is = self.serv_file.read(len("A baloo is"))
self.assertEqual("A baloo is", a_baloo_is)
_a_bear = self.serv_file.read(len(" a bear"))
self.assertEqual(" a bear", _a_bear)
line = self.serv_file.readline()
self.assertEqual("\n", line)
line = self.serv_file.readline()
self.assertEqual("A BALOO IS A BEAR.\n", line)
line = self.serv_file.readline()
self.assertEqual(MSG, line)
def _testReadlineAfterRead(self):
self.cli_file.write("A baloo is a bear\n")
self.cli_file.write("A BALOO IS A BEAR.\n")
self.cli_file.write(MSG)
self.cli_file.flush()
def testReadlineAfterReadNoNewline(self):
end_of_ = self.serv_file.read(len("End Of "))
self.assertEqual("End Of ", end_of_)
line = self.serv_file.readline()
self.assertEqual("Line", line)
def _testReadlineAfterReadNoNewline(self):
self.cli_file.write("End Of Line")
def testClosedAttr(self):
self.assertTrue(not self.serv_file.closed)
def _testClosedAttr(self):
self.assertTrue(not self.cli_file.closed)
class FileObjectInterruptedTestCase(unittest.TestCase):
"""Test that the file object correctly handles EINTR internally."""
class MockSocket(object):
def __init__(self, recv_funcs=()):
# A generator that returns callables that we'll call for each
# call to recv().
self._recv_step = iter(recv_funcs)
def recv(self, size):
return self._recv_step.next()()
@staticmethod
def _raise_eintr():
raise socket.error(errno.EINTR)
def _test_readline(self, size=-1, **kwargs):
mock_sock = self.MockSocket(recv_funcs=[
lambda : "This is the first line\nAnd the sec",
self._raise_eintr,
lambda : "ond line is here\n",
lambda : "",
])
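        # readline() must transparently retry after the simulated EINTR and
        # still return the complete lines.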
fo = socket._fileobject(mock_sock, **kwargs)
self.assertEqual(fo.readline(size), "This is the first line\n")
self.assertEqual(fo.readline(size), "And the second line is here\n")
def _test_read(self, size=-1, **kwargs):
mock_sock = self.MockSocket(recv_funcs=[
lambda : "This is the first line\nAnd the sec",
self._raise_eintr,
lambda : "ond line is here\n",
lambda : "",
])
fo = socket._fileobject(mock_sock, **kwargs)
self.assertEqual(fo.read(size), "This is the first line\n"
"And the second line is here\n")
def test_default(self):
self._test_readline()
self._test_readline(size=100)
self._test_read()
self._test_read(size=100)
def test_with_1k_buffer(self):
self._test_readline(bufsize=1024)
self._test_readline(size=100, bufsize=1024)
self._test_read(bufsize=1024)
self._test_read(size=100, bufsize=1024)
def _test_readline_no_buffer(self, size=-1):
mock_sock = self.MockSocket(recv_funcs=[
lambda : "aa",
lambda : "\n",
lambda : "BB",
self._raise_eintr,
lambda : "bb",
lambda : "",
])
fo = socket._fileobject(mock_sock, bufsize=0)
self.assertEqual(fo.readline(size), "aa\n")
self.assertEqual(fo.readline(size), "BBbb")
def test_no_buffer(self):
self._test_readline_no_buffer()
self._test_readline_no_buffer(size=4)
self._test_read(bufsize=0)
self._test_read(size=100, bufsize=0)
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
"""Repeat the tests from FileObjectClassTestCase with bufsize==0.
In this case (and in this case only), it should be possible to
create a file object, read a line from it, create another file
object, read another line from it, without loss of data in the
first file object's buffer. Note that httplib relies on this
when reading multiple requests from the same socket."""
bufsize = 0 # Use unbuffered mode
def testUnbufferedReadline(self):
# Read a line, create a new file object, read another line with it
line = self.serv_file.readline() # first line
self.assertEqual(line, "A. " + MSG) # first line
self.serv_file = self.cli_conn.makefile('rb', 0)
line = self.serv_file.readline() # second line
self.assertEqual(line, "B. " + MSG) # second line
def _testUnbufferedReadline(self):
self.cli_file.write("A. " + MSG)
self.cli_file.write("B. " + MSG)
self.cli_file.flush()
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SocketMemo(object):
"""A wrapper to keep track of sent data, needed to examine write behaviour"""
def __init__(self, sock):
self._sock = sock
self.sent = []
def send(self, data, flags=0):
n = self._sock.send(data, flags)
self.sent.append(data[:n])
return n
def sendall(self, data, flags=0):
self._sock.sendall(data, flags)
self.sent.append(data)
def __getattr__(self, attr):
return getattr(self._sock, attr)
def getsent(self):
return [e.tobytes() if isinstance(e, memoryview) else e for e in self.sent]
def setUp(self):
FileObjectClassTestCase.setUp(self)
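        # Wrap the file object's socket so every send()/sendall() it makes is
        # recorded for later inspection.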
self.serv_file._sock = self.SocketMemo(self.serv_file._sock)
def testLinebufferedWrite(self):
# Write two lines, in small chunks
msg = MSG.strip()
print >> self.serv_file, msg,
print >> self.serv_file, msg
# second line:
print >> self.serv_file, msg,
print >> self.serv_file, msg,
print >> self.serv_file, msg
# third line
print >> self.serv_file, ''
self.serv_file.flush()
msg1 = "%s %s\n"%(msg, msg)
msg2 = "%s %s %s\n"%(msg, msg, msg)
msg3 = "\n"
self.assertEqual(self.serv_file._sock.getsent(), [msg1, msg2, msg3])
def _testLinebufferedWrite(self):
msg = MSG.strip()
msg1 = "%s %s\n"%(msg, msg)
msg2 = "%s %s %s\n"%(msg, msg, msg)
msg3 = "\n"
l1 = self.cli_file.readline()
self.assertEqual(l1, msg1)
l2 = self.cli_file.readline()
self.assertEqual(l2, msg2)
l3 = self.cli_file.readline()
self.assertEqual(l3, msg3)
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 2 # Exercise the buffering code
class NetworkConnectionTest(object):
"""Prove network connection."""
def clientSetUp(self):
# We're inherited below by BasicTCPTest2, which also inherits
# BasicTCPTest, which defines self.port referenced below.
self.cli = socket.create_connection((HOST, self.port))
self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
"""Tests that NetworkConnection does not break existing TCP functionality.
"""
class NetworkConnectionNoServer(unittest.TestCase):
class MockSocket(socket.socket):
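        # A socket whose connect() always times out, so create_connection()'s
        # timeout handling can be tested without a real server.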
def connect(self, *args):
raise socket.timeout('timed out')
@contextlib.contextmanager
def mocked_socket_module(self):
"""Return a socket which times out on connect"""
old_socket = socket.socket
socket.socket = self.MockSocket
try:
yield
finally:
socket.socket = old_socket
def test_connect(self):
port = test_support.find_unused_port()
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(cli.close)
with self.assertRaises(socket.error) as cm:
cli.connect((HOST, port))
self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
def test_create_connection(self):
# Issue #9792: errors raised by create_connection() should have
# a proper errno attribute.
port = test_support.find_unused_port()
with self.assertRaises(socket.error) as cm:
socket.create_connection((HOST, port))
# Issue #16257: create_connection() calls getaddrinfo() against
# 'localhost'. This may result in an IPV6 addr being returned
# as well as an IPV4 one:
# >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
# >>> [(2, 2, 0, '', ('127.0.0.1', 41230)),
# (26, 2, 0, '', ('::1', 41230, 0, 0))]
#
# create_connection() enumerates through all the addresses returned
# and if it doesn't successfully bind to any of them, it propagates
# the last exception it encountered.
#
# On Solaris, ENETUNREACH is returned in this circumstance instead
# of ECONNREFUSED. So, if that errno exists, add it to our list of
# expected errnos.
expected_errnos = [ errno.ECONNREFUSED, ]
if hasattr(errno, 'ENETUNREACH'):
expected_errnos.append(errno.ENETUNREACH)
self.assertIn(cm.exception.errno, expected_errnos)
def test_create_connection_timeout(self):
# Issue #9792: create_connection() should not recast timeout errors
# as generic socket errors.
with self.mocked_socket_module():
with self.assertRaises(socket.timeout):
socket.create_connection((HOST, 1234))
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.source_port = test_support.find_unused_port()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def _justAccept(self):
conn, addr = self.serv.accept()
conn.close()
testFamily = _justAccept
def _testFamily(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.family, 2)
testSourceAddress = _justAccept
def _testSourceAddress(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30,
source_address=('', self.source_port))
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.getsockname()[1], self.source_port)
# The port number being used is sufficient to show that the bind()
# call happened.
testTimeoutDefault = _justAccept
def _testTimeoutDefault(self):
# passing no explicit timeout uses socket's global default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(42)
try:
self.cli = socket.create_connection((HOST, self.port))
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), 42)
testTimeoutNone = _justAccept
def _testTimeoutNone(self):
# None timeout means the same as sock.settimeout(None)
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
self.cli = socket.create_connection((HOST, self.port), timeout=None)
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), None)
testTimeoutValueNamed = _justAccept
def _testTimeoutValueNamed(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.assertEqual(self.cli.gettimeout(), 30)
testTimeoutValueNonamed = _justAccept
def _testTimeoutValueNonamed(self):
self.cli = socket.create_connection((HOST, self.port), 30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.gettimeout(), 30)
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def testInsideTimeout(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
time.sleep(3)
conn.send("done!")
testOutsideTimeout = testInsideTimeout
def _testInsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port))
data = sock.recv(5)
self.assertEqual(data, "done!")
def _testOutsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
self.assertRaises(socket.timeout, lambda: sock.recv(5))
class Urllib2FileobjectTest(unittest.TestCase):
# urllib2.HTTPHandler has "borrowed" socket._fileobject, and requires that
# it close the socket if the close c'tor argument is true
def testClose(self):
class MockSocket:
closed = False
def flush(self): pass
def close(self): self.closed = True
# must not close unless we request it: the original use of _fileobject
# by module socket requires that the underlying socket not be closed until
# the _socketobject that created the _fileobject is closed
s = MockSocket()
f = socket._fileobject(s)
f.close()
self.assertTrue(not s.closed)
s = MockSocket()
f = socket._fileobject(s, close=True)
f.close()
self.assertTrue(s.closed)
class TCPTimeoutTest(SocketTCPTest):
def testTCPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.accept()
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (TCP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of error (TCP)")
except socket.error:
ok = True
except:
self.fail("caught unexpected exception (TCP)")
if not ok:
self.fail("accept() returned success when we did not expect it")
def testInterruptedTimeout(self):
# XXX I don't know how to do this test on MSWindows or any other
        # platform that doesn't support signal.alarm() or os.kill(), though
# the bug should have existed on all platforms.
if not hasattr(signal, "alarm"):
return # can only test on *nix
self.serv.settimeout(5.0) # must be longer than alarm
class Alarm(Exception):
pass
def alarm_handler(signal, frame):
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
signal.alarm(2) # POSIX allows alarm to be up to 1 second early
try:
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of Alarm")
except Alarm:
pass
except:
self.fail("caught other exception instead of Alarm:"
" %s(%s):\n%s" %
(sys.exc_info()[:2] + (traceback.format_exc(),)))
else:
self.fail("nothing caught")
finally:
signal.alarm(0) # shut off alarm
except Alarm:
self.fail("got Alarm in wrong place")
finally:
# no alarm can be pending. Safe to restore old handler.
signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
def testUDPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (UDP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except socket.timeout:
self.fail("caught timeout instead of error (UDP)")
except socket.error:
ok = True
except:
self.fail("caught unexpected exception (UDP)")
if not ok:
self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
def testExceptionTree(self):
self.assertTrue(issubclass(socket.error, Exception))
self.assertTrue(issubclass(socket.herror, socket.error))
self.assertTrue(issubclass(socket.gaierror, socket.error))
self.assertTrue(issubclass(socket.timeout, socket.error))
class TestLinuxAbstractNamespace(unittest.TestCase):
UNIX_PATH_MAX = 108
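    # Linux abstract-namespace addresses start with a NUL byte and are limited
    # to UNIX_PATH_MAX bytes.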
def testLinuxAbstractNamespace(self):
address = "\x00python-test-hello\x00\xff"
s1 = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s1.bind(address)
s1.listen(1)
s2 = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s2.connect(s1.getsockname())
s1.accept()
self.assertEqual(s1.getsockname(), address)
self.assertEqual(s2.getpeername(), address)
def testMaxName(self):
address = "\x00" + "h" * (self.UNIX_PATH_MAX - 1)
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testNameOverflow(self):
address = "\x00" + "h" * self.UNIX_PATH_MAX
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.assertRaises(socket.error, s.bind, address)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BufferIOTest(SocketConnectedTest):
"""
Test the buffer versions of socket.recv() and socket.send().
"""
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecvIntoArray(self):
buf = array.array('c', ' '*1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf.tostring()[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvIntoArray(self):
with test_support.check_py3k_warnings():
buf = buffer(MSG)
self.serv_conn.send(buf)
def testRecvIntoBytearray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoBytearray = _testRecvIntoArray
def testRecvIntoMemoryview(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoMemoryview = _testRecvIntoArray
def testRecvFromIntoArray(self):
buf = array.array('c', ' '*1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf.tostring()[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvFromIntoArray(self):
with test_support.check_py3k_warnings():
buf = buffer(MSG)
self.serv_conn.send(buf)
def testRecvFromIntoBytearray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoBytearray = _testRecvFromIntoArray
def testRecvFromIntoMemoryview(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoMemoryview = _testRecvFromIntoArray
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
"""Check if the TIPC module is loaded
The TIPC module is not loaded automatically on Ubuntu and probably
other Linux distros.
"""
if not hasattr(socket, "AF_TIPC"):
return False
if not os.path.isfile("/proc/modules"):
return False
with open("/proc/modules") as f:
for line in f:
if line.startswith("tipc "):
return True
if test_support.verbose:
print "TIPC module is not loaded, please 'sudo modprobe tipc'"
return False
class TIPCTest (unittest.TestCase):
def testRDM(self):
srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
srv.bind(srvaddr)
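        # Address the message to a single name that falls inside the name
        # sequence range the server bound to.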
sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + (TIPC_UPPER - TIPC_LOWER) / 2, 0)
cli.sendto(MSG, sendaddr)
msg, recvaddr = srv.recvfrom(1024)
self.assertEqual(cli.getsockname(), recvaddr)
self.assertEqual(msg, MSG)
class TIPCThreadableTest (unittest.TestCase, ThreadableTest):
def __init__(self, methodName = 'runTest'):
unittest.TestCase.__init__(self, methodName = methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
self.srv.bind(srvaddr)
self.srv.listen(5)
self.serverExplicitReady()
self.conn, self.connaddr = self.srv.accept()
def clientSetUp(self):
        # There is a hittable race between serverExplicitReady() and the
# accept() call; sleep a little while to avoid it, otherwise
# we could get an exception
time.sleep(0.1)
self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + (TIPC_UPPER - TIPC_LOWER) / 2, 0)
self.cli.connect(addr)
self.cliaddr = self.cli.getsockname()
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
self.assertEqual(self.cliaddr, self.connaddr)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
def test_main():
tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest,
UDPTimeoutTest ]
tests.extend([
NonBlockingTCPTests,
FileObjectClassTestCase,
FileObjectInterruptedTestCase,
UnbufferedFileObjectClassTestCase,
LineBufferedFileObjectClassTestCase,
SmallBufferedFileObjectClassTestCase,
Urllib2FileobjectTest,
NetworkConnectionNoServer,
NetworkConnectionAttributesTest,
NetworkConnectionBehaviourTest,
])
if hasattr(socket, "socketpair"):
tests.append(BasicSocketPairTest)
if sys.platform == 'linux2':
tests.append(TestLinuxAbstractNamespace)
if isTipcAvailable():
tests.append(TIPCTest)
tests.append(TIPCThreadableTest)
thread_info = test_support.threading_setup()
test_support.run_unittest(*tests)
test_support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
| mit |
tunneln/CarnotKE | jyhton/Lib/test/test_StringIO_jy.py | 9 | 1994 | import unittest
import cStringIO
from test import test_support
class TestUnicodeInput(unittest.TestCase):
def test_differences_handling_unicode(self):
# Test for the "feature" described on #1089.
#
# Basically, StringIO returns unicode objects if you feed it unicode,
        # but cStringIO doesn't. This should change in future versions of
# CPython and Jython.
self.assertEqual(u'foo', cStringIO.StringIO(u'foo').read())
self.assertEqual('foo', cStringIO.StringIO(u'foo').read())
class TestWrite(unittest.TestCase):
def test_write_seek_write(self):
f = cStringIO.StringIO()
f.write('hello')
f.seek(2)
f.write('hi')
self.assertEquals(f.getvalue(), 'hehio')
#XXX: this should get pushed to CPython's test_StringIO
def test_write_past_end(self):
f = cStringIO.StringIO()
f.write("abcdef")
f.seek(10)
f.write("uvwxyz")
self.assertEqual(f.getvalue(), 'abcdef\x00\x00\x00\x00uvwxyz')
def test_write_seek_back_then_write(self):
# http://bugs.jython.org/issue2324
s = "abcdef"
for i in xrange(len(s)):
f = cStringIO.StringIO()
f.write(s)
f.seek(i)
f.write("x" * 47)
self.assertEqual(f.getvalue(), s[:i] + ("x" * 47))
class TestGetValueAfterClose(unittest.TestCase):
    # This test, or something like it, should really be pushed upstream
def test_getvalue_after_close(self):
f = cStringIO.StringIO('hello')
f.getvalue()
f.close()
try:
f.getvalue()
except ValueError:
pass
else:
self.fail("cStringIO.StringIO: getvalue() after close() should have raised ValueError")
def test_main():
test_support.run_unittest(TestUnicodeInput)
test_support.run_unittest(TestWrite)
test_support.run_unittest(TestGetValueAfterClose)
if __name__ == '__main__':
test_main()
| apache-2.0 |
jpwhite3/python-whirlwind-tour | examples/lab4.py | 1 | 1539 | from __future__ import print_function
import sys
import re
import glob
import argparse
def eprint(*args, **kwargs):
# Print to STDERR instead of STDOUT
print(*args, file=sys.stderr, **kwargs)
def grep(expression, filepath, ignorecase=False, invert=False):
raw_expression = re.escape(expression)
with open(filepath) as file:
for line in file:
# Enable case matching?
if ignorecase:
matches = re.search(raw_expression, line, re.I)
else:
matches = re.search(raw_expression, line)
# Invert matches if need be and print
if matches and not invert:
print(line)
elif invert and not matches:
print(line)
def main():
parser = argparse.ArgumentParser(description='This is a pure Python based clone of the GREP command')
parser.add_argument('expression', action="store", type=str, help="Regular expression to match against")
parser.add_argument('filepath', action="store", type=str, help="Path to file to search in. supports wildcard globs")
parser.add_argument('-i', action="store_true", default=False, dest="ignorecase", help="Ignore case")
parser.add_argument('-v', action="store_true", default=False, dest="invert", help="Show lines that don't match")
args = parser.parse_args()
file_list = glob.glob(args.filepath)
for f in file_list:
if len(file_list) > 1:
eprint("\nResults for file: %s" % f)
eprint("-"*(len(f)+18))
grep(args.expression, f, ignorecase=args.ignorecase, invert=args.invert)
if __name__ == '__main__':
main()
| cc0-1.0 |
Nexenta/cinder | cinder/tests/unit/volume/drivers/emc/vnx/test_taskflows.py | 5 | 6841 | # Copyright (c) 2016 EMC Corporation, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import taskflow.engines
from taskflow.patterns import linear_flow
from taskflow.types import failure
from cinder import test
from cinder.tests.unit.volume.drivers.emc.vnx import fake_exception as vnx_ex
from cinder.tests.unit.volume.drivers.emc.vnx import res_mock
import cinder.volume.drivers.emc.vnx.taskflows as vnx_taskflow
class TestTaskflow(test.TestCase):
def setUp(self):
super(TestTaskflow, self).setUp()
self.work_flow = linear_flow.Flow('test_task')
@res_mock.patch_client
def test_copy_snapshot_task(self, client, mocked):
store_spec = {'client': client,
'snap_name': 'original_name',
'new_snap_name': 'new_name'
}
self.work_flow.add(vnx_taskflow.CopySnapshotTask())
engine = taskflow.engines.load(self.work_flow,
store=store_spec)
engine.run()
@res_mock.patch_client
def test_copy_snapshot_task_revert(self, client, mocked):
store_spec = {'client': client,
'snap_name': 'original_name',
'new_snap_name': 'new_name'
}
self.work_flow.add(vnx_taskflow.CopySnapshotTask())
engine = taskflow.engines.load(self.work_flow,
store=store_spec)
self.assertRaises(vnx_ex.VNXSnapError,
engine.run)
@res_mock.patch_client
def test_create_smp_task(self, client, mocked):
store_spec = {
'client': client,
'smp_name': 'mount_point_name',
'base_lun_name': 'base_name'
}
self.work_flow.add(vnx_taskflow.CreateSMPTask())
engine = taskflow.engines.load(self.work_flow,
store=store_spec)
engine.run()
smp_id = engine.storage.fetch('smp_id')
self.assertEqual(15, smp_id)
@res_mock.patch_client
def test_create_smp_task_revert(self, client, mocked):
store_spec = {
'client': client,
'smp_name': 'mount_point_name',
'base_lun_name': 'base_name'
}
self.work_flow.add(vnx_taskflow.CreateSMPTask())
engine = taskflow.engines.load(self.work_flow,
store=store_spec)
self.assertRaises(vnx_ex.VNXCreateLunError,
engine.run)
smp_id = engine.storage.fetch('smp_id')
self.assertIsInstance(smp_id, failure.Failure)
@res_mock.patch_client
def test_attach_snap_task(self, client, mocked):
store_spec = {
'client': client,
'smp_name': 'mount_point_name',
'snap_name': 'snap_name'
}
self.work_flow.add(vnx_taskflow.AttachSnapTask())
engine = taskflow.engines.load(self.work_flow,
store=store_spec)
engine.run()
@res_mock.patch_client
def test_attach_snap_task_revert(self, client, mocked):
store_spec = {
'client': client,
'smp_name': 'mount_point_name',
'snap_name': 'snap_name'
}
self.work_flow.add(vnx_taskflow.AttachSnapTask())
engine = taskflow.engines.load(self.work_flow,
store=store_spec)
self.assertRaises(vnx_ex.VNXAttachSnapError,
engine.run)
@res_mock.patch_client
def test_create_snapshot_task(self, client, mocked):
store_spec = {
'client': client,
'lun_id': 12,
'snap_name': 'snap_name'
}
self.work_flow.add(vnx_taskflow.CreateSnapshotTask())
engine = taskflow.engines.load(self.work_flow,
store=store_spec)
engine.run()
@res_mock.patch_client
def test_create_snapshot_task_revert(self, client, mocked):
store_spec = {
'client': client,
'lun_id': 13,
'snap_name': 'snap_name'
}
self.work_flow.add(vnx_taskflow.CreateSnapshotTask())
engine = taskflow.engines.load(self.work_flow,
store=store_spec)
self.assertRaises(vnx_ex.VNXCreateSnapError,
engine.run)
@res_mock.patch_client
def test_allow_read_write_task(self, client, mocked):
store_spec = {
'client': client,
'snap_name': 'snap_name'
}
self.work_flow.add(vnx_taskflow.AllowReadWriteTask())
engine = taskflow.engines.load(self.work_flow,
store=store_spec)
engine.run()
@res_mock.patch_client
def test_allow_read_write_task_revert(self, client, mocked):
store_spec = {
'client': client,
'snap_name': 'snap_name'
}
self.work_flow.add(vnx_taskflow.AllowReadWriteTask())
engine = taskflow.engines.load(self.work_flow,
store=store_spec)
self.assertRaises(vnx_ex.VNXSnapError,
engine.run)
@res_mock.patch_client
def test_create_cg_snapshot_task(self, client, mocked):
store_spec = {
'client': client,
'cg_name': 'test_cg',
'cg_snap_name': 'my_snap_name'
}
self.work_flow.add(vnx_taskflow.CreateCGSnapshotTask())
engine = taskflow.engines.load(self.work_flow,
store=store_spec)
engine.run()
snap_name = engine.storage.fetch('new_cg_snap_name')
self.assertIsInstance(snap_name, res_mock.StorageObjectMock)
@res_mock.patch_client
def test_create_cg_snapshot_task_revert(self, client, mocked):
store_spec = {
'client': client,
'cg_name': 'test_cg',
'cg_snap_name': 'my_snap_name'
}
self.work_flow.add(vnx_taskflow.CreateCGSnapshotTask())
engine = taskflow.engines.load(self.work_flow,
store=store_spec)
self.assertRaises(vnx_ex.VNXCreateSnapError,
engine.run)
| apache-2.0 |
lifeinoppo/littlefishlet-scode | RES/REF/python_sourcecode/ipython-master/IPython/utils/process.py | 17 | 2937 | # encoding: utf-8
"""
Utilities for working with external processes.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import os
import sys
if sys.platform == 'win32':
from ._process_win32 import system, getoutput, arg_split, check_pid
elif sys.platform == 'cli':
from ._process_cli import system, getoutput, arg_split, check_pid
else:
from ._process_posix import system, getoutput, arg_split, check_pid
from ._process_common import getoutputerror, get_output_error_code, process_handler
from . import py3compat
class FindCmdError(Exception):
pass
def find_cmd(cmd):
"""Find absolute path to executable cmd in a cross platform manner.
This function tries to determine the full path to a command line program
using `which` on Unix/Linux/OS X and `win32api` on Windows. Most of the
    time it will use the version that is first on the user's `PATH`.
Warning, don't use this to find IPython command line programs as there
is a risk you will find the wrong one. Instead find those using the
following code and looking for the application itself::
from IPython.utils.path import get_ipython_module_path
from IPython.utils.process import pycmd2argv
argv = pycmd2argv(get_ipython_module_path('IPython.terminal.ipapp'))
Parameters
----------
cmd : str
The command line program to look for.
"""
path = py3compat.which(cmd)
if path is None:
raise FindCmdError('command could not be found: %s' % cmd)
return path
def is_cmd_found(cmd):
"""Check whether executable `cmd` exists or not and return a bool."""
try:
find_cmd(cmd)
return True
except FindCmdError:
return False
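# Minimal usage sketch for find_cmd/is_cmd_found (illustrative only; 'git' is
# just an example command name, not something this module depends on).
def _example_locate(cmd='git'):
    """Return the absolute path of `cmd` if it is installed, else None."""
    if is_cmd_found(cmd):
        return find_cmd(cmd)
    return None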
def pycmd2argv(cmd):
r"""Take the path of a python command and return a list (argv-style).
This only works on Python based command line programs and will find the
location of the ``python`` executable using ``sys.executable`` to make
sure the right version is used.
For a given path ``cmd``, this returns [cmd] if cmd's extension is .exe,
.com or .bat, and [, cmd] otherwise.
Parameters
----------
cmd : string
The path of the command.
Returns
-------
argv-style list.
"""
ext = os.path.splitext(cmd)[1]
if ext in ['.exe', '.com', '.bat']:
return [cmd]
else:
return [sys.executable, cmd]
def abbrev_cwd():
""" Return abbreviated version of cwd, e.g. d:mydir """
cwd = py3compat.getcwd().replace('\\','/')
drivepart = ''
tail = cwd
if sys.platform == 'win32':
if len(cwd) < 4:
return cwd
drivepart,tail = os.path.splitdrive(cwd)
parts = tail.split('/')
if len(parts) > 2:
tail = '/'.join(parts[-2:])
return (drivepart + (
cwd == '/' and '/' or tail))
| gpl-2.0 |
galfaroi/trading-with-python | lib/extra.py | 77 | 2540 | '''
Created on Apr 28, 2013
Copyright: Jev Kuznetsov
License: BSD
'''
from __future__ import print_function
import sys
import urllib
import os
import xlrd # module for excel file reading
import pandas as pd
class ProgressBar:
def __init__(self, iterations):
self.iterations = iterations
self.prog_bar = '[]'
self.fill_char = '*'
self.width = 50
self.__update_amount(0)
def animate(self, iteration):
print('\r', self, end='')
sys.stdout.flush()
self.update_iteration(iteration + 1)
def update_iteration(self, elapsed_iter):
self.__update_amount((elapsed_iter / float(self.iterations)) * 100.0)
self.prog_bar += ' %d of %s complete' % (elapsed_iter, self.iterations)
def __update_amount(self, new_amount):
percent_done = int(round((new_amount / 100.0) * 100.0))
all_full = self.width - 2
num_hashes = int(round((percent_done / 100.0) * all_full))
self.prog_bar = '[' + self.fill_char * num_hashes + ' ' * (all_full - num_hashes) + ']'
pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))
pct_string = '%d%%' % percent_done
self.prog_bar = self.prog_bar[0:pct_place] + \
(pct_string + self.prog_bar[pct_place + len(pct_string):])
def __str__(self):
return str(self.prog_bar)
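# Minimal usage sketch for ProgressBar (illustrative only; the loop length is
# arbitrary): animate a fixed-length loop on the console.
def _exampleProgressBar(iterations=100):
    pb = ProgressBar(iterations)
    for i in range(iterations):
        pb.animate(i)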
def getSpyHoldings(dataDir):
''' get SPY holdings from the net, uses temp data storage to save xls file '''
dest = os.path.join(dataDir,"spy_holdings.xls")
if os.path.exists(dest):
print('File found, skipping download')
else:
print('saving to', dest)
urllib.urlretrieve ("https://www.spdrs.com/site-content/xls/SPY_All_Holdings.xls?fund=SPY&docname=All+Holdings&onyx_code1=1286&onyx_code2=1700",
dest) # download xls file and save it to data directory
# parse
wb = xlrd.open_workbook(dest) # open xls file, create a workbook
sh = wb.sheet_by_index(0) # select first sheet
data = {'name':[], 'symbol':[], 'weight':[],'sector':[]}
for rowNr in range(5,505): # cycle through the rows
v = sh.row_values(rowNr) # get all row values
data['name'].append(v[0])
data['symbol'].append(v[1]) # symbol is in the second column, append it to the list
data['weight'].append(float(v[2]))
data['sector'].append(v[3])
return pd.DataFrame(data)
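# Illustrative usage sketch for getSpyHoldings; the data directory below is an
# arbitrary example path, not a value this module mandates.
def _exampleSpyHoldings(dataDir='/tmp'):
    df = getSpyHoldings(dataDir)
    print(df.head()) # name, symbol, weight and sector of the first holdings
    return df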
| bsd-3-clause |
ayoubg/gem5-graphics | gem5-gpu/tests/quick/se_gpu/10.backprop/test.py | 1 | 1654 | # Copyright (c) 2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Joel Hestness
options.clusters = 4
options.cmd = 'gem5_gpu_backprop'
options.options = '256'
| bsd-3-clause |
bcarr092/pyCovertAudio | src/pyCovertAudio/BFSKModulator.py | 1 | 2146 | from pyCovertAudio_lib import *
from BaseModulator import BaseModulator
from SignalFunctions import SignalFunctions
class BFSKModulator(BaseModulator):
def __init__(
self, bitsPerSymbol, sampleRate, samplesPerSymbol,
symbolExpansionFactor, separationIntervals, configuration
):
BaseModulator.__init__(
self,
bitsPerSymbol,
sampleRate,
samplesPerSymbol,
symbolExpansionFactor,
separationIntervals,
configuration
)
(
self.symbol0Frequency,
self.symbol1Frequency,
self.deltaFrequency,
self.bandwidth
) = \
python_BFSK_determine_frequencies(
self.samplesPerSymbol,
self.sampleRate,
self.carrierFrequency,
self.separationIntervals
)
def modulate(self, symbolSequence, signal, sentinel=None):
symbolSignalLength = self.samplesPerSymbol * self.symbolExpansionFactor
for symbol in symbolSequence:
symbolFrequency = self.carrierFrequency
if(symbol == 1):
symbolFrequency += self.symbol1Frequency
else:
symbolFrequency += self.symbol0Frequency
x = \
SignalFunctions.modulateFSK(
symbolSignalLength, self.sampleRate, [symbolFrequency]
)
signal.extend(x[: self.samplesPerSymbol])
signal.extend(
[0.0 for i in range(
(self.symbolExpansionFactor - 1) * self.samplesPerSymbol)]
)
def toString(self):
return (
"Modulator:\n\tAlgorithm:\t\t\tBFSK\n\tSymbol 0 frequency:\t\t"
"%.02f\n\tSymbol 1 frequency:\t\t%.02f\n\tMin frequency"
" separation:\t%.02f\n\tBandwidth:\t\t\t%.02f\n%s"
% (
self.symbol0Frequency,
self.symbol1Frequency,
self.deltaFrequency,
self.bandwidth,
BaseModulator.toString(self)
)
)
| apache-2.0 |
Khan/git-bigfile | vendor/boto/datapipeline/exceptions.py | 235 | 1471 | # Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.exception import JSONResponseError
class PipelineDeletedException(JSONResponseError):
pass
class InvalidRequestException(JSONResponseError):
pass
class TaskNotFoundException(JSONResponseError):
pass
class PipelineNotFoundException(JSONResponseError):
pass
class InternalServiceError(JSONResponseError):
pass
| mit |
40123112/w17exam | static/Brython3.1.3-20150514-095342/Lib/unittest/loader.py | 739 | 13883 | """Loading unittests."""
import os
import re
import sys
import traceback
import types
import functools
from fnmatch import fnmatch
from . import case, suite, util
__unittest = True
# what about .pyc or .pyo (etc)
# we would need to avoid loading the same tests multiple times
# from '.py', '.pyc' *and* '.pyo'
VALID_MODULE_NAME = re.compile(r'[_a-z]\w*\.py$', re.IGNORECASE)
def _make_failed_import_test(name, suiteClass):
message = 'Failed to import test module: %s\n%s' % (name, traceback.format_exc())
return _make_failed_test('ModuleImportFailure', name, ImportError(message),
suiteClass)
def _make_failed_load_tests(name, exception, suiteClass):
return _make_failed_test('LoadTestsFailure', name, exception, suiteClass)
def _make_failed_test(classname, methodname, exception, suiteClass):
def testFailure(self):
raise exception
attrs = {methodname: testFailure}
TestClass = type(classname, (case.TestCase,), attrs)
return suiteClass((TestClass(methodname),))
def _jython_aware_splitext(path):
if path.lower().endswith('$py.class'):
return path[:-9]
return os.path.splitext(path)[0]
class TestLoader(object):
"""
This class is responsible for loading tests according to various criteria
and returning them wrapped in a TestSuite
"""
testMethodPrefix = 'test'
sortTestMethodsUsing = staticmethod(util.three_way_cmp)
suiteClass = suite.TestSuite
_top_level_dir = None
def loadTestsFromTestCase(self, testCaseClass):
"""Return a suite of all tests cases contained in testCaseClass"""
if issubclass(testCaseClass, suite.TestSuite):
raise TypeError("Test cases should not be derived from TestSuite." \
" Maybe you meant to derive from TestCase?")
testCaseNames = self.getTestCaseNames(testCaseClass)
if not testCaseNames and hasattr(testCaseClass, 'runTest'):
testCaseNames = ['runTest']
loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames))
return loaded_suite
def loadTestsFromModule(self, module, use_load_tests=True):
"""Return a suite of all tests cases contained in the given module"""
tests = []
for name in dir(module):
obj = getattr(module, name)
if isinstance(obj, type) and issubclass(obj, case.TestCase):
tests.append(self.loadTestsFromTestCase(obj))
load_tests = getattr(module, 'load_tests', None)
tests = self.suiteClass(tests)
if use_load_tests and load_tests is not None:
try:
return load_tests(self, tests, None)
except Exception as e:
return _make_failed_load_tests(module.__name__, e,
self.suiteClass)
return tests
def loadTestsFromName(self, name, module=None):
"""Return a suite of all tests cases given a string specifier.
The name may resolve either to a module, a test case class, a
test method within a test case class, or a callable object which
returns a TestCase or TestSuite instance.
The method optionally resolves the names relative to a given module.
"""
parts = name.split('.')
if module is None:
parts_copy = parts[:]
while parts_copy:
try:
module = __import__('.'.join(parts_copy))
break
except ImportError:
del parts_copy[-1]
if not parts_copy:
raise
parts = parts[1:]
obj = module
for part in parts:
parent, obj = obj, getattr(obj, part)
if isinstance(obj, types.ModuleType):
return self.loadTestsFromModule(obj)
elif isinstance(obj, type) and issubclass(obj, case.TestCase):
return self.loadTestsFromTestCase(obj)
elif (isinstance(obj, types.FunctionType) and
isinstance(parent, type) and
issubclass(parent, case.TestCase)):
name = parts[-1]
inst = parent(name)
# static methods follow a different path
if not isinstance(getattr(inst, name), types.FunctionType):
return self.suiteClass([inst])
elif isinstance(obj, suite.TestSuite):
return obj
if callable(obj):
test = obj()
if isinstance(test, suite.TestSuite):
return test
elif isinstance(test, case.TestCase):
return self.suiteClass([test])
else:
raise TypeError("calling %s returned %s, not a test" %
(obj, test))
else:
raise TypeError("don't know how to make test from: %s" % obj)
def loadTestsFromNames(self, names, module=None):
"""Return a suite of all tests cases found using the given sequence
of string specifiers. See 'loadTestsFromName()'.
"""
suites = [self.loadTestsFromName(name, module) for name in names]
return self.suiteClass(suites)
def getTestCaseNames(self, testCaseClass):
"""Return a sorted sequence of method names found within testCaseClass
"""
def isTestMethod(attrname, testCaseClass=testCaseClass,
prefix=self.testMethodPrefix):
return attrname.startswith(prefix) and \
callable(getattr(testCaseClass, attrname))
testFnNames = list(filter(isTestMethod, dir(testCaseClass)))
if self.sortTestMethodsUsing:
testFnNames.sort(key=functools.cmp_to_key(self.sortTestMethodsUsing))
return testFnNames
def discover(self, start_dir, pattern='test*.py', top_level_dir=None):
"""Find and return all test modules from the specified start
directory, recursing into subdirectories to find them and return all
tests found within them. Only test files that match the pattern will
be loaded. (Using shell style pattern matching.)
All test modules must be importable from the top level of the project.
If the start directory is not the top level directory then the top
level directory must be specified separately.
If a test package name (directory with '__init__.py') matches the
pattern then the package will be checked for a 'load_tests' function. If
this exists then it will be called with loader, tests, pattern.
If load_tests exists then discovery does *not* recurse into the package,
load_tests is responsible for loading all tests in the package.
The pattern is deliberately not stored as a loader attribute so that
packages can continue discovery themselves. top_level_dir is stored so
load_tests does not need to pass this argument in to loader.discover().
"""
set_implicit_top = False
if top_level_dir is None and self._top_level_dir is not None:
# make top_level_dir optional if called from load_tests in a package
top_level_dir = self._top_level_dir
elif top_level_dir is None:
set_implicit_top = True
top_level_dir = start_dir
top_level_dir = os.path.abspath(top_level_dir)
if not top_level_dir in sys.path:
# all test modules must be importable from the top level directory
# should we *unconditionally* put the start directory in first
# in sys.path to minimise likelihood of conflicts between installed
# modules and development versions?
sys.path.insert(0, top_level_dir)
self._top_level_dir = top_level_dir
is_not_importable = False
if os.path.isdir(os.path.abspath(start_dir)):
start_dir = os.path.abspath(start_dir)
if start_dir != top_level_dir:
is_not_importable = not os.path.isfile(os.path.join(start_dir, '__init__.py'))
else:
# support for discovery from dotted module names
try:
__import__(start_dir)
except ImportError:
is_not_importable = True
else:
the_module = sys.modules[start_dir]
top_part = start_dir.split('.')[0]
start_dir = os.path.abspath(os.path.dirname((the_module.__file__)))
if set_implicit_top:
self._top_level_dir = self._get_directory_containing_module(top_part)
sys.path.remove(top_level_dir)
if is_not_importable:
raise ImportError('Start directory is not importable: %r' % start_dir)
tests = list(self._find_tests(start_dir, pattern))
return self.suiteClass(tests)
def _get_directory_containing_module(self, module_name):
module = sys.modules[module_name]
full_path = os.path.abspath(module.__file__)
if os.path.basename(full_path).lower().startswith('__init__.py'):
return os.path.dirname(os.path.dirname(full_path))
else:
# here we have been given a module rather than a package - so
# all we can do is search the *same* directory the module is in
# should an exception be raised instead
return os.path.dirname(full_path)
def _get_name_from_path(self, path):
path = _jython_aware_splitext(os.path.normpath(path))
_relpath = os.path.relpath(path, self._top_level_dir)
assert not os.path.isabs(_relpath), "Path must be within the project"
assert not _relpath.startswith('..'), "Path must be within the project"
name = _relpath.replace(os.path.sep, '.')
return name
def _get_module_from_name(self, name):
__import__(name)
return sys.modules[name]
def _match_path(self, path, full_path, pattern):
# override this method to use alternative matching strategy
return fnmatch(path, pattern)
def _find_tests(self, start_dir, pattern):
"""Used by discovery. Yields test suites it loads."""
paths = os.listdir(start_dir)
for path in paths:
full_path = os.path.join(start_dir, path)
if os.path.isfile(full_path):
if not VALID_MODULE_NAME.match(path):
# valid Python identifiers only
continue
if not self._match_path(path, full_path, pattern):
continue
# if the test file matches, load it
name = self._get_name_from_path(full_path)
try:
module = self._get_module_from_name(name)
except:
yield _make_failed_import_test(name, self.suiteClass)
else:
mod_file = os.path.abspath(getattr(module, '__file__', full_path))
realpath = _jython_aware_splitext(os.path.realpath(mod_file))
fullpath_noext = _jython_aware_splitext(os.path.realpath(full_path))
if realpath.lower() != fullpath_noext.lower():
module_dir = os.path.dirname(realpath)
mod_name = _jython_aware_splitext(os.path.basename(full_path))
expected_dir = os.path.dirname(full_path)
msg = ("%r module incorrectly imported from %r. Expected %r. "
"Is this module globally installed?")
raise ImportError(msg % (mod_name, module_dir, expected_dir))
yield self.loadTestsFromModule(module)
elif os.path.isdir(full_path):
if not os.path.isfile(os.path.join(full_path, '__init__.py')):
continue
load_tests = None
tests = None
if fnmatch(path, pattern):
# only check load_tests if the package directory itself matches the filter
name = self._get_name_from_path(full_path)
package = self._get_module_from_name(name)
load_tests = getattr(package, 'load_tests', None)
tests = self.loadTestsFromModule(package, use_load_tests=False)
if load_tests is None:
if tests is not None:
# tests loaded from package file
yield tests
# recurse into the package
for test in self._find_tests(full_path, pattern):
yield test
else:
try:
yield load_tests(self, tests, pattern)
except Exception as e:
yield _make_failed_load_tests(package.__name__, e,
self.suiteClass)
defaultTestLoader = TestLoader()
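# Illustrative sketch of typical use of the shared loader above (the start
# directory and pattern are example values): discover tests and report how
# many cases were found.
def _example_discover(start_dir='.', pattern='test*.py'):
    suite = defaultTestLoader.discover(start_dir, pattern=pattern)
    return suite.countTestCases()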
def _makeLoader(prefix, sortUsing, suiteClass=None):
loader = TestLoader()
loader.sortTestMethodsUsing = sortUsing
loader.testMethodPrefix = prefix
if suiteClass:
loader.suiteClass = suiteClass
return loader
def getTestCaseNames(testCaseClass, prefix, sortUsing=util.three_way_cmp):
return _makeLoader(prefix, sortUsing).getTestCaseNames(testCaseClass)
def makeSuite(testCaseClass, prefix='test', sortUsing=util.three_way_cmp,
suiteClass=suite.TestSuite):
return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromTestCase(
testCaseClass)
def findTestCases(module, prefix='test', sortUsing=util.three_way_cmp,
suiteClass=suite.TestSuite):
return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromModule(\
module)
| agpl-3.0 |
sebrandon1/nova | nova/virt/libvirt/designer.py | 5 | 5322 | # Copyright (C) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Policy based configuration of libvirt objects
This module provides helper APIs for populating the config.py
classes based on common operational needs / policies
"""
import six
from nova.pci import utils as pci_utils
def set_vif_guest_frontend_config(conf, mac, model, driver, queues=None):
"""Populate a LibvirtConfigGuestInterface instance
with guest frontend details.
"""
conf.mac_addr = mac
if model is not None:
conf.model = model
if driver is not None:
conf.driver_name = driver
if queues is not None:
conf.vhost_queues = queues
def set_vif_host_backend_bridge_config(conf, brname, tapname=None):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for a software bridge.
"""
conf.net_type = "bridge"
conf.source_dev = brname
if tapname:
conf.target_dev = tapname
def set_vif_host_backend_ethernet_config(conf, tapname):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for an externally configured
host device.
    NB: use of this configuration is discouraged by
    the libvirt project and will mark domains as 'tainted'.
"""
conf.net_type = "ethernet"
conf.target_dev = tapname
conf.script = ""
def set_vif_host_backend_802qbg_config(conf, devname, managerid,
typeid, typeidversion,
instanceid, tapname=None):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for an 802.1qbg device.
"""
conf.net_type = "direct"
conf.source_dev = devname
conf.source_mode = "vepa"
conf.vporttype = "802.1Qbg"
conf.add_vport_param("managerid", managerid)
conf.add_vport_param("typeid", typeid)
conf.add_vport_param("typeidversion", typeidversion)
conf.add_vport_param("instanceid", instanceid)
if tapname:
conf.target_dev = tapname
def set_vif_host_backend_802qbh_config(conf, net_type, devname, profileid,
tapname=None):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for an 802.1qbh device.
"""
conf.net_type = net_type
if net_type == 'direct':
conf.source_mode = 'passthrough'
conf.source_dev = pci_utils.get_ifname_by_pci_address(devname)
conf.driver_name = 'vhost'
else:
conf.source_dev = devname
conf.model = None
conf.vporttype = "802.1Qbh"
conf.add_vport_param("profileid", profileid)
if tapname:
conf.target_dev = tapname
def set_vif_host_backend_hw_veb(conf, net_type, devname, vlan,
tapname=None):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for an device that supports hardware
virtual ethernet bridge.
"""
conf.net_type = net_type
if net_type == 'direct':
conf.source_mode = 'passthrough'
conf.source_dev = pci_utils.get_ifname_by_pci_address(devname)
conf.driver_name = 'vhost'
else:
conf.source_dev = devname
conf.model = None
conf.vlan = vlan
if tapname:
conf.target_dev = tapname
def set_vif_host_backend_hostdev_pci_config(conf, pci_slot):
"""Populate a LibvirtConfigGuestHostdev instance with pci address data."""
conf.domain, conf.bus, conf.slot, conf.function = (
pci_utils.get_pci_address_fields(pci_slot))
def set_vif_host_backend_direct_config(conf, devname, mode="passthrough"):
"""Populate a LibvirtConfigGuestInterface instance
with direct Interface.
"""
conf.net_type = "direct"
conf.source_mode = mode
conf.source_dev = devname
conf.model = "virtio"
def set_vif_host_backend_vhostuser_config(conf, mode, path):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for vhostuser socket.
"""
conf.net_type = "vhostuser"
conf.vhostuser_type = "unix"
conf.vhostuser_mode = mode
conf.vhostuser_path = path
def set_vif_bandwidth_config(conf, inst_type):
"""Config vif inbound/outbound bandwidth limit. parameters are
set in instance_type_extra_specs table, key is in the format
quota:vif_inbound_average.
"""
bandwidth_items = ['vif_inbound_average', 'vif_inbound_peak',
'vif_inbound_burst', 'vif_outbound_average', 'vif_outbound_peak',
'vif_outbound_burst']
for key, value in six.iteritems(inst_type.get('extra_specs', {})):
scope = key.split(':')
if len(scope) > 1 and scope[0] == 'quota':
if scope[1] in bandwidth_items:
setattr(conf, scope[1], value)
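# Illustrative sketch for set_vif_bandwidth_config (assumptions: the flavor
# dict shape and quota values are made up, and _ExampleConf is only a stand-in
# for a LibvirtConfigGuestInterface): quota:vif_* extra_specs keys become
# attributes on the config object.
class _ExampleConf(object):
    pass
def _example_bandwidth_config():
    inst_type = {'extra_specs': {'quota:vif_inbound_average': '1024',
                                 'quota:vif_outbound_average': '2048'}}
    conf = _ExampleConf()
    set_vif_bandwidth_config(conf, inst_type)
    return conf.vif_inbound_average, conf.vif_outbound_average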
| apache-2.0 |
nirmeshk/oh-mainline | vendor/packages/html5lib/html5lib/tests/test_serializer.py | 72 | 7831 | import os
import unittest
from support import html5lib_test_files
try:
import json
except ImportError:
import simplejson as json
import html5lib
from html5lib import html5parser, serializer, constants
from html5lib.treewalkers._base import TreeWalker
optionals_loaded = []
try:
from lxml import etree
optionals_loaded.append("lxml")
except ImportError:
pass
default_namespace = constants.namespaces["html"]
class JsonWalker(TreeWalker):
def __iter__(self):
for token in self.tree:
type = token[0]
if type == "StartTag":
if len(token) == 4:
namespace, name, attrib = token[1:4]
else:
namespace = default_namespace
name, attrib = token[1:3]
yield self.startTag(namespace, name, self._convertAttrib(attrib))
elif type == "EndTag":
if len(token) == 3:
namespace, name = token[1:3]
else:
namespace = default_namespace
name = token[1]
yield self.endTag(namespace, name)
elif type == "EmptyTag":
if len(token) == 4:
namespace, name, attrib = token[1:]
else:
namespace = default_namespace
name, attrib = token[1:]
for token in self.emptyTag(namespace, name, self._convertAttrib(attrib)):
yield token
elif type == "Comment":
yield self.comment(token[1])
elif type in ("Characters", "SpaceCharacters"):
for token in self.text(token[1]):
yield token
elif type == "Doctype":
if len(token) == 4:
yield self.doctype(token[1], token[2], token[3])
elif len(token) == 3:
yield self.doctype(token[1], token[2])
else:
yield self.doctype(token[1])
else:
raise ValueError("Unknown token type: " + type)
def _convertAttrib(self, attribs):
"""html5lib tree-walkers use a dict of (namespace, name): value for
attributes, but JSON cannot represent this. Convert from the format
in the serializer tests (a list of dicts with "namespace", "name",
and "value" as keys) to html5lib's tree-walker format."""
attrs = {}
for attrib in attribs:
name = (attrib["namespace"], attrib["name"])
assert(name not in attrs)
attrs[name] = attrib["value"]
return attrs
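# Illustrative sketch of the conversion performed by _convertAttrib (the
# attribute values are arbitrary): the test data lists dicts with "namespace",
# "name" and "value" keys, the tree-walker wants {(namespace, name): value}.
def _example_convert_attrib():
    walker = JsonWalker([])
    attribs = [{"namespace": None, "name": "class", "value": "foo"}]
    return walker._convertAttrib(attribs)  # {(None, 'class'): 'foo'}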
def serialize_html(input, options):
options = dict([(str(k),v) for k,v in options.iteritems()])
return serializer.HTMLSerializer(**options).render(JsonWalker(input),options.get("encoding",None))
def serialize_xhtml(input, options):
options = dict([(str(k),v) for k,v in options.iteritems()])
return serializer.XHTMLSerializer(**options).render(JsonWalker(input),options.get("encoding",None))
def make_test(input, expected, xhtml, options):
result = serialize_html(input, options)
if len(expected) == 1:
assert expected[0] == result, "Expected:\n%s\nActual:\n%s\nOptions\nxhtml:False\n%s"%(expected[0], result, str(options))
elif result not in expected:
assert False, "Expected: %s, Received: %s" % (expected, result)
if not xhtml:
return
result = serialize_xhtml(input, options)
if len(xhtml) == 1:
assert xhtml[0] == result, "Expected:\n%s\nActual:\n%s\nOptions\nxhtml:True\n%s"%(xhtml[0], result, str(options))
elif result not in xhtml:
assert False, "Expected: %s, Received: %s" % (xhtml, result)
class EncodingTestCase(unittest.TestCase):
def throwsWithLatin1(self, input):
self.assertRaises(UnicodeEncodeError, serialize_html, input, {"encoding": "iso-8859-1"})
def testDoctypeName(self):
self.throwsWithLatin1([["Doctype", u"\u0101"]])
def testDoctypePublicId(self):
self.throwsWithLatin1([["Doctype", u"potato", u"\u0101"]])
def testDoctypeSystemId(self):
self.throwsWithLatin1([["Doctype", u"potato", u"potato", u"\u0101"]])
def testCdataCharacters(self):
self.assertEquals("<style>ā", serialize_html([["StartTag", "http://www.w3.org/1999/xhtml", "style", {}],
["Characters", u"\u0101"]],
{"encoding": "iso-8859-1"}))
def testCharacters(self):
self.assertEquals("ā", serialize_html([["Characters", u"\u0101"]],
{"encoding": "iso-8859-1"}))
def testStartTagName(self):
self.throwsWithLatin1([["StartTag", u"http://www.w3.org/1999/xhtml", u"\u0101", []]])
def testEmptyTagName(self):
self.throwsWithLatin1([["EmptyTag", u"http://www.w3.org/1999/xhtml", u"\u0101", []]])
def testAttributeName(self):
self.throwsWithLatin1([["StartTag", u"http://www.w3.org/1999/xhtml", u"span", [{"namespace": None, "name": u"\u0101", "value": u"potato"}]]])
def testAttributeValue(self):
self.assertEquals("<span potato=ā>", serialize_html([["StartTag", u"http://www.w3.org/1999/xhtml", u"span",
[{"namespace": None, "name": u"potato", "value": u"\u0101"}]]],
{"encoding": "iso-8859-1"}))
def testEndTagName(self):
self.throwsWithLatin1([["EndTag", u"http://www.w3.org/1999/xhtml", u"\u0101"]])
def testComment(self):
self.throwsWithLatin1([["Comment", u"\u0101"]])
if "lxml" in optionals_loaded:
class LxmlTestCase(unittest.TestCase):
def setUp(self):
self.parser = etree.XMLParser(resolve_entities=False)
self.treewalker = html5lib.getTreeWalker("lxml")
self.serializer = serializer.HTMLSerializer()
def testEntityReplacement(self):
doc = """<!DOCTYPE html SYSTEM "about:legacy-compat"><html>β</html>"""
tree = etree.fromstring(doc, parser = self.parser).getroottree()
result = serializer.serialize(tree, tree="lxml", omit_optional_tags=False)
self.assertEquals(u"""<!DOCTYPE html SYSTEM "about:legacy-compat"><html>\u03B2</html>""", result)
def testEntityXML(self):
doc = """<!DOCTYPE html SYSTEM "about:legacy-compat"><html>></html>"""
tree = etree.fromstring(doc, parser = self.parser).getroottree()
result = serializer.serialize(tree, tree="lxml", omit_optional_tags=False)
self.assertEquals(u"""<!DOCTYPE html SYSTEM "about:legacy-compat"><html>></html>""", result)
def testEntityNoResolve(self):
doc = """<!DOCTYPE html SYSTEM "about:legacy-compat"><html>β</html>"""
tree = etree.fromstring(doc, parser = self.parser).getroottree()
result = serializer.serialize(tree, tree="lxml", omit_optional_tags=False,
resolve_entities=False)
self.assertEquals(u"""<!DOCTYPE html SYSTEM "about:legacy-compat"><html>β</html>""", result)
def test_serializer():
for filename in html5lib_test_files('serializer', '*.test'):
tests = json.load(file(filename))
test_name = os.path.basename(filename).replace('.test','')
for index, test in enumerate(tests['tests']):
xhtml = test.get("xhtml", test["expected"])
if test_name == 'optionaltags':
xhtml = None
yield make_test, test["input"], test["expected"], xhtml, test.get("options", {})
| agpl-3.0 |
davidwaroquiers/pymatgen | pymatgen/io/wannier90.py | 5 | 6189 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Modules for working with wannier90 input and output.
"""
from typing import Sequence
import numpy as np
from scipy.io import FortranEOFError, FortranFile
__author__ = "Mark Turiansky"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__status__ = "Production"
__date__ = "Jun 04, 2020"
class Unk:
"""
Object representing the data in a UNK file.
.. attribute:: ik
int index of kpoint for this file
.. attribute:: data
        numpy.ndarray that contains the wavefunction data in the UNK file.
The shape should be (nbnd, ngx, ngy, ngz) for regular calculations and
(nbnd, 2, ngx, ngy, ngz) for noncollinear calculations.
.. attribute:: is_noncollinear
bool that specifies if data is from a noncollinear calculation
.. attribute:: nbnd
int number of bands in data
.. attribute:: ng
sequence of three integers that correspond to the grid size of the
given data. The definition is ng = (ngx, ngy, ngz).
"""
ik: int
is_noncollinear: bool
nbnd: int
ng: Sequence[int]
def __init__(self, ik: int, data: np.ndarray) -> None:
"""
Initialize Unk class.
Args:
ik (int): index of the kpoint UNK file is for
data (np.ndarray): data from the UNK file that has shape (nbnd,
ngx, ngy, ngz) or (nbnd, 2, ngx, ngy, ngz) if noncollinear
"""
self.ik = ik
self.data = data
@property
def data(self) -> np.ndarray:
"""
        np.ndarray: contains the wavefunction data in the UNK file.
The shape should be (nbnd, ngx, ngy, ngz) for regular calculations and
(nbnd, 2, ngx, ngy, ngz) for noncollinear calculations.
"""
return self._data
@data.setter
def data(self, value: np.ndarray) -> None:
"""
Sets the value of data.
Args:
            value (np.ndarray): data to replace stored data, must have shape
(nbnd, ngx, ngy, ngz) or (nbnd, 2, ngx, ngy, ngz) if
noncollinear calculation
"""
temp_val = np.array(value, dtype=np.complex128)
if len(temp_val.shape) not in [4, 5]:
raise ValueError(
"invalid data shape, must be (nbnd, ngx, ngy, ngz"
") or (nbnd, 2, ngx, ngy, ngz) for noncollinear "
f"data, given {temp_val.shape}"
)
if len(temp_val.shape) == 5 and temp_val.shape[1] != 2:
raise ValueError(
"invalid noncollinear data, shape should be (nbnd" f", 2, ngx, ngy, ngz), given {temp_val.shape}"
)
self._data = temp_val
# derived properties
self.is_noncollinear = len(self.data.shape) == 5
self.nbnd = self.data.shape[0]
self.ng = self.data.shape[-3:]
@staticmethod
def from_file(filename: str) -> object:
"""
Reads the UNK data from file.
Args:
filename (str): path to UNK file to read
Returns:
Unk object
"""
input_data = []
with FortranFile(filename, "r") as f:
*ng, ik, nbnd = f.read_ints()
for _ in range(nbnd):
input_data.append(
# when reshaping need to specify ordering as fortran
f.read_record(np.complex128).reshape(ng, order="F")
)
try:
for _ in range(nbnd):
input_data.append(f.read_record(np.complex128).reshape(ng, order="F"))
is_noncollinear = True
except FortranEOFError:
is_noncollinear = False
# mypy made me create an extra variable here >:(
data = np.array(input_data, dtype=np.complex128)
# spinors are interwoven, need to separate them
if is_noncollinear:
temp_data = np.empty((nbnd, 2, *ng), dtype=np.complex128)
temp_data[:, 0, :, :, :] = data[::2, :, :, :]
temp_data[:, 1, :, :, :] = data[1::2, :, :, :]
return Unk(ik, temp_data)
return Unk(ik, data)
def write_file(self, filename: str) -> None:
"""
Write the UNK file.
Args:
filename (str): path to UNK file to write, the name should have the
form 'UNKXXXXX.YY' where XXXXX is the kpoint index (Unk.ik) and
YY is 1 or 2 for the spin index or NC if noncollinear
"""
with FortranFile(filename, "w") as f:
f.write_record(np.array([*self.ng, self.ik, self.nbnd], dtype=np.int32))
for ib in range(self.nbnd):
if self.is_noncollinear:
f.write_record(self.data[ib, 0].flatten("F"))
f.write_record(self.data[ib, 1].flatten("F"))
else:
f.write_record(self.data[ib].flatten("F"))
def __repr__(self) -> str:
return (
f"<UNK ik={self.ik} nbnd={self.nbnd} ncl={self.is_noncollinear}"
+ f" ngx={self.ng[0]} ngy={self.ng[1]} ngz={self.ng[2]}>"
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, Unk):
return NotImplemented
if not np.allclose(self.ng, other.ng):
return False
if self.ik != other.ik:
return False
if self.is_noncollinear != other.is_noncollinear:
return False
if self.nbnd != other.nbnd:
return False
for ib in range(self.nbnd):
if self.is_noncollinear:
if not (
np.allclose(self.data[ib, 0], other.data[ib, 0], atol=1e-4)
and np.allclose(self.data[ib, 1], other.data[ib, 1], atol=1e-4)
):
return False
else:
if not np.allclose(self.data[ib], other.data[ib], atol=1e-4):
return False
return True
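# Illustrative sketch (the band count, grid size and file name below are
# arbitrary): build a small collinear Unk from random data, write it out and
# read it back, relying on __eq__ to confirm the round trip.
def _example_unk_roundtrip(filename: str = "UNK00001.1") -> bool:
    data = np.random.rand(2, 4, 4, 4) + 1j * np.random.rand(2, 4, 4, 4)
    original = Unk(1, data)
    original.write_file(filename)
    return original == Unk.from_file(filename)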
| mit |
bkj/ernest | enrich/modules/enrich_terminal_nodes.py | 2 | 6773 | #!/usr/bin/env python
'''
Add single neighbor tag for owners and issuers to ownership index;
tag enables hiding terminal nodes in front end
** Note **
This runs prospectively using the --most-recent argument
'''
import argparse
import json
import logging
from elasticsearch import Elasticsearch
from elasticsearch.helpers import parallel_bulk, scan
class ENRICH_TERMINAL_NODES:
def __init__(self, args, parent_logger):
self.args = args
self.logger = logging.getLogger(parent_logger + ".terminal_nodes")
with open(args.config_path, 'r') as inf:
config = json.load(inf)
self.config = config
self.client = Elasticsearch([{
'host': config['es']['host'],
'port': config['es']['port']}
])
self.match_all = {
"query": {
"match_all": {}
}
}
def raw_dict(self, x, dict_type):
if dict_type == 'issuer':
key = x['issuerCik']
val = x['ownerCik']
elif dict_type == 'owner':
key = x['ownerCik']
val = x['issuerCik']
return {
"key": key,
"value": val
}
def build_query(self, val):
val = '__meta__.' + val + '_has_one_neighbor'
query = {
"query": {
"bool": {
"should": [
{
"filtered": {
"filter": {
"missing": {
"field": val
}
}
}
},
{
"match": {
val: True
}
}
],
"minimum_should_match": 1
}
}
}
return query
def get_terminal_nodes(self, search_type):
temp_dict = {}
for a in scan(self.client,
index=self.config['ownership']['index'],
query=self.match_all):
x = self.raw_dict(a['_source'], search_type)
if x["key"] in temp_dict:
if temp_dict[x["key"]]["terminal"] is True:
if x["value"] != temp_dict[x["key"]]["value"]:
temp_dict[x["key"]]["terminal"] = False
else:
pass
else:
pass
else:
temp_dict[x["key"]] = {
"value": x["value"],
"terminal": True
}
return [key for key in temp_dict if temp_dict[key]['terminal'] is True]
def get_update_nodes(self, query_type):
gtn = self.get_terminal_nodes(query_type)
if self.args.from_scratch:
query = self.build_query(query_type)
else:
query = {"query": {
"bool": {
"must_not": {
"match": {
"__meta__." + query_type + "_has_one_neighbor": True
}
},
"must": {
"terms": {
}
}
}
}}
return query, gtn
def main(self, query_type):
actions = []
query, t_nodes = self.get_update_nodes(query_type)
i = 0
tn = [t_nodes[j: j + 1024] for j in range(0, len(t_nodes), 1024)]
for p in tn:
query["query"]["bool"]["must"]["terms"][query_type + "Cik"] = p
for person in scan(self.client,
index=self.config['ownership']['index'],
query=query):
actions.append({
"_op_type": "update",
"_index": self.config['ownership']['index'],
"_id": person['_id'],
"_type": person['_type'],
"doc": {
"__meta__": {
query_type + "_has_one_neighbor": True
}
}
})
i += 1
if i > 500:
for success, info in parallel_bulk(self.client,
actions,
chunk_size=510):
if not success:
self.logger.error('[RESPONSE]|{}'.format(info))
else:
self.logger.info('[RESPONSE]|{}'.format(info))
actions = []
i = 0
for success, info in parallel_bulk(self.client,
actions,
chunk_size=510):
if not success:
self.logger.error('[RESPONSE]|{}'.format(info))
else:
self.logger.info('[RESPONSE]|{}'.format(info))
f_query = {
"query": {
"bool": {
"must_not": {
"terms": {
query_type + 'Cik': t_nodes
}
},
"must": {
"match": {
"__meta__." + query_type + "_has_one_neighbor": True
}
}
}
}
}
for a in scan(self.client,
index=self.config['ownership']['index'],
query=f_query):
a['_source']['__meta__'][query_type + '_has_one_neighbor'] = False
res = self.client.index(
index=self.config['ownership']['index'],
doc_type=a['_type'],
body=a['_source'],
id=a['_id']
)
self.logger.info(res)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='add single neighbor tags')
parser.add_argument('--from-scratch',
dest='from_scratch',
action="store_true")
parser.add_argument('--most-recent',
dest='most_recent',
action="store_true")
parser.add_argument('--config-path',
type=str,
action='store',
default='../config.json')
args = parser.parse_args()
| apache-2.0 |
midgetspy/Sick-Beard | lib/hachoir_core/error.py | 90 | 1350 | """
Functions to display an error (error, warning or information) message.
"""
from lib.hachoir_core.log import log
from lib.hachoir_core.tools import makePrintable
import sys, traceback
def getBacktrace(empty="Empty backtrace."):
"""
Try to get backtrace as string.
Returns "Error while trying to get backtrace" on failure.
"""
try:
info = sys.exc_info()
trace = traceback.format_exception(*info)
sys.exc_clear()
if trace[0] != "None\n":
return "".join(trace)
except:
# No i18n here (imagine if i18n function calls error...)
return "Error while trying to get backtrace"
return empty
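# Illustrative sketch: capture the current backtrace as text from inside an
# exception handler (the failing expression is arbitrary).
def _exampleBacktrace():
    try:
        1 / 0
    except ZeroDivisionError:
        return getBacktrace()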
class HachoirError(Exception):
"""
Parent of all errors in Hachoir library
"""
def __init__(self, message):
message_bytes = makePrintable(message, "ASCII")
Exception.__init__(self, message_bytes)
self.text = message
def __unicode__(self):
return self.text
# Error classes which may be raised by Hachoir core
# FIXME: Add EnvironmentError (IOError or OSError) and AssertionError?
# FIXME: Remove ArithmeticError and RuntimeError?
HACHOIR_ERRORS = (HachoirError, LookupError, NameError, AttributeError,
TypeError, ValueError, ArithmeticError, RuntimeError)
info = log.info
warning = log.warning
error = log.error
| gpl-3.0 |
lberruti/ansible-modules-extras | notification/irc.py | 41 | 6075 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Jan-Piet Mens <jpmens () gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: irc
version_added: "1.2"
short_description: Send a message to an IRC channel
description:
- Send a message to an IRC channel. This is a very simplistic implementation.
options:
server:
description:
- IRC server name/address
required: false
default: localhost
port:
description:
- IRC server port number
required: false
default: 6667
nick:
description:
- Nickname. May be shortened, depending on server's NICKLEN setting.
required: false
default: ansible
msg:
description:
- The message body.
required: true
default: null
color:
description:
      - Text color for the message. ("none" is a valid option in 1.6 or later; in 1.6 and prior, the default color is black, not "none").
required: false
default: "none"
choices: [ "none", "yellow", "red", "green", "blue", "black" ]
channel:
description:
- Channel name
required: true
key:
description:
- Channel key
required: false
version_added: 1.7
passwd:
description:
- Server password
required: false
timeout:
description:
- Timeout to use while waiting for successful registration and join
messages, this is to prevent an endless loop
default: 30
version_added: 1.5
use_ssl:
description:
- Designates whether TLS/SSL should be used when connecting to the IRC server
default: False
version_added: 1.8
# informational: requirements for nodes
requirements: [ socket ]
author: Jan-Piet Mens, Matt Martz
'''
EXAMPLES = '''
- irc: server=irc.example.net channel="#t1" msg="Hello world"
- local_action: irc port=6669
channel="#t1"
msg="All finished at {{ ansible_date_time.iso8601 }}"
color=red
nick=ansibleIRC
'''
# ===========================================
# IRC module support methods.
#
import re
import socket
import ssl
import time
from time import sleep
def send_msg(channel, msg, server='localhost', port='6667', key=None,
nick="ansible", color='none', passwd=False, timeout=30, use_ssl=False):
'''send message to IRC'''
colornumbers = {
'black': "01",
'red': "04",
'green': "09",
'yellow': "08",
'blue': "12",
}
try:
colornumber = colornumbers[color]
colortext = "\x03" + colornumber
except:
colortext = ""
message = colortext + msg
irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if use_ssl:
irc = ssl.wrap_socket(irc)
irc.connect((server, int(port)))
if passwd:
irc.send('PASS %s\r\n' % passwd)
irc.send('NICK %s\r\n' % nick)
irc.send('USER %s %s %s :ansible IRC\r\n' % (nick, nick, nick))
motd = ''
start = time.time()
while 1:
motd += irc.recv(1024)
# The server might send back a shorter nick than we specified (due to NICKLEN),
# so grab that and use it from now on (assuming we find the 00[1-4] response).
match = re.search('^:\S+ 00[1-4] (?P<nick>\S+) :', motd, flags=re.M)
if match:
nick = match.group('nick')
break
elif time.time() - start > timeout:
raise Exception('Timeout waiting for IRC server welcome response')
sleep(0.5)
if key:
irc.send('JOIN %s %s\r\n' % (channel, key))
else:
irc.send('JOIN %s\r\n' % channel)
join = ''
start = time.time()
while 1:
join += irc.recv(1024)
if re.search('^:\S+ 366 %s %s :' % (nick, channel), join, flags=re.M):
break
elif time.time() - start > timeout:
raise Exception('Timeout waiting for IRC JOIN response')
sleep(0.5)
irc.send('PRIVMSG %s :%s\r\n' % (channel, message))
sleep(1)
irc.send('PART %s\r\n' % channel)
irc.send('QUIT\r\n')
sleep(1)
irc.close()
# ===========================================
# Main
#
def main():
module = AnsibleModule(
argument_spec=dict(
server=dict(default='localhost'),
port=dict(default=6667),
nick=dict(default='ansible'),
msg=dict(required=True),
color=dict(default="none", choices=["yellow", "red", "green",
"blue", "black", "none"]),
channel=dict(required=True),
key=dict(),
passwd=dict(),
timeout=dict(type='int', default=30),
use_ssl=dict(type='bool', default=False)
),
supports_check_mode=True
)
server = module.params["server"]
port = module.params["port"]
nick = module.params["nick"]
msg = module.params["msg"]
color = module.params["color"]
channel = module.params["channel"]
key = module.params["key"]
passwd = module.params["passwd"]
timeout = module.params["timeout"]
use_ssl = module.params["use_ssl"]
try:
send_msg(channel, msg, server, port, key, nick, color, passwd, timeout, use_ssl)
except Exception, e:
module.fail_json(msg="unable to send to IRC: %s" % e)
module.exit_json(changed=False, channel=channel, nick=nick,
msg=msg)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
bev-a-tron/pledgeservice | testlib/waitress/trigger.py | 31 | 7964 | ##############################################################################
#
# Copyright (c) 2001-2005 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
import asyncore
import os
import socket
import errno
from waitress.compat import thread
# Wake up a call to select() running in the main thread.
#
# This is useful in a context where you are using Medusa's I/O
# subsystem to deliver data, but the data is generated by another
# thread. Normally, if Medusa is in the middle of a call to
# select(), new output data generated by another thread will have
# to sit until the call to select() either times out or returns.
# If the trigger is 'pulled' by another thread, it should immediately
# generate a READ event on the trigger object, which will force the
# select() invocation to return.
#
# A common use for this facility: letting Medusa manage I/O for a
# large number of connections; but routing each request through a
# thread chosen from a fixed-size thread pool. When a thread is
# acquired, a transaction is performed, but output data is
# accumulated into buffers that will be emptied more efficiently
# by Medusa. [picture a server that can process database queries
# rapidly, but doesn't want to tie up threads waiting to send data
# to low-bandwidth connections]
#
# The other major feature provided by this class is the ability to
# move work back into the main thread: if you call pull_trigger()
# with a thunk argument, when select() wakes up and receives the
# event it will call your thunk from within that thread. The main
# purpose of this is to remove the need to wrap thread locks around
# Medusa's data structures, which normally do not need them. [To see
# why this is true, imagine this scenario: A thread tries to push some
# new data onto a channel's outgoing data queue at the same time that
# the main thread is trying to remove some]
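# Illustrative sketch (not part of the original waitress source): a worker
# thread that has appended data to a channel's output buffer can hand the
# actual socket work back to the asyncore thread with something like
#
#     trigger.pull_trigger(lambda: channel.initiate_send())
#
# pull_trigger() writes a single byte to the trigger's pipe/socket pair, which
# makes the trigger readable, wakes the blocking select() call, and causes the
# queued thunk to run inside handle_read() on the main loop's thread.
# `channel.initiate_send()` is a hypothetical callback used only for
# illustration here.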
class _triggerbase(object):
"""OS-independent base class for OS-dependent trigger class."""
kind = None # subclass must set to "pipe" or "loopback"; used by repr
def __init__(self):
self._closed = False
# `lock` protects the `thunks` list from being traversed and
# appended to simultaneously.
self.lock = thread.allocate_lock()
# List of no-argument callbacks to invoke when the trigger is
# pulled. These run in the thread running the asyncore mainloop,
# regardless of which thread pulls the trigger.
self.thunks = []
def readable(self):
return True
def writable(self):
return False
def handle_connect(self):
pass
def handle_close(self):
self.close()
# Override the asyncore close() method, because it doesn't know about
# (so can't close) all the gimmicks we have open. Subclass must
# supply a _close() method to do platform-specific closing work. _close()
# will be called iff we're not already closed.
def close(self):
if not self._closed:
self._closed = True
self.del_channel()
self._close() # subclass does OS-specific stuff
def pull_trigger(self, thunk=None):
if thunk:
self.lock.acquire()
try:
self.thunks.append(thunk)
finally:
self.lock.release()
self._physical_pull()
def handle_read(self):
try:
self.recv(8192)
except (OSError, socket.error):
return
self.lock.acquire()
try:
for thunk in self.thunks:
try:
thunk()
except:
nil, t, v, tbinfo = asyncore.compact_traceback()
self.log_info(
'exception in trigger thunk: (%s:%s %s)' %
(t, v, tbinfo))
self.thunks = []
finally:
self.lock.release()
if os.name == 'posix':
class trigger(_triggerbase, asyncore.file_dispatcher):
kind = "pipe"
def __init__(self, map):
_triggerbase.__init__(self)
r, self.trigger = self._fds = os.pipe()
asyncore.file_dispatcher.__init__(self, r, map=map)
def _close(self):
for fd in self._fds:
os.close(fd)
self._fds = []
def _physical_pull(self):
os.write(self.trigger, b'x')
else: # pragma: no cover
# Windows version; uses just sockets, because a pipe isn't select'able
# on Windows.
class trigger(_triggerbase, asyncore.dispatcher):
kind = "loopback"
def __init__(self, map):
_triggerbase.__init__(self)
# Get a pair of connected sockets. The trigger is the 'w'
# end of the pair, which is connected to 'r'. 'r' is put
# in the asyncore socket map. "pulling the trigger" then
# means writing something on w, which will wake up r.
w = socket.socket()
# Disable buffering -- pulling the trigger sends 1 byte,
# and we want that sent immediately, to wake up asyncore's
# select() ASAP.
w.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
count = 0
while True:
count += 1
# Bind to a local port; for efficiency, let the OS pick
# a free port for us.
# Unfortunately, stress tests showed that we may not
# be able to connect to that port ("Address already in
# use") despite that the OS picked it. This appears
# to be a race bug in the Windows socket implementation.
# So we loop until a connect() succeeds (almost always
# on the first try). See the long thread at
# http://mail.zope.org/pipermail/zope/2005-July/160433.html
# for hideous details.
a = socket.socket()
a.bind(("127.0.0.1", 0))
connect_address = a.getsockname() # assigned (host, port) pair
a.listen(1)
try:
w.connect(connect_address)
break # success
except socket.error as detail:
if detail[0] != errno.WSAEADDRINUSE:
# "Address already in use" is the only error
# I've seen on two WinXP Pro SP2 boxes, under
# Pythons 2.3.5 and 2.4.1.
raise
# (10048, 'Address already in use')
# assert count <= 2 # never triggered in Tim's tests
if count >= 10: # I've never seen it go above 2
a.close()
w.close()
raise RuntimeError("Cannot bind trigger!")
# Close `a` and try again. Note: I originally put a short
# sleep() here, but it didn't appear to help or hurt.
a.close()
r, addr = a.accept() # r becomes asyncore's (self.)socket
a.close()
self.trigger = w
asyncore.dispatcher.__init__(self, r, map=map)
def _close(self):
# self.socket is r, and self.trigger is w, from __init__
self.socket.close()
self.trigger.close()
def _physical_pull(self):
self.trigger.send(b'x')
| agpl-3.0 |
epssy/hue | desktop/core/ext-py/lxml/benchmark/bench_etree.py | 30 | 10920 | import sys, copy
from itertools import *
from StringIO import StringIO
import benchbase
from benchbase import (with_attributes, with_text, onlylib,
serialized, children, nochange)
TEXT = "some ASCII text"
UTEXT = u"some klingon: \F8D2"
############################################################
# Benchmarks
############################################################
class BenchMark(benchbase.TreeBenchMark):
@nochange
def bench_iter_children(self, root):
for child in root:
pass
@nochange
def bench_iter_children_reversed(self, root):
for child in reversed(root):
pass
@nochange
def bench_first_child(self, root):
for i in self.repeat1000:
child = root[0]
@nochange
def bench_last_child(self, root):
for i in self.repeat1000:
child = root[-1]
@nochange
def bench_middle_child(self, root):
pos = len(root) / 2
for i in self.repeat1000:
child = root[pos]
@nochange
@with_attributes(False)
@with_text(text=True)
@onlylib('lxe', 'ET')
def bench_tostring_text_ascii(self, root):
self.etree.tostring(root, method="text")
@nochange
@with_attributes(False)
@with_text(text=True, utext=True)
@onlylib('lxe')
def bench_tostring_text_unicode(self, root):
self.etree.tostring(root, method="text", encoding=unicode)
@nochange
@with_attributes(False)
@with_text(text=True, utext=True)
@onlylib('lxe', 'ET')
def bench_tostring_text_utf16(self, root):
self.etree.tostring(root, method="text", encoding='UTF-16')
@nochange
@with_attributes(False)
@with_text(text=True, utext=True)
@onlylib('lxe')
@children
def bench_tostring_text_utf8_with_tail(self, children):
for child in children:
self.etree.tostring(child, method="text",
encoding='UTF-8', with_tail=True)
@nochange
@with_attributes(True, False)
@with_text(text=True, utext=True)
def bench_tostring_utf8(self, root):
self.etree.tostring(root, encoding='UTF-8')
@nochange
@with_attributes(True, False)
@with_text(text=True, utext=True)
def bench_tostring_utf16(self, root):
self.etree.tostring(root, encoding='UTF-16')
@nochange
@with_attributes(True, False)
@with_text(text=True, utext=True)
def bench_tostring_utf8_unicode_XML(self, root):
xml = unicode(self.etree.tostring(root, encoding='UTF-8'), 'UTF-8')
self.etree.XML(xml)
@nochange
@with_attributes(True, False)
@with_text(text=True, utext=True)
def bench_write_utf8_parse_stringIO(self, root):
f = StringIO()
self.etree.ElementTree(root).write(f, encoding='UTF-8')
f.seek(0)
self.etree.parse(f)
@with_attributes(True, False)
@with_text(text=True, utext=True)
@serialized
def bench_parse_stringIO(self, root_xml):
f = StringIO(root_xml)
self.etree.parse(f)
@with_attributes(True, False)
@with_text(text=True, utext=True)
@serialized
def bench_XML(self, root_xml):
self.etree.XML(root_xml)
@with_attributes(True, False)
@with_text(text=True, utext=True)
@serialized
def bench_iterparse_stringIO(self, root_xml):
f = StringIO(root_xml)
for event, element in self.etree.iterparse(f):
pass
@with_attributes(True, False)
@with_text(text=True, utext=True)
@serialized
def bench_iterparse_stringIO_clear(self, root_xml):
f = StringIO(root_xml)
for event, element in self.etree.iterparse(f):
element.clear()
def bench_append_from_document(self, root1, root2):
# == "1,2 2,3 1,3 3,1 3,2 2,1" # trees 1 and 2, or 2 and 3, or ...
for el in root2:
root1.append(el)
def bench_insert_from_document(self, root1, root2):
pos = len(root1)/2
for el in root2:
root1.insert(pos, el)
pos = pos + 1
def bench_rotate_children(self, root):
# == "1 2 3" # runs on any single tree independently
for i in range(100):
el = root[0]
del root[0]
root.append(el)
def bench_reorder(self, root):
for i in range(1,len(root)/2):
el = root[0]
del root[0]
root[-i:-i] = [ el ]
def bench_reorder_slice(self, root):
for i in range(1,len(root)/2):
els = root[0:1]
del root[0]
root[-i:-i] = els
def bench_clear(self, root):
root.clear()
@nochange
@children
def bench_has_children(self, children):
for child in children:
if child and child and child and child and child:
pass
@nochange
@children
def bench_len(self, children):
for child in children:
map(len, repeat(child, 20))
@children
def bench_create_subelements(self, children):
SubElement = self.etree.SubElement
for child in children:
SubElement(child, '{test}test')
def bench_append_elements(self, root):
Element = self.etree.Element
for child in root:
el = Element('{test}test')
child.append(el)
@nochange
@children
def bench_makeelement(self, children):
empty_attrib = {}
for child in children:
child.makeelement('{test}test', empty_attrib)
@nochange
@children
def bench_create_elements(self, children):
Element = self.etree.Element
for child in children:
Element('{test}test')
@children
def bench_replace_children_element(self, children):
Element = self.etree.Element
for child in children:
el = Element('{test}test')
child[:] = [el]
@children
def bench_replace_children(self, children):
els = [ self.etree.Element("newchild") ]
for child in children:
child[:] = els
def bench_remove_children(self, root):
for child in root:
root.remove(child)
def bench_remove_children_reversed(self, root):
for child in reversed(root):
root.remove(child)
@children
def bench_set_attributes(self, children):
for child in children:
child.set('a', 'bla')
@with_attributes(True)
@children
@nochange
def bench_get_attributes(self, children):
for child in children:
child.get('bla1')
child.get('{attr}test1')
@children
def bench_setget_attributes(self, children):
for child in children:
child.set('a', 'bla')
for child in children:
child.get('a')
@nochange
def bench_root_getchildren(self, root):
root.getchildren()
@nochange
def bench_root_list_children(self, root):
list(root)
@nochange
@children
def bench_getchildren(self, children):
for child in children:
child.getchildren()
@nochange
@children
def bench_get_children_slice(self, children):
for child in children:
child[:]
@nochange
@children
def bench_get_children_slice_2x(self, children):
for child in children:
child[:]
child[:]
@nochange
@children
@with_attributes(True, False)
@with_text(utext=True, text=True, no_text=True)
def bench_deepcopy(self, children):
for child in children:
copy.deepcopy(child)
@nochange
@with_attributes(True, False)
@with_text(utext=True, text=True, no_text=True)
def bench_deepcopy_all(self, root):
copy.deepcopy(root)
@nochange
@children
def bench_tag(self, children):
for child in children:
child.tag
@nochange
@children
def bench_tag_repeat(self, children):
for child in children:
for i in self.repeat100:
child.tag
@nochange
@with_text(utext=True, text=True, no_text=True)
@children
def bench_text(self, children):
for child in children:
child.text
@nochange
@with_text(utext=True, text=True, no_text=True)
@children
def bench_text_repeat(self, children):
for child in children:
for i in self.repeat500:
child.text
@children
def bench_set_text(self, children):
text = TEXT
for child in children:
child.text = text
@children
def bench_set_utext(self, children):
text = UTEXT
for child in children:
child.text = text
@nochange
@onlylib('lxe')
def bench_index(self, root):
for child in root:
root.index(child)
@nochange
@onlylib('lxe')
def bench_index_slice(self, root):
for child in root[5:100]:
root.index(child, 5, 100)
@nochange
@onlylib('lxe')
def bench_index_slice_neg(self, root):
for child in root[-100:-5]:
root.index(child, start=-100, stop=-5)
@nochange
def bench_getiterator_all(self, root):
list(root.getiterator())
@nochange
def bench_getiterator_islice(self, root):
list(islice(root.getiterator(), 10, 110))
@nochange
def bench_getiterator_tag(self, root):
list(islice(root.getiterator(self.SEARCH_TAG), 3, 10))
@nochange
def bench_getiterator_tag_all(self, root):
list(root.getiterator(self.SEARCH_TAG))
@nochange
def bench_getiterator_tag_none(self, root):
list(root.getiterator("{ThisShould}NeverExist"))
@nochange
def bench_getiterator_tag_text(self, root):
[ e.text for e in root.getiterator(self.SEARCH_TAG) ]
@nochange
def bench_findall(self, root):
root.findall(".//*")
@nochange
def bench_findall_child(self, root):
root.findall(".//*/" + self.SEARCH_TAG)
@nochange
def bench_findall_tag(self, root):
root.findall(".//" + self.SEARCH_TAG)
@nochange
def bench_findall_path(self, root):
root.findall(".//*[%s]/./%s/./*" % (self.SEARCH_TAG, self.SEARCH_TAG))
@nochange
@onlylib('lxe')
def bench_xpath_path(self, root):
ns, tag = self.SEARCH_TAG[1:].split('}')
root.xpath(".//*[p:%s]/./p:%s/./*" % (tag,tag),
namespaces = {'p':ns})
@nochange
@onlylib('lxe')
def bench_iterfind(self, root):
list(root.iterfind(".//*"))
@nochange
@onlylib('lxe')
def bench_iterfind_tag(self, root):
list(root.iterfind(".//" + self.SEARCH_TAG))
@nochange
@onlylib('lxe')
def bench_iterfind_islice(self, root):
list(islice(root.iterfind(".//*"), 10, 110))
if __name__ == '__main__':
benchbase.main(BenchMark)
| apache-2.0 |
modera/mcloud | mcloud/plugins/monitor.py | 1 | 1101 | import inject
from mcloud.application import ApplicationController
from mcloud.events import EventBus
from mcloud.plugin import IMcloudPlugin
from mcloud.plugins import Plugin
from mcloud.txdocker import IDockerClient
from twisted.internet import reactor
from twisted.python import log
from zope.interface import implements
class DockerMonitorPlugin(Plugin):
"""
    Monitors docker events and emits a "containers.updated" event when non-internal
containers change their state.
"""
implements(IMcloudPlugin)
client = inject.attr(IDockerClient)
event_bus = inject.attr(EventBus)
app_controller = inject.attr(ApplicationController)
def setup(self):
# reactor.callLater(0, self.attach_to_events)
pass
def on_event(self, event):
if not self.app_controller.is_internal(event['id']):
log.msg('New docker event: %s' % event)
self.event_bus.fire_event('containers.updated', event)
def attach_to_events(self, *args):
log.msg('Start monitoring docker events')
return self.client.events(self.on_event)
| apache-2.0 |
romain-li/edx-platform | common/lib/xmodule/xmodule/textannotation_module.py | 3 | 6835 | """Text annotation module"""
from lxml import etree
from pkg_resources import resource_string
from xmodule.x_module import XModule
from xmodule.raw_module import RawDescriptor
from xblock.core import Scope, String
from xmodule.annotator_mixin import get_instructions
from xmodule.annotator_token import retrieve_token
from xblock.fragment import Fragment
import textwrap
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
class AnnotatableFields(object):
"""Fields for `TextModule` and `TextDescriptor`."""
data = String(
help=_("XML data for the annotation"),
scope=Scope.content,
default=textwrap.dedent("""\
<annotatable>
<instructions>
<p>
Add the instructions to the assignment here.
</p>
</instructions>
<p>
Lorem ipsum dolor sit amet, at amet animal petentium nec. Id augue nemore postulant mea. Ex eam dicant noluisse expetenda, alia admodum abhorreant qui et. An ceteros expetenda mea, tale natum ipsum quo no, ut pro paulo alienum noluisse.
</p>
</annotatable>
"""))
display_name = String(
display_name=_("Display Name"),
help=_("The display name for this component."),
scope=Scope.settings,
default=_('Text Annotation'),
)
instructor_tags = String(
display_name=_("Tags for Assignments"),
help=_("Add tags that automatically highlight in a certain color using the comma-separated form, i.e. imagery:red,parallelism:blue"),
scope=Scope.settings,
default='imagery:red,parallelism:blue',
)
source = String(
display_name=_("Source/Citation"),
help=_("Optional for citing source of any material used. Automatic citation can be done using <a href=\"http://easybib.com\">EasyBib</a>"),
scope=Scope.settings,
default='None',
)
diacritics = String(
display_name=_("Diacritic Marks"),
help=_("Add diacritic marks to be added to a text using the comma-separated form, i.e. markname;urltomark;baseline,markname2;urltomark2;baseline2"),
scope=Scope.settings,
default='',
)
annotation_storage_url = String(
help=_("Location of Annotation backend"),
scope=Scope.settings,
default="http://your_annotation_storage.com",
display_name=_("Url for Annotation Storage")
)
annotation_token_secret = String(
help=_("Secret string for annotation storage"),
scope=Scope.settings,
default="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
display_name=_("Secret Token String for Annotation")
)
default_tab = String(
display_name=_("Default Annotations Tab"),
help=_("Select which tab will be the default in the annotations table: myNotes, Instructor, or Public."),
scope=Scope.settings,
default="myNotes",
)
# currently only supports one instructor, will build functionality for multiple later
instructor_email = String(
display_name=_("Email for 'Instructor' Annotations"),
help=_("Email of the user that will be attached to all annotations that will be found in 'Instructor' tab."),
scope=Scope.settings,
default="",
)
annotation_mode = String(
display_name=_("Mode for Annotation Tool"),
help=_("Type in number corresponding to following modes: 'instructor' or 'everyone'"),
scope=Scope.settings,
default="everyone",
)
class TextAnnotationModule(AnnotatableFields, XModule):
''' Text Annotation Module '''
js = {'coffee': [],
'js': []}
css = {'scss': [resource_string(__name__, 'css/annotatable/display.scss')]}
icon_class = 'textannotation'
def __init__(self, *args, **kwargs):
super(TextAnnotationModule, self).__init__(*args, **kwargs)
xmltree = etree.fromstring(self.data)
self.instructions = self._extract_instructions(xmltree)
self.content = etree.tostring(xmltree, encoding='unicode')
self.user_email = ""
self.is_course_staff = False
if self.runtime.get_user_role() in ['instructor', 'staff']:
self.is_course_staff = True
if self.runtime.get_real_user is not None:
try:
self.user_email = self.runtime.get_real_user(self.runtime.anonymous_student_id).email
except Exception: # pylint: disable=broad-except
self.user_email = _("No email address found.")
def _extract_instructions(self, xmltree):
""" Removes <instructions> from the xmltree and returns them as a string, otherwise None. """
return get_instructions(xmltree)
def student_view(self, context):
""" Renders parameters to template. """
context = {
'course_key': self.runtime.course_id,
'display_name': self.display_name_with_default_escaped,
'tag': self.instructor_tags,
'source': self.source,
'instructions_html': self.instructions,
'content_html': self.content,
'token': retrieve_token(self.user_email, self.annotation_token_secret),
'diacritic_marks': self.diacritics,
'annotation_storage': self.annotation_storage_url,
'default_tab': self.default_tab,
'instructor_email': self.instructor_email,
'annotation_mode': self.annotation_mode,
'is_course_staff': self.is_course_staff,
}
fragment = Fragment(self.system.render_template('textannotation.html', context))
# TinyMCE already exists in Studio so we should not load the files again
# get_real_user always returns "None" in Studio since its runtimes contains no anonymous ids
if self.runtime.get_real_user is not None:
fragment.add_javascript_url(self.runtime.STATIC_URL + "js/vendor/tinymce/js/tinymce/tinymce.full.min.js")
fragment.add_javascript_url(self.runtime.STATIC_URL + "js/vendor/tinymce/js/tinymce/jquery.tinymce.min.js")
return fragment
class TextAnnotationDescriptor(AnnotatableFields, RawDescriptor):
''' Text Annotation Descriptor '''
module_class = TextAnnotationModule
resources_dir = None
mako_template = "widgets/raw-edit.html"
@property
def non_editable_metadata_fields(self):
non_editable_fields = super(TextAnnotationDescriptor, self).non_editable_metadata_fields
non_editable_fields.extend([
TextAnnotationDescriptor.annotation_storage_url,
TextAnnotationDescriptor.annotation_token_secret,
])
return non_editable_fields
| agpl-3.0 |
mcanthony/node-gyp | gyp/pylib/gyp/input.py | 292 | 114315 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from compiler.ast import Const
from compiler.ast import Dict
from compiler.ast import Discard
from compiler.ast import List
from compiler.ast import Module
from compiler.ast import Node
from compiler.ast import Stmt
import compiler
import gyp.common
import gyp.simple_copy
import multiprocessing
import optparse
import os.path
import re
import shlex
import signal
import subprocess
import sys
import threading
import time
import traceback
from gyp.common import GypError
from gyp.common import OrderedSet
# A list of types that are treated as linkable.
linkable_types = ['executable', 'shared_library', 'loadable_module']
# A list of sections that contain links to other targets.
dependency_sections = ['dependencies', 'export_dependent_settings']
# base_path_sections is a list of sections defined by GYP that contain
# pathnames. The generators can provide more keys, the two lists are merged
# into path_sections, but you should call IsPathSection instead of using either
# list directly.
base_path_sections = [
'destination',
'files',
'include_dirs',
'inputs',
'libraries',
'outputs',
'sources',
]
path_sections = set()
# These per-process dictionaries are used to cache build file data when loading
# in parallel mode.
per_process_data = {}
per_process_aux_data = {}
def IsPathSection(section):
# If section ends in one of the '=+?!' characters, it's applied to a section
# without the trailing characters. '/' is notably absent from this list,
# because there's no way for a regular expression to be treated as a path.
while section[-1:] in '=+?!':
section = section[:-1]
if section in path_sections:
return True
  # Sections matching the regexp '_(dir|file|path)s?$' are also
# considered PathSections. Using manual string matching since that
# is much faster than the regexp and this can be called hundreds of
# thousands of times so micro performance matters.
if "_" in section:
tail = section[-6:]
if tail[-1] == 's':
tail = tail[:-1]
if tail[-5:] in ('_file', '_path'):
return True
return tail[-4:] == '_dir'
return False
# base_non_configuration_keys is a list of key names that belong in the target
# itself and should not be propagated into its configurations. It is merged
# with a list that can come from the generator to
# create non_configuration_keys.
base_non_configuration_keys = [
# Sections that must exist inside targets and not configurations.
'actions',
'configurations',
'copies',
'default_configuration',
'dependencies',
'dependencies_original',
'libraries',
'postbuilds',
'product_dir',
'product_extension',
'product_name',
'product_prefix',
'rules',
'run_as',
'sources',
'standalone_static_library',
'suppress_wildcard',
'target_name',
'toolset',
'toolsets',
'type',
# Sections that can be found inside targets or configurations, but that
# should not be propagated from targets into their configurations.
'variables',
]
non_configuration_keys = []
# Keys that do not belong inside a configuration dictionary.
invalid_configuration_keys = [
'actions',
'all_dependent_settings',
'configurations',
'dependencies',
'direct_dependent_settings',
'libraries',
'link_settings',
'sources',
'standalone_static_library',
'target_name',
'type',
]
# Controls whether or not the generator supports multiple toolsets.
multiple_toolsets = False
# Paths for converting filelist paths to output paths: {
# toplevel,
# qualified_output_dir,
# }
generator_filelist_paths = None
def GetIncludedBuildFiles(build_file_path, aux_data, included=None):
"""Return a list of all build files included into build_file_path.
The returned list will contain build_file_path as well as all other files
that it included, either directly or indirectly. Note that the list may
contain files that were included into a conditional section that evaluated
to false and was not merged into build_file_path's dict.
aux_data is a dict containing a key for each build file or included build
file. Those keys provide access to dicts whose "included" keys contain
lists of all other files included by the build file.
included should be left at its default None value by external callers. It
is used for recursion.
The returned list will not contain any duplicate entries. Each build file
in the list will be relative to the current directory.
"""
if included == None:
included = []
if build_file_path in included:
return included
included.append(build_file_path)
for included_build_file in aux_data[build_file_path].get('included', []):
GetIncludedBuildFiles(included_build_file, aux_data, included)
return included
def CheckedEval(file_contents):
"""Return the eval of a gyp file.
The gyp file is restricted to dictionaries and lists only, and
repeated keys are not allowed.
Note that this is slower than eval() is.
"""
ast = compiler.parse(file_contents)
assert isinstance(ast, Module)
c1 = ast.getChildren()
assert c1[0] is None
assert isinstance(c1[1], Stmt)
c2 = c1[1].getChildren()
assert isinstance(c2[0], Discard)
c3 = c2[0].getChildren()
assert len(c3) == 1
return CheckNode(c3[0], [])
def CheckNode(node, keypath):
if isinstance(node, Dict):
c = node.getChildren()
dict = {}
for n in range(0, len(c), 2):
assert isinstance(c[n], Const)
key = c[n].getChildren()[0]
if key in dict:
raise GypError("Key '" + key + "' repeated at level " +
repr(len(keypath) + 1) + " with key path '" +
'.'.join(keypath) + "'")
kp = list(keypath) # Make a copy of the list for descending this node.
kp.append(key)
dict[key] = CheckNode(c[n + 1], kp)
return dict
elif isinstance(node, List):
c = node.getChildren()
children = []
for index, child in enumerate(c):
kp = list(keypath) # Copy list.
kp.append(repr(index))
children.append(CheckNode(child, kp))
return children
elif isinstance(node, Const):
return node.getChildren()[0]
else:
raise TypeError("Unknown AST node at key path '" + '.'.join(keypath) +
"': " + repr(node))
def LoadOneBuildFile(build_file_path, data, aux_data, includes,
is_target, check):
if build_file_path in data:
return data[build_file_path]
if os.path.exists(build_file_path):
build_file_contents = open(build_file_path).read()
else:
raise GypError("%s not found (cwd: %s)" % (build_file_path, os.getcwd()))
build_file_data = None
try:
if check:
build_file_data = CheckedEval(build_file_contents)
else:
build_file_data = eval(build_file_contents, {'__builtins__': None},
None)
except SyntaxError, e:
e.filename = build_file_path
raise
except Exception, e:
gyp.common.ExceptionAppend(e, 'while reading ' + build_file_path)
raise
if type(build_file_data) is not dict:
raise GypError("%s does not evaluate to a dictionary." % build_file_path)
data[build_file_path] = build_file_data
aux_data[build_file_path] = {}
# Scan for includes and merge them in.
if ('skip_includes' not in build_file_data or
not build_file_data['skip_includes']):
try:
if is_target:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
aux_data, includes, check)
else:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
aux_data, None, check)
except Exception, e:
gyp.common.ExceptionAppend(e,
'while reading includes of ' + build_file_path)
raise
return build_file_data
def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data,
includes, check):
includes_list = []
if includes != None:
includes_list.extend(includes)
if 'includes' in subdict:
for include in subdict['includes']:
# "include" is specified relative to subdict_path, so compute the real
# path to include by appending the provided "include" to the directory
# in which subdict_path resides.
relative_include = \
os.path.normpath(os.path.join(os.path.dirname(subdict_path), include))
includes_list.append(relative_include)
# Unhook the includes list, it's no longer needed.
del subdict['includes']
# Merge in the included files.
for include in includes_list:
if not 'included' in aux_data[subdict_path]:
aux_data[subdict_path]['included'] = []
aux_data[subdict_path]['included'].append(include)
gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'", include)
MergeDicts(subdict,
LoadOneBuildFile(include, data, aux_data, None, False, check),
subdict_path, include)
# Recurse into subdictionaries.
for k, v in subdict.iteritems():
if type(v) is dict:
LoadBuildFileIncludesIntoDict(v, subdict_path, data, aux_data,
None, check)
elif type(v) is list:
LoadBuildFileIncludesIntoList(v, subdict_path, data, aux_data,
check)
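# Illustrative only (not from the gyp source): a build file written as
#
#     {
#       'includes': ['../common.gypi'],
#       'targets': [...],
#     }
#
# has '../common.gypi' resolved relative to the including file's own directory
# and merged into the dict by LoadBuildFileIncludesIntoDict above before any
# variable or condition processing happens.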
# This recurses into lists so that it can look for dicts.
def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data, check):
for item in sublist:
if type(item) is dict:
LoadBuildFileIncludesIntoDict(item, sublist_path, data, aux_data,
None, check)
elif type(item) is list:
LoadBuildFileIncludesIntoList(item, sublist_path, data, aux_data, check)
# Processes toolsets in all the targets. This recurses into condition entries
# since they can contain toolsets as well.
def ProcessToolsetsInDict(data):
if 'targets' in data:
target_list = data['targets']
new_target_list = []
for target in target_list:
# If this target already has an explicit 'toolset', and no 'toolsets'
# list, don't modify it further.
if 'toolset' in target and 'toolsets' not in target:
new_target_list.append(target)
continue
if multiple_toolsets:
toolsets = target.get('toolsets', ['target'])
else:
toolsets = ['target']
# Make sure this 'toolsets' definition is only processed once.
if 'toolsets' in target:
del target['toolsets']
if len(toolsets) > 0:
# Optimization: only do copies if more than one toolset is specified.
for build in toolsets[1:]:
new_target = gyp.simple_copy.deepcopy(target)
new_target['toolset'] = build
new_target_list.append(new_target)
target['toolset'] = toolsets[0]
new_target_list.append(target)
data['targets'] = new_target_list
if 'conditions' in data:
for condition in data['conditions']:
if type(condition) is list:
for condition_dict in condition[1:]:
if type(condition_dict) is dict:
ProcessToolsetsInDict(condition_dict)
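# Illustrative only (not from the gyp source): with multiple_toolsets enabled,
# a target such as
#
#     {'target_name': 'foo', 'type': 'static_library',
#      'toolsets': ['host', 'target']}
#
# is expanded by ProcessToolsetsInDict into two target dicts, one carrying
# 'toolset': 'host' and a deep-copied one carrying 'toolset': 'target'.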
# TODO(mark): I don't love this name. It just means that it's going to load
# a build file that contains targets and is expected to provide a targets dict
# that contains the targets...
def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes,
depth, check, load_dependencies):
# If depth is set, predefine the DEPTH variable to be a relative path from
# this build file's directory to the directory identified by depth.
if depth:
# TODO(dglazkov) The backslash/forward-slash replacement at the end is a
# temporary measure. This should really be addressed by keeping all paths
# in POSIX until actual project generation.
d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path))
if d == '':
variables['DEPTH'] = '.'
else:
variables['DEPTH'] = d.replace('\\', '/')
# The 'target_build_files' key is only set when loading target build files in
# the non-parallel code path, where LoadTargetBuildFile is called
# recursively. In the parallel code path, we don't need to check whether the
# |build_file_path| has already been loaded, because the 'scheduled' set in
# ParallelState guarantees that we never load the same |build_file_path|
# twice.
if 'target_build_files' in data:
if build_file_path in data['target_build_files']:
# Already loaded.
return False
data['target_build_files'].add(build_file_path)
gyp.DebugOutput(gyp.DEBUG_INCLUDES,
"Loading Target Build File '%s'", build_file_path)
build_file_data = LoadOneBuildFile(build_file_path, data, aux_data,
includes, True, check)
# Store DEPTH for later use in generators.
build_file_data['_DEPTH'] = depth
# Set up the included_files key indicating which .gyp files contributed to
# this target dict.
if 'included_files' in build_file_data:
raise GypError(build_file_path + ' must not contain included_files key')
included = GetIncludedBuildFiles(build_file_path, aux_data)
build_file_data['included_files'] = []
for included_file in included:
# included_file is relative to the current directory, but it needs to
# be made relative to build_file_path's directory.
included_relative = \
gyp.common.RelativePath(included_file,
os.path.dirname(build_file_path))
build_file_data['included_files'].append(included_relative)
# Do a first round of toolsets expansion so that conditions can be defined
# per toolset.
ProcessToolsetsInDict(build_file_data)
# Apply "pre"/"early" variable expansions and condition evaluations.
ProcessVariablesAndConditionsInDict(
build_file_data, PHASE_EARLY, variables, build_file_path)
# Since some toolsets might have been defined conditionally, perform
# a second round of toolsets expansion now.
ProcessToolsetsInDict(build_file_data)
# Look at each project's target_defaults dict, and merge settings into
# targets.
if 'target_defaults' in build_file_data:
if 'targets' not in build_file_data:
raise GypError("Unable to find targets in build file %s" %
build_file_path)
index = 0
while index < len(build_file_data['targets']):
# This procedure needs to give the impression that target_defaults is
# used as defaults, and the individual targets inherit from that.
# The individual targets need to be merged into the defaults. Make
# a deep copy of the defaults for each target, merge the target dict
# as found in the input file into that copy, and then hook up the
# copy with the target-specific data merged into it as the replacement
# target dict.
old_target_dict = build_file_data['targets'][index]
new_target_dict = gyp.simple_copy.deepcopy(
build_file_data['target_defaults'])
MergeDicts(new_target_dict, old_target_dict,
build_file_path, build_file_path)
build_file_data['targets'][index] = new_target_dict
index += 1
# No longer needed.
del build_file_data['target_defaults']
# Look for dependencies. This means that dependency resolution occurs
# after "pre" conditionals and variable expansion, but before "post" -
# in other words, you can't put a "dependencies" section inside a "post"
# conditional within a target.
dependencies = []
if 'targets' in build_file_data:
for target_dict in build_file_data['targets']:
if 'dependencies' not in target_dict:
continue
for dependency in target_dict['dependencies']:
dependencies.append(
gyp.common.ResolveTarget(build_file_path, dependency, None)[0])
if load_dependencies:
for dependency in dependencies:
try:
LoadTargetBuildFile(dependency, data, aux_data, variables,
includes, depth, check, load_dependencies)
except Exception, e:
gyp.common.ExceptionAppend(
e, 'while loading dependencies of %s' % build_file_path)
raise
else:
return (build_file_path, dependencies)
def CallLoadTargetBuildFile(global_flags,
build_file_path, variables,
includes, depth, check,
generator_input_info):
"""Wrapper around LoadTargetBuildFile for parallel processing.
This wrapper is used when LoadTargetBuildFile is executed in
a worker process.
"""
try:
signal.signal(signal.SIGINT, signal.SIG_IGN)
# Apply globals so that the worker process behaves the same.
for key, value in global_flags.iteritems():
globals()[key] = value
SetGeneratorGlobals(generator_input_info)
result = LoadTargetBuildFile(build_file_path, per_process_data,
per_process_aux_data, variables,
includes, depth, check, False)
if not result:
return result
(build_file_path, dependencies) = result
# We can safely pop the build_file_data from per_process_data because it
# will never be referenced by this process again, so we don't need to keep
# it in the cache.
build_file_data = per_process_data.pop(build_file_path)
# This gets serialized and sent back to the main process via a pipe.
# It's handled in LoadTargetBuildFileCallback.
return (build_file_path,
build_file_data,
dependencies)
except GypError, e:
sys.stderr.write("gyp: %s\n" % e)
return None
except Exception, e:
print >>sys.stderr, 'Exception:', e
print >>sys.stderr, traceback.format_exc()
return None
class ParallelProcessingError(Exception):
pass
class ParallelState(object):
"""Class to keep track of state when processing input files in parallel.
If build files are loaded in parallel, use this to keep track of
state during farming out and processing parallel jobs. It's stored
in a global so that the callback function can have access to it.
"""
def __init__(self):
# The multiprocessing pool.
self.pool = None
# The condition variable used to protect this object and notify
# the main loop when there might be more data to process.
self.condition = None
# The "data" dict that was passed to LoadTargetBuildFileParallel
self.data = None
# The number of parallel calls outstanding; decremented when a response
# was received.
self.pending = 0
# The set of all build files that have been scheduled, so we don't
# schedule the same one twice.
self.scheduled = set()
# A list of dependency build file paths that haven't been scheduled yet.
self.dependencies = []
# Flag to indicate if there was an error in a child process.
self.error = False
def LoadTargetBuildFileCallback(self, result):
"""Handle the results of running LoadTargetBuildFile in another process.
"""
self.condition.acquire()
if not result:
self.error = True
self.condition.notify()
self.condition.release()
return
(build_file_path0, build_file_data0, dependencies0) = result
self.data[build_file_path0] = build_file_data0
self.data['target_build_files'].add(build_file_path0)
for new_dependency in dependencies0:
if new_dependency not in self.scheduled:
self.scheduled.add(new_dependency)
self.dependencies.append(new_dependency)
self.pending -= 1
self.condition.notify()
self.condition.release()
def LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
check, generator_input_info):
parallel_state = ParallelState()
parallel_state.condition = threading.Condition()
# Make copies of the build_files argument that we can modify while working.
parallel_state.dependencies = list(build_files)
parallel_state.scheduled = set(build_files)
parallel_state.pending = 0
parallel_state.data = data
try:
parallel_state.condition.acquire()
while parallel_state.dependencies or parallel_state.pending:
if parallel_state.error:
break
if not parallel_state.dependencies:
parallel_state.condition.wait()
continue
dependency = parallel_state.dependencies.pop()
parallel_state.pending += 1
global_flags = {
'path_sections': globals()['path_sections'],
'non_configuration_keys': globals()['non_configuration_keys'],
'multiple_toolsets': globals()['multiple_toolsets']}
if not parallel_state.pool:
parallel_state.pool = multiprocessing.Pool(multiprocessing.cpu_count())
parallel_state.pool.apply_async(
CallLoadTargetBuildFile,
args = (global_flags, dependency,
variables, includes, depth, check, generator_input_info),
callback = parallel_state.LoadTargetBuildFileCallback)
except KeyboardInterrupt, e:
parallel_state.pool.terminate()
raise e
parallel_state.condition.release()
parallel_state.pool.close()
parallel_state.pool.join()
parallel_state.pool = None
if parallel_state.error:
sys.exit(1)
# Look for the bracket that matches the first bracket seen in a
# string, and return the start and end as a tuple. For example, if
# the input is something like "<(foo <(bar)) blah", then it would
# return (1, 13), indicating the entire string except for the leading
# "<" and trailing " blah".
LBRACKETS= set('{[(')
BRACKETS = {'}': '{', ']': '[', ')': '('}
def FindEnclosingBracketGroup(input_str):
stack = []
start = -1
for index, char in enumerate(input_str):
if char in LBRACKETS:
stack.append(char)
if start == -1:
start = index
elif char in BRACKETS:
if not stack:
return (-1, -1)
if stack.pop() != BRACKETS[char]:
return (-1, -1)
if not stack:
return (start, index + 1)
return (-1, -1)
def IsStrCanonicalInt(string):
"""Returns True if |string| is in its canonical integer form.
The canonical form is such that str(int(string)) == string.
"""
if type(string) is str:
# This function is called a lot so for maximum performance, avoid
# involving regexps which would otherwise make the code much
# shorter. Regexps would need twice the time of this function.
if string:
if string == "0":
return True
if string[0] == "-":
string = string[1:]
if not string:
return False
if '1' <= string[0] <= '9':
return string.isdigit()
return False
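# Illustrative only (not from the gyp source): IsStrCanonicalInt accepts
# exactly the strings for which str(int(s)) == s, e.g.
#
#     IsStrCanonicalInt("10")  -> True
#     IsStrCanonicalInt("-3")  -> True
#     IsStrCanonicalInt("007") -> False   # leading zero
#     IsStrCanonicalInt("1.0") -> False   # not an integer literal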
# This matches things like "<(asdf)", "<!(cmd)", "<!@(cmd)", "<|(list)",
# "<!interpreter(arguments)", "<([list])", and even "<([)" and "<(<())".
# In the last case, the inner "<()" is captured in match['content'].
early_variable_re = re.compile(
r'(?P<replace>(?P<type><(?:(?:!?@?)|\|)?)'
r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
r'\((?P<is_array>\s*\[?)'
r'(?P<content>.*?)(\]?)\))')
# This matches the same as early_variable_re, but with '>' instead of '<'.
late_variable_re = re.compile(
r'(?P<replace>(?P<type>>(?:(?:!?@?)|\|)?)'
r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
r'\((?P<is_array>\s*\[?)'
r'(?P<content>.*?)(\]?)\))')
# This matches the same as early_variable_re, but with '^' instead of '<'.
latelate_variable_re = re.compile(
r'(?P<replace>(?P<type>[\^](?:(?:!?@?)|\|)?)'
r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
r'\((?P<is_array>\s*\[?)'
r'(?P<content>.*?)(\]?)\))')
# Global cache of results from running commands so they don't have to be run
# more then once.
cached_command_results = {}
def FixupPlatformCommand(cmd):
if sys.platform == 'win32':
if type(cmd) is list:
cmd = [re.sub('^cat ', 'type ', cmd[0])] + cmd[1:]
else:
cmd = re.sub('^cat ', 'type ', cmd)
return cmd
PHASE_EARLY = 0
PHASE_LATE = 1
PHASE_LATELATE = 2
def ExpandVariables(input, phase, variables, build_file):
# Look for the pattern that gets expanded into variables
if phase == PHASE_EARLY:
variable_re = early_variable_re
expansion_symbol = '<'
elif phase == PHASE_LATE:
variable_re = late_variable_re
expansion_symbol = '>'
elif phase == PHASE_LATELATE:
variable_re = latelate_variable_re
expansion_symbol = '^'
else:
assert False
input_str = str(input)
if IsStrCanonicalInt(input_str):
return int(input_str)
# Do a quick scan to determine if an expensive regex search is warranted.
if expansion_symbol not in input_str:
return input_str
# Get the entire list of matches as a list of MatchObject instances.
# (using findall here would return strings instead of MatchObjects).
matches = list(variable_re.finditer(input_str))
if not matches:
return input_str
output = input_str
# Reverse the list of matches so that replacements are done right-to-left.
# That ensures that earlier replacements won't mess up the string in a
# way that causes later calls to find the earlier substituted text instead
# of what's intended for replacement.
matches.reverse()
for match_group in matches:
match = match_group.groupdict()
gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Matches: %r", match)
# match['replace'] is the substring to look for, match['type']
# is the character code for the replacement type (< > <! >! <| >| <@
# >@ <!@ >!@), match['is_array'] contains a '[' for command
# arrays, and match['content'] is the name of the variable (< >)
# or command to run (<! >!). match['command_string'] is an optional
# command string. Currently, only 'pymod_do_main' is supported.
# run_command is true if a ! variant is used.
run_command = '!' in match['type']
command_string = match['command_string']
# file_list is true if a | variant is used.
file_list = '|' in match['type']
# Capture these now so we can adjust them later.
replace_start = match_group.start('replace')
replace_end = match_group.end('replace')
# Find the ending paren, and re-evaluate the contained string.
(c_start, c_end) = FindEnclosingBracketGroup(input_str[replace_start:])
# Adjust the replacement range to match the entire command
# found by FindEnclosingBracketGroup (since the variable_re
# probably doesn't match the entire command if it contained
# nested variables).
replace_end = replace_start + c_end
# Find the "real" replacement, matching the appropriate closing
# paren, and adjust the replacement start and end.
replacement = input_str[replace_start:replace_end]
# Figure out what the contents of the variable parens are.
contents_start = replace_start + c_start + 1
contents_end = replace_end - 1
contents = input_str[contents_start:contents_end]
# Do filter substitution now for <|().
# Admittedly, this is different than the evaluation order in other
# contexts. However, since filtration has no chance to run on <|(),
# this seems like the only obvious way to give them access to filters.
if file_list:
processed_variables = gyp.simple_copy.deepcopy(variables)
ProcessListFiltersInDict(contents, processed_variables)
# Recurse to expand variables in the contents
contents = ExpandVariables(contents, phase,
processed_variables, build_file)
else:
# Recurse to expand variables in the contents
contents = ExpandVariables(contents, phase, variables, build_file)
# Strip off leading/trailing whitespace so that variable matches are
# simpler below (and because they are rarely needed).
contents = contents.strip()
# expand_to_list is true if an @ variant is used. In that case,
# the expansion should result in a list. Note that the caller
# is to be expecting a list in return, and not all callers do
# because not all are working in list context. Also, for list
# expansions, there can be no other text besides the variable
# expansion in the input string.
expand_to_list = '@' in match['type'] and input_str == replacement
if run_command or file_list:
# Find the build file's directory, so commands can be run or file lists
# generated relative to it.
build_file_dir = os.path.dirname(build_file)
if build_file_dir == '' and not file_list:
# If build_file is just a leaf filename indicating a file in the
# current directory, build_file_dir might be an empty string. Set
# it to None to signal to subprocess.Popen that it should run the
# command in the current directory.
build_file_dir = None
# Support <|(listfile.txt ...) which generates a file
# containing items from a gyp list, generated at gyp time.
# This works around actions/rules which have more inputs than will
# fit on the command line.
if file_list:
if type(contents) is list:
contents_list = contents
else:
contents_list = contents.split(' ')
replacement = contents_list[0]
if os.path.isabs(replacement):
raise GypError('| cannot handle absolute paths, got "%s"' % replacement)
if not generator_filelist_paths:
path = os.path.join(build_file_dir, replacement)
else:
if os.path.isabs(build_file_dir):
toplevel = generator_filelist_paths['toplevel']
rel_build_file_dir = gyp.common.RelativePath(build_file_dir, toplevel)
else:
rel_build_file_dir = build_file_dir
qualified_out_dir = generator_filelist_paths['qualified_out_dir']
path = os.path.join(qualified_out_dir, rel_build_file_dir, replacement)
gyp.common.EnsureDirExists(path)
replacement = gyp.common.RelativePath(path, build_file_dir)
f = gyp.common.WriteOnDiff(path)
for i in contents_list[1:]:
f.write('%s\n' % i)
f.close()
elif run_command:
use_shell = True
if match['is_array']:
contents = eval(contents)
use_shell = False
# Check for a cached value to avoid executing commands, or generating
# file lists more than once. The cache key contains the command to be
# run as well as the directory to run it from, to account for commands
# that depend on their current directory.
# TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory,
# someone could author a set of GYP files where each time the command
# is invoked it produces different output by design. When the need
# arises, the syntax should be extended to support no caching off a
# command's output so it is run every time.
cache_key = (str(contents), build_file_dir)
cached_value = cached_command_results.get(cache_key, None)
if cached_value is None:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Executing command '%s' in directory '%s'",
contents, build_file_dir)
replacement = ''
if command_string == 'pymod_do_main':
# <!pymod_do_main(modulename param eters) loads |modulename| as a
# python module and then calls that module's DoMain() function,
# passing ["param", "eters"] as a single list argument. For modules
# that don't load quickly, this can be faster than
# <!(python modulename param eters). Do this in |build_file_dir|.
oldwd = os.getcwd() # Python doesn't like os.open('.'): no fchdir.
if build_file_dir: # build_file_dir may be None (see above).
os.chdir(build_file_dir)
try:
parsed_contents = shlex.split(contents)
try:
py_module = __import__(parsed_contents[0])
except ImportError as e:
raise GypError("Error importing pymod_do_main"
"module (%s): %s" % (parsed_contents[0], e))
replacement = str(py_module.DoMain(parsed_contents[1:])).rstrip()
finally:
os.chdir(oldwd)
assert replacement != None
elif command_string:
raise GypError("Unknown command string '%s' in '%s'." %
(command_string, contents))
else:
# Fix up command with platform specific workarounds.
contents = FixupPlatformCommand(contents)
p = subprocess.Popen(contents, shell=use_shell,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
cwd=build_file_dir)
p_stdout, p_stderr = p.communicate('')
if p.wait() != 0 or p_stderr:
sys.stderr.write(p_stderr)
# Simulate check_call behavior, since check_call only exists
# in python 2.5 and later.
raise GypError("Call to '%s' returned exit status %d." %
(contents, p.returncode))
replacement = p_stdout.rstrip()
cached_command_results[cache_key] = replacement
else:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Had cache value for command '%s' in directory '%s'",
contents,build_file_dir)
replacement = cached_value
else:
if not contents in variables:
if contents[-1] in ['!', '/']:
# In order to allow cross-compiles (nacl) to happen more naturally,
# we will allow references to >(sources/) etc. to resolve to
          # an empty list if undefined. This allows actions to:
# 'action!': [
# '>@(_sources!)',
# ],
# 'action/': [
# '>@(_sources/)',
# ],
replacement = []
else:
raise GypError('Undefined variable ' + contents +
' in ' + build_file)
else:
replacement = variables[contents]
if type(replacement) is list:
for item in replacement:
if not contents[-1] == '/' and type(item) not in (str, int):
raise GypError('Variable ' + contents +
' must expand to a string or list of strings; ' +
'list contains a ' +
item.__class__.__name__)
# Run through the list and handle variable expansions in it. Since
# the list is guaranteed not to contain dicts, this won't do anything
# with conditions sections.
ProcessVariablesAndConditionsInList(replacement, phase, variables,
build_file)
elif type(replacement) not in (str, int):
raise GypError('Variable ' + contents +
' must expand to a string or list of strings; ' +
'found a ' + replacement.__class__.__name__)
if expand_to_list:
# Expanding in list context. It's guaranteed that there's only one
# replacement to do in |input_str| and that it's this replacement. See
# above.
if type(replacement) is list:
# If it's already a list, make a copy.
output = replacement[:]
else:
# Split it the same way sh would split arguments.
output = shlex.split(str(replacement))
else:
# Expanding in string context.
encoded_replacement = ''
if type(replacement) is list:
# When expanding a list into string context, turn the list items
# into a string in a way that will work with a subprocess call.
#
# TODO(mark): This isn't completely correct. This should
# call a generator-provided function that observes the
# proper list-to-argument quoting rules on a specific
# platform instead of just calling the POSIX encoding
# routine.
encoded_replacement = gyp.common.EncodePOSIXShellList(replacement)
else:
encoded_replacement = replacement
output = output[:replace_start] + str(encoded_replacement) + \
output[replace_end:]
# Prepare for the next match iteration.
input_str = output
if output == input:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Found only identity matches on %r, avoiding infinite "
"recursion.",
output)
else:
# Look for more matches now that we've replaced some, to deal with
# expanding local variables (variables defined in the same
# variables block as this one).
gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Found output %r, recursing.", output)
if type(output) is list:
if output and type(output[0]) is list:
# Leave output alone if it's a list of lists.
# We don't want such lists to be stringified.
pass
else:
new_output = []
for item in output:
new_output.append(
ExpandVariables(item, phase, variables, build_file))
output = new_output
else:
output = ExpandVariables(output, phase, variables, build_file)
# Convert all strings that are canonically-represented integers into integers.
if type(output) is list:
for index in xrange(0, len(output)):
if IsStrCanonicalInt(output[index]):
output[index] = int(output[index])
elif IsStrCanonicalInt(output):
output = int(output)
return output
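# Illustrative only (not from the gyp source): typical expansions handled by
# ExpandVariables during the early phase,
#
#     ExpandVariables('<(depth)/build/common.gypi', PHASE_EARLY,
#                     {'depth': '..'}, 'foo/foo.gyp')
#         -> '../build/common.gypi'
#
#     ExpandVariables('<!(echo hello)', PHASE_EARLY, {}, 'foo/foo.gyp')
#         -> 'hello'        (runs the command and substitutes its stdout)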
# The same condition is often evaluated over and over again so it
# makes sense to cache as much as possible between evaluations.
cached_conditions_asts = {}
def EvalCondition(condition, conditions_key, phase, variables, build_file):
"""Returns the dict that should be used or None if the result was
that nothing should be used."""
if type(condition) is not list:
raise GypError(conditions_key + ' must be a list')
if len(condition) < 2:
# It's possible that condition[0] won't work in which case this
# attempt will raise its own IndexError. That's probably fine.
raise GypError(conditions_key + ' ' + condition[0] +
' must be at least length 2, not ' + str(len(condition)))
i = 0
result = None
while i < len(condition):
cond_expr = condition[i]
true_dict = condition[i + 1]
if type(true_dict) is not dict:
raise GypError('{} {} must be followed by a dictionary, not {}'.format(
conditions_key, cond_expr, type(true_dict)))
if len(condition) > i + 2 and type(condition[i + 2]) is dict:
false_dict = condition[i + 2]
i = i + 3
if i != len(condition):
raise GypError('{} {} has {} unexpected trailing items'.format(
conditions_key, cond_expr, len(condition) - i))
else:
false_dict = None
i = i + 2
if result == None:
result = EvalSingleCondition(
cond_expr, true_dict, false_dict, phase, variables, build_file)
return result
def EvalSingleCondition(
cond_expr, true_dict, false_dict, phase, variables, build_file):
"""Returns true_dict if cond_expr evaluates to true, and false_dict
otherwise."""
  # Do expansions on the condition itself. Since the condition can naturally
# contain variable references without needing to resort to GYP expansion
# syntax, this is of dubious value for variables, but someone might want to
# use a command expansion directly inside a condition.
cond_expr_expanded = ExpandVariables(cond_expr, phase, variables,
build_file)
if type(cond_expr_expanded) not in (str, int):
raise ValueError(
'Variable expansion in this context permits str and int ' + \
'only, found ' + cond_expr_expanded.__class__.__name__)
try:
if cond_expr_expanded in cached_conditions_asts:
ast_code = cached_conditions_asts[cond_expr_expanded]
else:
ast_code = compile(cond_expr_expanded, '<string>', 'eval')
cached_conditions_asts[cond_expr_expanded] = ast_code
if eval(ast_code, {'__builtins__': None}, variables):
return true_dict
return false_dict
except SyntaxError, e:
syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s '
'at character %d.' %
(str(e.args[0]), e.text, build_file, e.offset),
e.filename, e.lineno, e.offset, e.text)
raise syntax_error
except NameError, e:
gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' %
(cond_expr_expanded, build_file))
raise GypError(e)
def ProcessConditionsInDict(the_dict, phase, variables, build_file):
# Process a 'conditions' or 'target_conditions' section in the_dict,
# depending on phase.
# early -> conditions
# late -> target_conditions
# latelate -> no conditions
#
# Each item in a conditions list consists of cond_expr, a string expression
# evaluated as the condition, and true_dict, a dict that will be merged into
# the_dict if cond_expr evaluates to true. Optionally, a third item,
# false_dict, may be present. false_dict is merged into the_dict if
# cond_expr evaluates to false.
#
# Any dict merged into the_dict will be recursively processed for nested
# conditionals and other expansions, also according to phase, immediately
# prior to being merged.
if phase == PHASE_EARLY:
conditions_key = 'conditions'
elif phase == PHASE_LATE:
conditions_key = 'target_conditions'
elif phase == PHASE_LATELATE:
return
else:
assert False
if not conditions_key in the_dict:
return
conditions_list = the_dict[conditions_key]
# Unhook the conditions list, it's no longer needed.
del the_dict[conditions_key]
for condition in conditions_list:
merge_dict = EvalCondition(condition, conditions_key, phase, variables,
build_file)
if merge_dict != None:
      # Expand variables and nested conditionals in the merge_dict before
# merging it.
ProcessVariablesAndConditionsInDict(merge_dict, phase,
variables, build_file)
MergeDicts(the_dict, merge_dict, build_file, build_file)
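# Illustrative only (not from the gyp source): a 'conditions' entry consumed by
# ProcessConditionsInDict typically looks like
#
#     'conditions': [
#       ['OS=="linux"', {'defines': ['IS_LINUX']}, {'defines': ['NOT_LINUX']}],
#     ],
#
# where the first dict is merged in when the expression evaluates to true and
# the optional second dict when it evaluates to false.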
def LoadAutomaticVariablesFromDict(variables, the_dict):
# Any keys with plain string values in the_dict become automatic variables.
# The variable name is the key name with a "_" character prepended.
for key, value in the_dict.iteritems():
if type(value) in (str, int, list):
variables['_' + key] = value
def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key):
# Any keys in the_dict's "variables" dict, if it has one, becomes a
# variable. The variable name is the key name in the "variables" dict.
# Variables that end with the % character are set only if they are unset in
# the variables dict. the_dict_key is the name of the key that accesses
# the_dict in the_dict's parent dict. If the_dict's parent is not a dict
# (it could be a list or it could be parentless because it is a root dict),
# the_dict_key will be None.
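  # Illustrative example (hypothetical values): given
  #   the_dict = {'variables': {'use_widget%': 0, 'widget_name': 'w'}}
  # 'widget_name' is always set, while 'use_widget' is only set to 0 if no
  # earlier definition of 'use_widget' exists in |variables|.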
for key, value in the_dict.get('variables', {}).iteritems():
if type(value) not in (str, int, list):
continue
if key.endswith('%'):
variable_name = key[:-1]
if variable_name in variables:
# If the variable is already set, don't set it.
continue
      if the_dict_key == 'variables' and variable_name in the_dict:
        # If the variable is set without a % in the_dict, and the_dict is a
        # variables dict (making |variables| a variables sub-dict of a
        # variables dict), use the_dict's definition.
value = the_dict[variable_name]
else:
variable_name = key
variables[variable_name] = value
def ProcessVariablesAndConditionsInDict(the_dict, phase, variables_in,
build_file, the_dict_key=None):
"""Handle all variable and command expansion and conditional evaluation.
This function is the public entry point for all variable expansions and
conditional evaluations. The variables_in dictionary will not be modified
by this function.
"""
# Make a copy of the variables_in dict that can be modified during the
# loading of automatics and the loading of the variables dict.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
if 'variables' in the_dict:
# Make sure all the local variables are added to the variables
# list before we process them so that you can reference one
# variable from another. They will be fully expanded by recursion
# in ExpandVariables.
for key, value in the_dict['variables'].iteritems():
variables[key] = value
# Handle the associated variables dict first, so that any variable
# references within can be resolved prior to using them as variables.
# Pass a copy of the variables dict to avoid having it be tainted.
# Otherwise, it would have extra automatics added for everything that
# should just be an ordinary variable in this scope.
ProcessVariablesAndConditionsInDict(the_dict['variables'], phase,
variables, build_file, 'variables')
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
for key, value in the_dict.iteritems():
# Skip "variables", which was already processed if present.
if key != 'variables' and type(value) is str:
expanded = ExpandVariables(value, phase, variables, build_file)
if type(expanded) not in (str, int):
raise ValueError(
'Variable expansion in this context permits str and int ' + \
'only, found ' + expanded.__class__.__name__ + ' for ' + key)
the_dict[key] = expanded
# Variable expansion may have resulted in changes to automatics. Reload.
# TODO(mark): Optimization: only reload if no changes were made.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
# Process conditions in this dict. This is done after variable expansion
# so that conditions may take advantage of expanded variables. For example,
# if the_dict contains:
# {'type': '<(library_type)',
# 'conditions': [['_type=="static_library"', { ... }]]},
# _type, as used in the condition, will only be set to the value of
# library_type if variable expansion is performed before condition
# processing. However, condition processing should occur prior to recursion
# so that variables (both automatic and "variables" dict type) may be
# adjusted by conditions sections, merged into the_dict, and have the
# intended impact on contained dicts.
#
# This arrangement means that a "conditions" section containing a "variables"
# section will only have those variables effective in subdicts, not in
# the_dict. The workaround is to put a "conditions" section within a
# "variables" section. For example:
# {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]],
# 'defines': ['<(define)'],
# 'my_subdict': {'defines': ['<(define)']}},
# will not result in "IS_MAC" being appended to the "defines" list in the
# current scope but would result in it being appended to the "defines" list
# within "my_subdict". By comparison:
# {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]},
# 'defines': ['<(define)'],
# 'my_subdict': {'defines': ['<(define)']}},
# will append "IS_MAC" to both "defines" lists.
# Evaluate conditions sections, allowing variable expansions within them
# as well as nested conditionals. This will process a 'conditions' or
# 'target_conditions' section, perform appropriate merging and recursive
# conditional and variable processing, and then remove the conditions section
# from the_dict if it is present.
ProcessConditionsInDict(the_dict, phase, variables, build_file)
# Conditional processing may have resulted in changes to automatics or the
# variables dict. Reload.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
# Recurse into child dicts, or process child lists which may result in
# further recursion into descendant dicts.
for key, value in the_dict.iteritems():
# Skip "variables" and string values, which were already processed if
# present.
if key == 'variables' or type(value) is str:
continue
if type(value) is dict:
# Pass a copy of the variables dict so that subdicts can't influence
# parents.
ProcessVariablesAndConditionsInDict(value, phase, variables,
build_file, key)
elif type(value) is list:
# The list itself can't influence the variables dict, and
# ProcessVariablesAndConditionsInList will make copies of the variables
# dict if it needs to pass it to something that can influence it. No
# copy is necessary here.
ProcessVariablesAndConditionsInList(value, phase, variables,
build_file)
elif type(value) is not int:
raise TypeError('Unknown type ' + value.__class__.__name__ + \
' for ' + key)
def ProcessVariablesAndConditionsInList(the_list, phase, variables,
build_file):
# Iterate using an index so that new values can be assigned into the_list.
index = 0
while index < len(the_list):
item = the_list[index]
if type(item) is dict:
# Make a copy of the variables dict so that it won't influence anything
# outside of its own scope.
ProcessVariablesAndConditionsInDict(item, phase, variables, build_file)
elif type(item) is list:
ProcessVariablesAndConditionsInList(item, phase, variables, build_file)
elif type(item) is str:
expanded = ExpandVariables(item, phase, variables, build_file)
if type(expanded) in (str, int):
the_list[index] = expanded
elif type(expanded) is list:
the_list[index:index+1] = expanded
index += len(expanded)
# index now identifies the next item to examine. Continue right now
# without falling into the index increment below.
continue
else:
        raise ValueError(
            'Variable expansion in this context permits strings and ' + \
            'lists only, found ' + expanded.__class__.__name__ + ' at ' + \
            str(index))
elif type(item) is not int:
      raise TypeError('Unknown type ' + item.__class__.__name__ + \
                      ' at index ' + str(index))
index = index + 1
def BuildTargetsDict(data):
"""Builds a dict mapping fully-qualified target names to their target dicts.
|data| is a dict mapping loaded build files by pathname relative to the
current directory. Values in |data| are build file contents. For each
|data| value with a "targets" key, the value of the "targets" key is taken
as a list containing target dicts. Each target's fully-qualified name is
constructed from the pathname of the build file (|data| key) and its
"target_name" property. These fully-qualified names are used as the keys
in the returned dict. These keys provide access to the target dicts,
the dicts in the "targets" lists.
"""
targets = {}
for build_file in data['target_build_files']:
for target in data[build_file].get('targets', []):
target_name = gyp.common.QualifiedTarget(build_file,
target['target_name'],
target['toolset'])
if target_name in targets:
raise GypError('Duplicate target definitions for ' + target_name)
targets[target_name] = target
return targets
def QualifyDependencies(targets):
"""Make dependency links fully-qualified relative to the current directory.
|targets| is a dict mapping fully-qualified target names to their target
dicts. For each target in this dict, keys known to contain dependency
links are examined, and any dependencies referenced will be rewritten
so that they are fully-qualified and relative to the current directory.
All rewritten dependencies are suitable for use as keys to |targets| or a
similar dict.
"""
all_dependency_sections = [dep + op
for dep in dependency_sections
for op in ('', '!', '/')]
for target, target_dict in targets.iteritems():
target_build_file = gyp.common.BuildFile(target)
toolset = target_dict['toolset']
for dependency_key in all_dependency_sections:
dependencies = target_dict.get(dependency_key, [])
for index in xrange(0, len(dependencies)):
dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget(
target_build_file, dependencies[index], toolset)
if not multiple_toolsets:
# Ignore toolset specification in the dependency if it is specified.
dep_toolset = toolset
dependency = gyp.common.QualifiedTarget(dep_file,
dep_target,
dep_toolset)
dependencies[index] = dependency
# Make sure anything appearing in a list other than "dependencies" also
# appears in the "dependencies" list.
if dependency_key != 'dependencies' and \
dependency not in target_dict['dependencies']:
raise GypError('Found ' + dependency + ' in ' + dependency_key +
' of ' + target + ', but not in dependencies')
def ExpandWildcardDependencies(targets, data):
"""Expands dependencies specified as build_file:*.
For each target in |targets|, examines sections containing links to other
targets. If any such section contains a link of the form build_file:*, it
is taken as a wildcard link, and is expanded to list each target in
build_file. The |data| dict provides access to build file dicts.
Any target that does not wish to be included by wildcard can provide an
optional "suppress_wildcard" key in its target dict. When present and
true, a wildcard dependency link will not include such targets.
All dependency names, including the keys to |targets| and the values in each
dependency list, must be qualified when this function is called.
"""
for target, target_dict in targets.iteritems():
toolset = target_dict['toolset']
target_build_file = gyp.common.BuildFile(target)
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
# Loop this way instead of "for dependency in" or "for index in xrange"
# because the dependencies list will be modified within the loop body.
index = 0
while index < len(dependencies):
(dependency_build_file, dependency_target, dependency_toolset) = \
gyp.common.ParseQualifiedTarget(dependencies[index])
if dependency_target != '*' and dependency_toolset != '*':
# Not a wildcard. Keep it moving.
index = index + 1
continue
if dependency_build_file == target_build_file:
# It's an error for a target to depend on all other targets in
# the same file, because a target cannot depend on itself.
raise GypError('Found wildcard in ' + dependency_key + ' of ' +
target + ' referring to same build file')
# Take the wildcard out and adjust the index so that the next
# dependency in the list will be processed the next time through the
# loop.
del dependencies[index]
index = index - 1
# Loop through the targets in the other build file, adding them to
# this target's list of dependencies in place of the removed
# wildcard.
dependency_target_dicts = data[dependency_build_file]['targets']
for dependency_target_dict in dependency_target_dicts:
if int(dependency_target_dict.get('suppress_wildcard', False)):
continue
dependency_target_name = dependency_target_dict['target_name']
if (dependency_target != '*' and
dependency_target != dependency_target_name):
continue
dependency_target_toolset = dependency_target_dict['toolset']
if (dependency_toolset != '*' and
dependency_toolset != dependency_target_toolset):
continue
dependency = gyp.common.QualifiedTarget(dependency_build_file,
dependency_target_name,
dependency_target_toolset)
index = index + 1
dependencies.insert(index, dependency)
index = index + 1
def Unify(l):
"""Removes duplicate elements from l, keeping the first element."""
seen = {}
return [seen.setdefault(e, e) for e in l if e not in seen]
def RemoveDuplicateDependencies(targets):
"""Makes sure every dependency appears only once in all targets's dependency
lists."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
target_dict[dependency_key] = Unify(dependencies)
def Filter(l, item):
"""Removes item from l."""
res = {}
return [res.setdefault(e, e) for e in l if e != item]
def RemoveSelfDependencies(targets):
"""Remove self dependencies from targets that have the prune_self_dependency
variable set."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
for t in dependencies:
if t == target_name:
if targets[t].get('variables', {}).get('prune_self_dependency', 0):
target_dict[dependency_key] = Filter(dependencies, target_name)
def RemoveLinkDependenciesFromNoneTargets(targets):
"""Remove dependencies having the 'link_dependency' attribute from the 'none'
targets."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
for t in dependencies:
if target_dict.get('type', None) == 'none':
if targets[t].get('variables', {}).get('link_dependency', 0):
target_dict[dependency_key] = \
Filter(target_dict[dependency_key], t)
class DependencyGraphNode(object):
"""
Attributes:
ref: A reference to an object that this DependencyGraphNode represents.
dependencies: List of DependencyGraphNodes on which this one depends.
dependents: List of DependencyGraphNodes that depend on this one.
"""
class CircularException(GypError):
pass
def __init__(self, ref):
self.ref = ref
self.dependencies = []
self.dependents = []
def __repr__(self):
return '<DependencyGraphNode: %r>' % self.ref
def FlattenToList(self):
# flat_list is the sorted list of dependencies - actually, the list items
# are the "ref" attributes of DependencyGraphNodes. Every target will
# appear in flat_list after all of its dependencies, and before all of its
# dependents.
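    # Illustrative example (hypothetical targets): if A depends on B and B
    # depends on C, the flattened order is [C, B, A]; each ref appears only
    # after all of its dependencies.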
flat_list = OrderedSet()
# in_degree_zeros is the list of DependencyGraphNodes that have no
# dependencies not in flat_list. Initially, it is a copy of the children
# of this node, because when the graph was built, nodes with no
# dependencies were made implicit dependents of the root node.
in_degree_zeros = set(self.dependents[:])
while in_degree_zeros:
# Nodes in in_degree_zeros have no dependencies not in flat_list, so they
# can be appended to flat_list. Take these nodes out of in_degree_zeros
# as work progresses, so that the next node to process from the list can
# always be accessed at a consistent position.
node = in_degree_zeros.pop()
flat_list.add(node.ref)
# Look at dependents of the node just added to flat_list. Some of them
# may now belong in in_degree_zeros.
for node_dependent in node.dependents:
is_in_degree_zero = True
# TODO: We want to check through the
# node_dependent.dependencies list but if it's long and we
# always start at the beginning, then we get O(n^2) behaviour.
for node_dependent_dependency in node_dependent.dependencies:
if not node_dependent_dependency.ref in flat_list:
            # The dependent has one or more dependencies not in flat_list. There
# will be more chances to add it to flat_list when examining
# it again as a dependent of those other dependencies, provided
# that there are no cycles.
is_in_degree_zero = False
break
if is_in_degree_zero:
# All of the dependent's dependencies are already in flat_list. Add
# it to in_degree_zeros where it will be processed in a future
# iteration of the outer loop.
in_degree_zeros.add(node_dependent)
return list(flat_list)
def FindCycles(self):
"""
Returns a list of cycles in the graph, where each cycle is its own list.
"""
results = []
visited = set()
def Visit(node, path):
for child in node.dependents:
if child in path:
results.append([child] + path[:path.index(child) + 1])
elif not child in visited:
visited.add(child)
Visit(child, [child] + path)
visited.add(self)
Visit(self, [self])
return results
def DirectDependencies(self, dependencies=None):
"""Returns a list of just direct dependencies."""
    if dependencies is None:
dependencies = []
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
      if dependency.ref is not None and dependency.ref not in dependencies:
dependencies.append(dependency.ref)
return dependencies
def _AddImportedDependencies(self, targets, dependencies=None):
"""Given a list of direct dependencies, adds indirect dependencies that
other dependencies have declared to export their settings.
This method does not operate on self. Rather, it operates on the list
of dependencies in the |dependencies| argument. For each dependency in
that list, if any declares that it exports the settings of one of its
own dependencies, those dependencies whose settings are "passed through"
are added to the list. As new items are added to the list, they too will
be processed, so it is possible to import settings through multiple levels
of dependencies.
This method is not terribly useful on its own, it depends on being
"primed" with a list of direct dependencies such as one provided by
DirectDependencies. DirectAndImportedDependencies is intended to be the
public entry point.
"""
    if dependencies is None:
dependencies = []
index = 0
while index < len(dependencies):
dependency = dependencies[index]
dependency_dict = targets[dependency]
# Add any dependencies whose settings should be imported to the list
# if not already present. Newly-added items will be checked for
# their own imports when the list iteration reaches them.
# Rather than simply appending new items, insert them after the
# dependency that exported them. This is done to more closely match
# the depth-first method used by DeepDependencies.
add_index = 1
for imported_dependency in \
dependency_dict.get('export_dependent_settings', []):
if imported_dependency not in dependencies:
dependencies.insert(index + add_index, imported_dependency)
add_index = add_index + 1
index = index + 1
return dependencies
def DirectAndImportedDependencies(self, targets, dependencies=None):
"""Returns a list of a target's direct dependencies and all indirect
dependencies that a dependency has advertised settings should be exported
through the dependency for.
"""
dependencies = self.DirectDependencies(dependencies)
return self._AddImportedDependencies(targets, dependencies)
def DeepDependencies(self, dependencies=None):
"""Returns an OrderedSet of all of a target's dependencies, recursively."""
if dependencies is None:
# Using a list to get ordered output and a set to do fast "is it
# already added" checks.
dependencies = OrderedSet()
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
if dependency.ref is None:
continue
if dependency.ref not in dependencies:
dependencies.add(dependency.ref)
dependency.DeepDependencies(dependencies)
return dependencies
def _LinkDependenciesInternal(self, targets, include_shared_libraries,
dependencies=None, initial=True):
"""Returns an OrderedSet of dependency targets that are linked
into this target.
This function has a split personality, depending on the setting of
|initial|. Outside callers should always leave |initial| at its default
setting.
When adding a target to the list of dependencies, this function will
recurse into itself with |initial| set to False, to collect dependencies
that are linked into the linkable target for which the list is being built.
If |include_shared_libraries| is False, the resulting dependencies will not
include shared_library targets that are linked into this target.
"""
if dependencies is None:
# Using a list to get ordered output and a set to do fast "is it
# already added" checks.
dependencies = OrderedSet()
# Check for None, corresponding to the root node.
if self.ref is None:
return dependencies
# It's kind of sucky that |targets| has to be passed into this function,
# but that's presently the easiest way to access the target dicts so that
# this function can find target types.
if 'target_name' not in targets[self.ref]:
raise GypError("Missing 'target_name' field in target.")
if 'type' not in targets[self.ref]:
raise GypError("Missing 'type' field in target %s" %
targets[self.ref]['target_name'])
target_type = targets[self.ref]['type']
is_linkable = target_type in linkable_types
if initial and not is_linkable:
# If this is the first target being examined and it's not linkable,
# return an empty list of link dependencies, because the link
# dependencies are intended to apply to the target itself (initial is
# True) and this target won't be linked.
return dependencies
# Don't traverse 'none' targets if explicitly excluded.
if (target_type == 'none' and
not targets[self.ref].get('dependencies_traverse', True)):
dependencies.add(self.ref)
return dependencies
# Executables and loadable modules are already fully and finally linked.
# Nothing else can be a link dependency of them, there can only be
# dependencies in the sense that a dependent target might run an
# executable or load the loadable_module.
if not initial and target_type in ('executable', 'loadable_module'):
return dependencies
# Shared libraries are already fully linked. They should only be included
# in |dependencies| when adjusting static library dependencies (in order to
# link against the shared_library's import lib), but should not be included
# in |dependencies| when propagating link_settings.
# The |include_shared_libraries| flag controls which of these two cases we
# are handling.
if (not initial and target_type == 'shared_library' and
not include_shared_libraries):
return dependencies
# The target is linkable, add it to the list of link dependencies.
if self.ref not in dependencies:
dependencies.add(self.ref)
if initial or not is_linkable:
      # If this is a subsequent target and it's linkable, don't look any
      # further for linkable dependencies, as they'll already be linked into
      # this linkable target. Always look at dependencies of the initial
      # target, and always look at dependencies of non-linkables.
for dependency in self.dependencies:
dependency._LinkDependenciesInternal(targets,
include_shared_libraries,
dependencies, False)
return dependencies
def DependenciesForLinkSettings(self, targets):
"""
Returns a list of dependency targets whose link_settings should be merged
into this target.
"""
# TODO(sbaig) Currently, chrome depends on the bug that shared libraries'
# link_settings are propagated. So for now, we will allow it, unless the
# 'allow_sharedlib_linksettings_propagation' flag is explicitly set to
# False. Once chrome is fixed, we can remove this flag.
include_shared_libraries = \
targets[self.ref].get('allow_sharedlib_linksettings_propagation', True)
return self._LinkDependenciesInternal(targets, include_shared_libraries)
def DependenciesToLinkAgainst(self, targets):
"""
Returns a list of dependency targets that are linked into this target.
"""
return self._LinkDependenciesInternal(targets, True)
def BuildDependencyList(targets):
# Create a DependencyGraphNode for each target. Put it into a dict for easy
# access.
dependency_nodes = {}
for target, spec in targets.iteritems():
if target not in dependency_nodes:
dependency_nodes[target] = DependencyGraphNode(target)
# Set up the dependency links. Targets that have no dependencies are treated
# as dependent on root_node.
root_node = DependencyGraphNode(None)
for target, spec in targets.iteritems():
target_node = dependency_nodes[target]
target_build_file = gyp.common.BuildFile(target)
dependencies = spec.get('dependencies')
if not dependencies:
target_node.dependencies = [root_node]
root_node.dependents.append(target_node)
else:
for dependency in dependencies:
dependency_node = dependency_nodes.get(dependency)
if not dependency_node:
raise GypError("Dependency '%s' not found while "
"trying to load target %s" % (dependency, target))
target_node.dependencies.append(dependency_node)
dependency_node.dependents.append(target_node)
flat_list = root_node.FlattenToList()
# If there's anything left unvisited, there must be a circular dependency
# (cycle).
if len(flat_list) != len(targets):
if not root_node.dependents:
# If all targets have dependencies, add the first target as a dependent
# of root_node so that the cycle can be discovered from root_node.
target = targets.keys()[0]
target_node = dependency_nodes[target]
target_node.dependencies.append(root_node)
root_node.dependents.append(target_node)
cycles = []
for cycle in root_node.FindCycles():
paths = [node.ref for node in cycle]
cycles.append('Cycle: %s' % ' -> '.join(paths))
raise DependencyGraphNode.CircularException(
'Cycles in dependency graph detected:\n' + '\n'.join(cycles))
return [dependency_nodes, flat_list]
def VerifyNoGYPFileCircularDependencies(targets):
# Create a DependencyGraphNode for each gyp file containing a target. Put
# it into a dict for easy access.
dependency_nodes = {}
for target in targets.iterkeys():
build_file = gyp.common.BuildFile(target)
if not build_file in dependency_nodes:
dependency_nodes[build_file] = DependencyGraphNode(build_file)
# Set up the dependency links.
for target, spec in targets.iteritems():
build_file = gyp.common.BuildFile(target)
build_file_node = dependency_nodes[build_file]
target_dependencies = spec.get('dependencies', [])
for dependency in target_dependencies:
try:
dependency_build_file = gyp.common.BuildFile(dependency)
except GypError, e:
gyp.common.ExceptionAppend(
e, 'while computing dependencies of .gyp file %s' % build_file)
raise
if dependency_build_file == build_file:
# A .gyp file is allowed to refer back to itself.
continue
dependency_node = dependency_nodes.get(dependency_build_file)
if not dependency_node:
raise GypError("Dependancy '%s' not found" % dependency_build_file)
if dependency_node not in build_file_node.dependencies:
build_file_node.dependencies.append(dependency_node)
dependency_node.dependents.append(build_file_node)
# Files that have no dependencies are treated as dependent on root_node.
root_node = DependencyGraphNode(None)
for build_file_node in dependency_nodes.itervalues():
if len(build_file_node.dependencies) == 0:
build_file_node.dependencies.append(root_node)
root_node.dependents.append(build_file_node)
flat_list = root_node.FlattenToList()
# If there's anything left unvisited, there must be a circular dependency
# (cycle).
if len(flat_list) != len(dependency_nodes):
if not root_node.dependents:
# If all files have dependencies, add the first file as a dependent
# of root_node so that the cycle can be discovered from root_node.
file_node = dependency_nodes.values()[0]
file_node.dependencies.append(root_node)
root_node.dependents.append(file_node)
cycles = []
for cycle in root_node.FindCycles():
paths = [node.ref for node in cycle]
cycles.append('Cycle: %s' % ' -> '.join(paths))
raise DependencyGraphNode.CircularException(
'Cycles in .gyp file dependency graph detected:\n' + '\n'.join(cycles))
def DoDependentSettings(key, flat_list, targets, dependency_nodes):
# key should be one of all_dependent_settings, direct_dependent_settings,
# or link_settings.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
if key == 'all_dependent_settings':
dependencies = dependency_nodes[target].DeepDependencies()
elif key == 'direct_dependent_settings':
dependencies = \
dependency_nodes[target].DirectAndImportedDependencies(targets)
elif key == 'link_settings':
dependencies = \
dependency_nodes[target].DependenciesForLinkSettings(targets)
else:
raise GypError("DoDependentSettings doesn't know how to determine "
'dependencies for ' + key)
for dependency in dependencies:
dependency_dict = targets[dependency]
if not key in dependency_dict:
continue
dependency_build_file = gyp.common.BuildFile(dependency)
MergeDicts(target_dict, dependency_dict[key],
build_file, dependency_build_file)
def AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
sort_dependencies):
# Recompute target "dependencies" properties. For each static library
# target, remove "dependencies" entries referring to other static libraries,
# unless the dependency has the "hard_dependency" attribute set. For each
# linkable target, add a "dependencies" entry referring to all of the
  # target's computed list of link dependencies (including static libraries)
  # if no such entry is already present.
for target in flat_list:
target_dict = targets[target]
target_type = target_dict['type']
if target_type == 'static_library':
if not 'dependencies' in target_dict:
continue
target_dict['dependencies_original'] = target_dict.get(
'dependencies', [])[:]
# A static library should not depend on another static library unless
# the dependency relationship is "hard," which should only be done when
# a dependent relies on some side effect other than just the build
# product, like a rule or action output. Further, if a target has a
# non-hard dependency, but that dependency exports a hard dependency,
# the non-hard dependency can safely be removed, but the exported hard
# dependency must be added to the target to keep the same dependency
# ordering.
dependencies = \
dependency_nodes[target].DirectAndImportedDependencies(targets)
index = 0
while index < len(dependencies):
dependency = dependencies[index]
dependency_dict = targets[dependency]
# Remove every non-hard static library dependency and remove every
# non-static library dependency that isn't a direct dependency.
if (dependency_dict['type'] == 'static_library' and \
not dependency_dict.get('hard_dependency', False)) or \
(dependency_dict['type'] != 'static_library' and \
not dependency in target_dict['dependencies']):
# Take the dependency out of the list, and don't increment index
# because the next dependency to analyze will shift into the index
# formerly occupied by the one being removed.
del dependencies[index]
else:
index = index + 1
# Update the dependencies. If the dependencies list is empty, it's not
# needed, so unhook it.
if len(dependencies) > 0:
target_dict['dependencies'] = dependencies
else:
del target_dict['dependencies']
elif target_type in linkable_types:
# Get a list of dependency targets that should be linked into this
# target. Add them to the dependencies list if they're not already
# present.
link_dependencies = \
dependency_nodes[target].DependenciesToLinkAgainst(targets)
for dependency in link_dependencies:
if dependency == target:
continue
if not 'dependencies' in target_dict:
target_dict['dependencies'] = []
if not dependency in target_dict['dependencies']:
target_dict['dependencies'].append(dependency)
# Sort the dependencies list in the order from dependents to dependencies.
# e.g. If A and B depend on C and C depends on D, sort them in A, B, C, D.
# Note: flat_list is already sorted in the order from dependencies to
# dependents.
if sort_dependencies and 'dependencies' in target_dict:
target_dict['dependencies'] = [dep for dep in reversed(flat_list)
if dep in target_dict['dependencies']]
# Initialize this here to speed up MakePathRelative.
exception_re = re.compile(r'''["']?[-/$<>^]''')
def MakePathRelative(to_file, fro_file, item):
# If item is a relative path, it's relative to the build file dict that it's
# coming from. Fix it up to make it relative to the build file dict that
# it's going into.
# Exception: any |item| that begins with these special characters is
# returned without modification.
# / Used when a path is already absolute (shortcut optimization;
# such paths would be returned as absolute anyway)
# $ Used for build environment variables
# - Used for some build environment flags (such as -lapr-1 in a
# "libraries" section)
# < Used for our own variable and command expansions (see ExpandVariables)
# > Used for our own variable and command expansions (see ExpandVariables)
# ^ Used for our own variable and command expansions (see ExpandVariables)
#
  # " or ' Used when a value is quoted. If these are present, then we
# check the second character instead.
#
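  # Illustrative example (hypothetical paths): rebasing 'foo.cc' from
  # third_party/lib/lib.gyp into app/app.gyp yields
  # '../third_party/lib/foo.cc', while items like '$(SDKROOT)/usr/lib' or
  # '<(depth)/base' are returned unchanged.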
if to_file == fro_file or exception_re.match(item):
return item
else:
# TODO(dglazkov) The backslash/forward-slash replacement at the end is a
# temporary measure. This should really be addressed by keeping all paths
# in POSIX until actual project generation.
ret = os.path.normpath(os.path.join(
gyp.common.RelativePath(os.path.dirname(fro_file),
os.path.dirname(to_file)),
item)).replace('\\', '/')
if item[-1] == '/':
ret += '/'
return ret
def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True):
# Python documentation recommends objects which do not support hash
# set this value to None. Python library objects follow this rule.
is_hashable = lambda val: val.__hash__
# If x is hashable, returns whether x is in s. Else returns whether x is in l.
def is_in_set_or_list(x, s, l):
if is_hashable(x):
return x in s
return x in l
prepend_index = 0
# Make membership testing of hashables in |to| (in particular, strings)
# faster.
hashable_to_set = set(x for x in to if is_hashable(x))
for item in fro:
singleton = False
if type(item) in (str, int):
# The cheap and easy case.
if is_paths:
to_item = MakePathRelative(to_file, fro_file, item)
else:
to_item = item
if not (type(item) is str and item.startswith('-')):
# Any string that doesn't begin with a "-" is a singleton - it can
# only appear once in a list, to be enforced by the list merge append
# or prepend.
singleton = True
elif type(item) is dict:
# Make a copy of the dictionary, continuing to look for paths to fix.
# The other intelligent aspects of merge processing won't apply because
# item is being merged into an empty dict.
to_item = {}
MergeDicts(to_item, item, to_file, fro_file)
elif type(item) is list:
# Recurse, making a copy of the list. If the list contains any
# descendant dicts, path fixing will occur. Note that here, custom
# values for is_paths and append are dropped; those are only to be
# applied to |to| and |fro|, not sublists of |fro|. append shouldn't
# matter anyway because the new |to_item| list is empty.
to_item = []
MergeLists(to_item, item, to_file, fro_file)
else:
raise TypeError(
'Attempt to merge list item of unsupported type ' + \
item.__class__.__name__)
if append:
# If appending a singleton that's already in the list, don't append.
# This ensures that the earliest occurrence of the item will stay put.
if not singleton or not is_in_set_or_list(to_item, hashable_to_set, to):
to.append(to_item)
if is_hashable(to_item):
hashable_to_set.add(to_item)
else:
# If prepending a singleton that's already in the list, remove the
# existing instance and proceed with the prepend. This ensures that the
# item appears at the earliest possible position in the list.
while singleton and to_item in to:
to.remove(to_item)
# Don't just insert everything at index 0. That would prepend the new
# items to the list in reverse order, which would be an unwelcome
# surprise.
to.insert(prepend_index, to_item)
if is_hashable(to_item):
hashable_to_set.add(to_item)
prepend_index = prepend_index + 1
def MergeDicts(to, fro, to_file, fro_file):
# I wanted to name the parameter "from" but it's a Python keyword...
for k, v in fro.iteritems():
# It would be nice to do "if not k in to: to[k] = v" but that wouldn't give
# copy semantics. Something else may want to merge from the |fro| dict
# later, and having the same dict ref pointed to twice in the tree isn't
# what anyone wants considering that the dicts may subsequently be
# modified.
if k in to:
bad_merge = False
if type(v) in (str, int):
if type(to[k]) not in (str, int):
bad_merge = True
elif type(v) is not type(to[k]):
bad_merge = True
if bad_merge:
raise TypeError(
'Attempt to merge dict value of type ' + v.__class__.__name__ + \
' into incompatible type ' + to[k].__class__.__name__ + \
' for key ' + k)
if type(v) in (str, int):
# Overwrite the existing value, if any. Cheap and easy.
is_path = IsPathSection(k)
if is_path:
to[k] = MakePathRelative(to_file, fro_file, v)
else:
to[k] = v
elif type(v) is dict:
# Recurse, guaranteeing copies will be made of objects that require it.
if not k in to:
to[k] = {}
MergeDicts(to[k], v, to_file, fro_file)
elif type(v) is list:
# Lists in dicts can be merged with different policies, depending on
# how the key in the "from" dict (k, the from-key) is written.
#
# If the from-key has ...the to-list will have this action
# this character appended:... applied when receiving the from-list:
# = replace
# + prepend
# ? set, only if to-list does not yet exist
# (none) append
#
# This logic is list-specific, but since it relies on the associated
# dict key, it's checked in this dict-oriented function.
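      # Illustrative example (hypothetical key): merging {'defines+': ['A']}
      # into a dict whose 'defines' is ['B'] prepends, giving ['A', 'B'];
      # 'defines=' would replace the list and 'defines?' would leave an
      # existing list untouched.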
ext = k[-1]
append = True
if ext == '=':
list_base = k[:-1]
lists_incompatible = [list_base, list_base + '?']
to[list_base] = []
elif ext == '+':
list_base = k[:-1]
lists_incompatible = [list_base + '=', list_base + '?']
append = False
elif ext == '?':
list_base = k[:-1]
lists_incompatible = [list_base, list_base + '=', list_base + '+']
else:
list_base = k
lists_incompatible = [list_base + '=', list_base + '?']
# Some combinations of merge policies appearing together are meaningless.
# It's stupid to replace and append simultaneously, for example. Append
# and prepend are the only policies that can coexist.
for list_incompatible in lists_incompatible:
if list_incompatible in fro:
raise GypError('Incompatible list policies ' + k + ' and ' +
list_incompatible)
if list_base in to:
if ext == '?':
# If the key ends in "?", the list will only be merged if it doesn't
# already exist.
continue
elif type(to[list_base]) is not list:
# This may not have been checked above if merging in a list with an
# extension character.
raise TypeError(
'Attempt to merge dict value of type ' + v.__class__.__name__ + \
' into incompatible type ' + to[list_base].__class__.__name__ + \
' for key ' + list_base + '(' + k + ')')
else:
to[list_base] = []
# Call MergeLists, which will make copies of objects that require it.
# MergeLists can recurse back into MergeDicts, although this will be
# to make copies of dicts (with paths fixed), there will be no
# subsequent dict "merging" once entering a list because lists are
# always replaced, appended to, or prepended to.
is_paths = IsPathSection(list_base)
MergeLists(to[list_base], v, to_file, fro_file, is_paths, append)
else:
raise TypeError(
'Attempt to merge dict value of unsupported type ' + \
v.__class__.__name__ + ' for key ' + k)
def MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, configuration, visited):
  # Skip if previously visited.
if configuration in visited:
return
# Look at this configuration.
configuration_dict = target_dict['configurations'][configuration]
# Merge in parents.
for parent in configuration_dict.get('inherit_from', []):
MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, parent, visited + [configuration])
# Merge it into the new config.
MergeDicts(new_configuration_dict, configuration_dict,
build_file, build_file)
# Drop abstract.
if 'abstract' in new_configuration_dict:
del new_configuration_dict['abstract']
def SetUpConfigurations(target, target_dict):
# key_suffixes is a list of key suffixes that might appear on key names.
# These suffixes are handled in conditional evaluations (for =, +, and ?)
# and rules/exclude processing (for ! and /). Keys with these suffixes
# should be treated the same as keys without.
key_suffixes = ['=', '+', '?', '!', '/']
build_file = gyp.common.BuildFile(target)
# Provide a single configuration by default if none exists.
# TODO(mark): Signal an error if default_configurations exists but
# configurations does not.
if not 'configurations' in target_dict:
target_dict['configurations'] = {'Default': {}}
if not 'default_configuration' in target_dict:
concrete = [i for (i, config) in target_dict['configurations'].iteritems()
if not config.get('abstract')]
target_dict['default_configuration'] = sorted(concrete)[0]
merged_configurations = {}
configs = target_dict['configurations']
for (configuration, old_configuration_dict) in configs.iteritems():
# Skip abstract configurations (saves work only).
if old_configuration_dict.get('abstract'):
continue
# Configurations inherit (most) settings from the enclosing target scope.
# Get the inheritance relationship right by making a copy of the target
# dict.
new_configuration_dict = {}
for (key, target_val) in target_dict.iteritems():
key_ext = key[-1:]
if key_ext in key_suffixes:
key_base = key[:-1]
else:
key_base = key
if not key_base in non_configuration_keys:
new_configuration_dict[key] = gyp.simple_copy.deepcopy(target_val)
# Merge in configuration (with all its parents first).
MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, configuration, [])
merged_configurations[configuration] = new_configuration_dict
# Put the new configurations back into the target dict as a configuration.
for configuration in merged_configurations.keys():
target_dict['configurations'][configuration] = (
merged_configurations[configuration])
# Now drop all the abstract ones.
for configuration in target_dict['configurations'].keys():
old_configuration_dict = target_dict['configurations'][configuration]
if old_configuration_dict.get('abstract'):
del target_dict['configurations'][configuration]
# Now that all of the target's configurations have been built, go through
# the target dict's keys and remove everything that's been moved into a
# "configurations" section.
delete_keys = []
for key in target_dict:
key_ext = key[-1:]
if key_ext in key_suffixes:
key_base = key[:-1]
else:
key_base = key
if not key_base in non_configuration_keys:
delete_keys.append(key)
for key in delete_keys:
del target_dict[key]
# Check the configurations to see if they contain invalid keys.
for configuration in target_dict['configurations'].keys():
configuration_dict = target_dict['configurations'][configuration]
for key in configuration_dict.keys():
if key in invalid_configuration_keys:
raise GypError('%s not allowed in the %s configuration, found in '
'target %s' % (key, configuration, target))
def ProcessListFiltersInDict(name, the_dict):
"""Process regular expression and exclusion-based filters on lists.
An exclusion list is in a dict key named with a trailing "!", like
"sources!". Every item in such a list is removed from the associated
main list, which in this example, would be "sources". Removed items are
placed into a "sources_excluded" list in the dict.
Regular expression (regex) filters are contained in dict keys named with a
trailing "/", such as "sources/" to operate on the "sources" list. Regex
filters in a dict take the form:
'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'],
['include', '_mac\\.cc$'] ],
The first filter says to exclude all files ending in _linux.cc, _mac.cc, and
_win.cc. The second filter then includes all files ending in _mac.cc that
are now or were once in the "sources" list. Items matching an "exclude"
filter are subject to the same processing as would occur if they were listed
by name in an exclusion list (ending in "!"). Items matching an "include"
filter are brought back into the main list if previously excluded by an
exclusion list or exclusion regex filter. Subsequent matching "exclude"
patterns can still cause items to be excluded after matching an "include".
"""
# Look through the dictionary for any lists whose keys end in "!" or "/".
# These are lists that will be treated as exclude lists and regular
# expression-based exclude/include lists. Collect the lists that are
# needed first, looking for the lists that they operate on, and assemble
  # them into |lists|. This is done in a separate loop up front, because
# the _included and _excluded keys need to be added to the_dict, and that
# can't be done while iterating through it.
lists = []
del_lists = []
for key, value in the_dict.iteritems():
operation = key[-1]
if operation != '!' and operation != '/':
continue
if type(value) is not list:
raise ValueError(name + ' key ' + key + ' must be list, not ' + \
value.__class__.__name__)
list_key = key[:-1]
if list_key not in the_dict:
# This happens when there's a list like "sources!" but no corresponding
# "sources" list. Since there's nothing for it to operate on, queue up
# the "sources!" list for deletion now.
del_lists.append(key)
continue
if type(the_dict[list_key]) is not list:
value = the_dict[list_key]
raise ValueError(name + ' key ' + list_key + \
' must be list, not ' + \
value.__class__.__name__ + ' when applying ' + \
{'!': 'exclusion', '/': 'regex'}[operation])
if not list_key in lists:
lists.append(list_key)
# Delete the lists that are known to be unneeded at this point.
for del_list in del_lists:
del the_dict[del_list]
for list_key in lists:
the_list = the_dict[list_key]
# Initialize the list_actions list, which is parallel to the_list. Each
# item in list_actions identifies whether the corresponding item in
# the_list should be excluded, unconditionally preserved (included), or
# whether no exclusion or inclusion has been applied. Items for which
# no exclusion or inclusion has been applied (yet) have value -1, items
# excluded have value 0, and items included have value 1. Includes and
# excludes override previous actions. All items in list_actions are
# initialized to -1 because no excludes or includes have been processed
# yet.
list_actions = list((-1,) * len(the_list))
exclude_key = list_key + '!'
if exclude_key in the_dict:
for exclude_item in the_dict[exclude_key]:
for index in xrange(0, len(the_list)):
if exclude_item == the_list[index]:
# This item matches the exclude_item, so set its action to 0
# (exclude).
list_actions[index] = 0
# The "whatever!" list is no longer needed, dump it.
del the_dict[exclude_key]
regex_key = list_key + '/'
if regex_key in the_dict:
for regex_item in the_dict[regex_key]:
[action, pattern] = regex_item
pattern_re = re.compile(pattern)
if action == 'exclude':
# This item matches an exclude regex, so set its value to 0 (exclude).
action_value = 0
elif action == 'include':
# This item matches an include regex, so set its value to 1 (include).
action_value = 1
else:
# This is an action that doesn't make any sense.
raise ValueError('Unrecognized action ' + action + ' in ' + name + \
' key ' + regex_key)
for index in xrange(0, len(the_list)):
list_item = the_list[index]
if list_actions[index] == action_value:
# Even if the regex matches, nothing will change so continue (regex
# searches are expensive).
continue
if pattern_re.search(list_item):
# Regular expression match.
list_actions[index] = action_value
# The "whatever/" list is no longer needed, dump it.
del the_dict[regex_key]
# Add excluded items to the excluded list.
#
# Note that exclude_key ("sources!") is different from excluded_key
# ("sources_excluded"). The exclude_key list is input and it was already
# processed and deleted; the excluded_key list is output and it's about
# to be created.
excluded_key = list_key + '_excluded'
if excluded_key in the_dict:
raise GypError(name + ' key ' + excluded_key +
' must not be present prior '
' to applying exclusion/regex filters for ' + list_key)
excluded_list = []
# Go backwards through the list_actions list so that as items are deleted,
# the indices of items that haven't been seen yet don't shift. That means
# that things need to be prepended to excluded_list to maintain them in the
# same order that they existed in the_list.
for index in xrange(len(list_actions) - 1, -1, -1):
if list_actions[index] == 0:
# Dump anything with action 0 (exclude). Keep anything with action 1
# (include) or -1 (no include or exclude seen for the item).
excluded_list.insert(0, the_list[index])
del the_list[index]
# If anything was excluded, put the excluded list into the_dict at
# excluded_key.
if len(excluded_list) > 0:
the_dict[excluded_key] = excluded_list
# Now recurse into subdicts and lists that may contain dicts.
for key, value in the_dict.iteritems():
if type(value) is dict:
ProcessListFiltersInDict(key, value)
elif type(value) is list:
ProcessListFiltersInList(key, value)
def ProcessListFiltersInList(name, the_list):
for item in the_list:
if type(item) is dict:
ProcessListFiltersInDict(name, item)
elif type(item) is list:
ProcessListFiltersInList(name, item)
def ValidateTargetType(target, target_dict):
"""Ensures the 'type' field on the target is one of the known types.
Arguments:
target: string, name of target.
target_dict: dict, target spec.
Raises an exception on error.
"""
VALID_TARGET_TYPES = ('executable', 'loadable_module',
'static_library', 'shared_library',
'none')
target_type = target_dict.get('type', None)
if target_type not in VALID_TARGET_TYPES:
raise GypError("Target %s has an invalid target type '%s'. "
"Must be one of %s." %
(target, target_type, '/'.join(VALID_TARGET_TYPES)))
if (target_dict.get('standalone_static_library', 0) and
not target_type == 'static_library'):
raise GypError('Target %s has type %s but standalone_static_library flag is'
' only valid for static_library type.' % (target,
target_type))
def ValidateRulesInTarget(target, target_dict, extra_sources_for_rules):
"""Ensures that the rules sections in target_dict are valid and consistent,
and determines which sources they apply to.
Arguments:
target: string, name of target.
target_dict: dict, target spec containing "rules" and "sources" lists.
extra_sources_for_rules: a list of keys to scan for rule matches in
addition to 'sources'.
"""
# Dicts to map between values found in rules' 'rule_name' and 'extension'
# keys and the rule dicts themselves.
rule_names = {}
rule_extensions = {}
rules = target_dict.get('rules', [])
for rule in rules:
# Make sure that there's no conflict among rule names and extensions.
rule_name = rule['rule_name']
if rule_name in rule_names:
raise GypError('rule %s exists in duplicate, target %s' %
(rule_name, target))
rule_names[rule_name] = rule
rule_extension = rule['extension']
if rule_extension.startswith('.'):
rule_extension = rule_extension[1:]
if rule_extension in rule_extensions:
raise GypError(('extension %s associated with multiple rules, ' +
'target %s rules %s and %s') %
(rule_extension, target,
rule_extensions[rule_extension]['rule_name'],
rule_name))
rule_extensions[rule_extension] = rule
# Make sure rule_sources isn't already there. It's going to be
# created below if needed.
if 'rule_sources' in rule:
raise GypError(
'rule_sources must not exist in input, target %s rule %s' %
(target, rule_name))
rule_sources = []
source_keys = ['sources']
source_keys.extend(extra_sources_for_rules)
for source_key in source_keys:
for source in target_dict.get(source_key, []):
(source_root, source_extension) = os.path.splitext(source)
if source_extension.startswith('.'):
source_extension = source_extension[1:]
if source_extension == rule_extension:
rule_sources.append(source)
if len(rule_sources) > 0:
rule['rule_sources'] = rule_sources
def ValidateRunAsInTarget(target, target_dict, build_file):
target_name = target_dict.get('target_name')
run_as = target_dict.get('run_as')
if not run_as:
return
if type(run_as) is not dict:
raise GypError("The 'run_as' in target %s from file %s should be a "
"dictionary." %
(target_name, build_file))
action = run_as.get('action')
if not action:
raise GypError("The 'run_as' in target %s from file %s must have an "
"'action' section." %
(target_name, build_file))
if type(action) is not list:
raise GypError("The 'action' for 'run_as' in target %s from file %s "
"must be a list." %
(target_name, build_file))
working_directory = run_as.get('working_directory')
if working_directory and type(working_directory) is not str:
raise GypError("The 'working_directory' for 'run_as' in target %s "
"in file %s should be a string." %
(target_name, build_file))
environment = run_as.get('environment')
if environment and type(environment) is not dict:
raise GypError("The 'environment' for 'run_as' in target %s "
"in file %s should be a dictionary." %
(target_name, build_file))
def ValidateActionsInTarget(target, target_dict, build_file):
'''Validates the inputs to the actions in a target.'''
target_name = target_dict.get('target_name')
actions = target_dict.get('actions', [])
for action in actions:
action_name = action.get('action_name')
if not action_name:
raise GypError("Anonymous action in target %s. "
"An action must have an 'action_name' field." %
target_name)
inputs = action.get('inputs', None)
if inputs is None:
raise GypError('Action in target %s has no inputs.' % target_name)
action_command = action.get('action')
if action_command and not action_command[0]:
raise GypError("Empty action as command in target %s." % target_name)
def TurnIntIntoStrInDict(the_dict):
"""Given dict the_dict, recursively converts all integers into strings.
"""
# Use items instead of iteritems because there's no need to try to look at
# reinserted keys and their associated values.
for k, v in the_dict.items():
if type(v) is int:
v = str(v)
the_dict[k] = v
elif type(v) is dict:
TurnIntIntoStrInDict(v)
elif type(v) is list:
TurnIntIntoStrInList(v)
if type(k) is int:
del the_dict[k]
the_dict[str(k)] = v
def TurnIntIntoStrInList(the_list):
"""Given list the_list, recursively converts all integers into strings.
"""
for index in xrange(0, len(the_list)):
item = the_list[index]
if type(item) is int:
the_list[index] = str(item)
elif type(item) is dict:
TurnIntIntoStrInDict(item)
elif type(item) is list:
TurnIntIntoStrInList(item)
def PruneUnwantedTargets(targets, flat_list, dependency_nodes, root_targets,
data):
"""Return only the targets that are deep dependencies of |root_targets|."""
qualified_root_targets = []
for target in root_targets:
target = target.strip()
qualified_targets = gyp.common.FindQualifiedTargets(target, flat_list)
if not qualified_targets:
raise GypError("Could not find target %s" % target)
qualified_root_targets.extend(qualified_targets)
wanted_targets = {}
for target in qualified_root_targets:
wanted_targets[target] = targets[target]
for dependency in dependency_nodes[target].DeepDependencies():
wanted_targets[dependency] = targets[dependency]
wanted_flat_list = [t for t in flat_list if t in wanted_targets]
# Prune unwanted targets from each build_file's data dict.
for build_file in data['target_build_files']:
if not 'targets' in data[build_file]:
continue
new_targets = []
for target in data[build_file]['targets']:
qualified_name = gyp.common.QualifiedTarget(build_file,
target['target_name'],
target['toolset'])
if qualified_name in wanted_targets:
new_targets.append(target)
data[build_file]['targets'] = new_targets
return wanted_targets, wanted_flat_list
def VerifyNoCollidingTargets(targets):
"""Verify that no two targets in the same directory share the same name.
Arguments:
targets: A list of targets in the form 'path/to/file.gyp:target_name'.
"""
# Keep a dict going from 'subdirectory:target_name' to 'foo.gyp'.
used = {}
for target in targets:
    # Separate out 'path/to/file.gyp', 'target_name' from
# 'path/to/file.gyp:target_name'.
path, name = target.rsplit(':', 1)
# Separate out 'path/to', 'file.gyp' from 'path/to/file.gyp'.
subdir, gyp = os.path.split(path)
# Use '.' for the current directory '', so that the error messages make
# more sense.
if not subdir:
subdir = '.'
# Prepare a key like 'path/to:target_name'.
key = subdir + ':' + name
if key in used:
# Complain if this target is already used.
raise GypError('Duplicate target name "%s" in directory "%s" used both '
'in "%s" and "%s".' % (name, subdir, gyp, used[key]))
used[key] = gyp
def SetGeneratorGlobals(generator_input_info):
# Set up path_sections and non_configuration_keys with the default data plus
# the generator-specific data.
global path_sections
path_sections = set(base_path_sections)
path_sections.update(generator_input_info['path_sections'])
global non_configuration_keys
non_configuration_keys = base_non_configuration_keys[:]
non_configuration_keys.extend(generator_input_info['non_configuration_keys'])
global multiple_toolsets
multiple_toolsets = generator_input_info[
'generator_supports_multiple_toolsets']
global generator_filelist_paths
generator_filelist_paths = generator_input_info['generator_filelist_paths']
def Load(build_files, variables, includes, depth, generator_input_info, check,
circular_check, parallel, root_targets):
SetGeneratorGlobals(generator_input_info)
# A generator can have other lists (in addition to sources) be processed
# for rules.
extra_sources_for_rules = generator_input_info['extra_sources_for_rules']
# Load build files. This loads every target-containing build file into
# the |data| dictionary such that the keys to |data| are build file names,
# and the values are the entire build file contents after "early" or "pre"
# processing has been done and includes have been resolved.
# NOTE: data contains both "target" files (.gyp) and "includes" (.gypi), as
# well as meta-data (e.g. 'included_files' key). 'target_build_files' keeps
# track of the keys corresponding to "target" files.
data = {'target_build_files': set()}
# Normalize paths everywhere. This is important because paths will be
# used as keys to the data dict and for references between input files.
build_files = set(map(os.path.normpath, build_files))
if parallel:
LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
check, generator_input_info)
else:
aux_data = {}
for build_file in build_files:
try:
LoadTargetBuildFile(build_file, data, aux_data,
variables, includes, depth, check, True)
except Exception, e:
gyp.common.ExceptionAppend(e, 'while trying to load %s' % build_file)
raise
# Build a dict to access each target's subdict by qualified name.
targets = BuildTargetsDict(data)
# Fully qualify all dependency links.
QualifyDependencies(targets)
# Remove self-dependencies from targets that have 'prune_self_dependencies'
# set to 1.
RemoveSelfDependencies(targets)
# Expand dependencies specified as build_file:*.
ExpandWildcardDependencies(targets, data)
# Remove all dependencies marked as 'link_dependency' from the targets of
# type 'none'.
RemoveLinkDependenciesFromNoneTargets(targets)
# Apply exclude (!) and regex (/) list filters only for dependency_sections.
for target_name, target_dict in targets.iteritems():
tmp_dict = {}
for key_base in dependency_sections:
for op in ('', '!', '/'):
key = key_base + op
if key in target_dict:
tmp_dict[key] = target_dict[key]
del target_dict[key]
ProcessListFiltersInDict(target_name, tmp_dict)
# Write the results back to |target_dict|.
for key in tmp_dict:
target_dict[key] = tmp_dict[key]
# Make sure every dependency appears at most once.
RemoveDuplicateDependencies(targets)
if circular_check:
# Make sure that any targets in a.gyp don't contain dependencies in other
# .gyp files that further depend on a.gyp.
VerifyNoGYPFileCircularDependencies(targets)
[dependency_nodes, flat_list] = BuildDependencyList(targets)
if root_targets:
# Remove, from |targets| and |flat_list|, the targets that are not deep
# dependencies of the targets specified in |root_targets|.
targets, flat_list = PruneUnwantedTargets(
targets, flat_list, dependency_nodes, root_targets, data)
# Check that no two targets in the same directory have the same name.
VerifyNoCollidingTargets(flat_list)
# Handle dependent settings of various types.
for settings_type in ['all_dependent_settings',
'direct_dependent_settings',
'link_settings']:
DoDependentSettings(settings_type, flat_list, targets, dependency_nodes)
# Take out the dependent settings now that they've been published to all
# of the targets that require them.
for target in flat_list:
if settings_type in targets[target]:
del targets[target][settings_type]
# Make sure static libraries don't declare dependencies on other static
# libraries, but that linkables depend on all unlinked static libraries
# that they need so that their link steps will be correct.
gii = generator_input_info
if gii['generator_wants_static_library_dependencies_adjusted']:
AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
gii['generator_wants_sorted_dependencies'])
# Apply "post"/"late"/"target" variable expansions and condition evaluations.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ProcessVariablesAndConditionsInDict(
target_dict, PHASE_LATE, variables, build_file)
# Move everything that can go into a "configurations" section into one.
for target in flat_list:
target_dict = targets[target]
SetUpConfigurations(target, target_dict)
# Apply exclude (!) and regex (/) list filters.
for target in flat_list:
target_dict = targets[target]
ProcessListFiltersInDict(target, target_dict)
# Apply "latelate" variable expansions and condition evaluations.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ProcessVariablesAndConditionsInDict(
target_dict, PHASE_LATELATE, variables, build_file)
# Make sure that the rules make sense, and build up rule_sources lists as
# needed. Not all generators will need to use the rule_sources lists, but
# some may, and it seems best to build the list in a common spot.
# Also validate actions and run_as elements in targets.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ValidateTargetType(target, target_dict)
ValidateRulesInTarget(target, target_dict, extra_sources_for_rules)
ValidateRunAsInTarget(target, target_dict, build_file)
ValidateActionsInTarget(target, target_dict, build_file)
# Generators might not expect ints. Turn them into strs.
TurnIntIntoStrInDict(data)
# TODO(mark): Return |data| for now because the generator needs a list of
# build files that came in. In the future, maybe it should just accept
# a list, and not the whole data dict.
return [flat_list, targets, data]
| mit |
dhhagan/PAM | Python/PAM.py | 1 | 5037 | #PAM.py
import re
import glob, os, time
from numpy import *
from pylab import *
def analyzeFile(fileName,delim):
cols = {}
indexToName = {}
lineNum = 0
goodLines = 0
shortLines = 0
FILE = open(fileName,'r')
for line in FILE:
line = line.strip()
if lineNum < 1:
lineNum += 1
continue
elif lineNum == 1:
headings = line.split(delim)
i = 0
for heading in headings:
heading = heading.strip()
cols[heading] = []
indexToName[i] = heading
i += 1
lineNum += 1
lineLength = len(cols)
else:
data = line.split(delim)
if len(data) == lineLength:
goodLines += 1
i = 0
for point in data:
point = point.strip()
cols[indexToName[i]] += [point]
i += 1
lineNum += 1
else:
shortLines += 1
lineNum += 1
continue
	FILE.close()
return cols, indexToName, lineNum, shortLines
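# Rough usage sketch (file name and column heading are placeholders); note that the
# first line of the input file is skipped and the second line must hold the headings:
#   cols, indexToName, lineNum, shortLines = analyzeFile('run_01.txt', ',')
#   ozone_readings = cols.get('O3', [])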
def numericalSort(value):
numbers = re.compile(r'(\d+)')
parts = numbers.split(value)
parts[1::2] = map(int, parts[1::2])
return parts
def popDate(fileName):
run = fileName.split('.')[0]
runNo = run.split('_')[-1]
return runNo
def getFile(date,regex):#Works
files = []
files = sorted((glob.glob('*'+regex+'*')),key=numericalSort,reverse=False)
if date.lower() == 'last':
files = files.pop()
else:
files = [item for item in files if re.search(date,item)]
return files
def plotConc(data,ozone,times):
# This function plots data versus time
import datetime as dt
from matplotlib import pyplot as plt
from matplotlib.dates import date2num
#time = [dt.datetime.strptime(time,"%m/%d/%Y %I:%M:%S %p") for time in times]
time = [dt.datetime.strptime(time,"%m/%d/%Y %I:%M:%S %p") for time in times]
x = date2num(time)
legend1 = []
legend2 = []
fig = plt.figure('Gas Concentration Readings for East St.Louis')
ax1 = fig.add_subplot(111)
ax2 = twinx()
for key,value in data.items():
ax1.plot_date(x,data[key],'-',xdate=True)
legend1.append(key)
for key, value in ozone.items():
ax2.plot_date(x,ozone[key],'-.',xdate=True)
legend2.append(key)
title('Gas Concentrations for East St. Louis', fontsize = 12)
ax1.set_ylabel(r'$Concentration(ppb)$', fontsize = 12)
ax2.set_ylabel(r'$Concentration(ppb)$', fontsize = 12)
xlabel(r"$Time \, Stamp$", fontsize = 12)
ax1.legend(legend1,loc='upper right')
ax2.legend(legend2,loc='lower right')
grid(True)
return
def plotBankRelays(data,relays,times):
# This function plots data versus time
import datetime as dt
from matplotlib import pyplot as plt
from matplotlib.dates import date2num
time = [dt.datetime.strptime(time,"%m/%d/%Y %I:%M:%S %p") for time in times]
x = date2num(time)
#x1 = [date.strftime("%m-%d %H:%M:%S") for date in time]
legend1 = []
legend2 = []
#plt.locator_params(axis='x', nbins=4)
fig = plt.figure('VAPS Thermocouple Readings: Chart 2')
ax1 = fig.add_subplot(111)
ax2 = twinx()
for key,value in data.items():
ax1.plot_date(x,data[key],'-',xdate=True)
legend1.append(key)
for key,value in relays.items():
ax2.plot_date(x,relays[key],'--',xdate=True)
legend2.append(key)
title('VAPS Temperatures: Chart 2', fontsize = 12)
ax1.set_ylabel(r'$Temperature(^oC)$', fontsize = 12)
ax2.set_ylabel(r'$Relay \, States$', fontsize = 12)
ax1.set_xlabel(r"$Time \, Stamp$", fontsize = 12)
#print [num2date(item) for item in ax1.get_xticks()]
#ax1.set_xticks(x)
#ax1.set_xticklabels([date.strftime("%m-%d %H:%M %p") for date in time])
#ax1.legend(bbox_to_anchor=(0.,1.02,1.,.102),loc=3,ncol=2,mode="expand",borderaxespad=0.)
ax1.legend(legend1,loc='upper right')
ax2.legend(legend2,loc='lower right')
#ax1.xaxis.set_major_formatter(FormatStrFormatter(date.strftime("%m-%d %H:%M:%S")))
plt.subplots_adjust(bottom=0.15)
grid(True)
return
def goodFiles(files,goodHeaders,delim): # Good
irregFiles = 0
goodFiles = []
for file in files:
lineNo = 0
falseCount = 0
FILE = open(file,'r')
for line in FILE:
line = line.strip()
if lineNo == 5:
# Check all the headings to make sure the file is good
head = line.split(delim)
for item in head:
if item in goodHeaders:
continue
else:
falseCount += 1
if falseCount == 0:
goodFiles.append(file)
else:
irregFiles += 1
lineNo += 1
else:
lineNo += 1
continue
	FILE.close()
return goodFiles, irregFiles
| mit |
pkainz/pylearn2 | pylearn2/scripts/datasets/make_cifar100_patches_8x8.py | 41 | 2282 | """
This script makes a dataset of two million approximately whitened patches,
extracted at random uniformly from the CIFAR-100 train dataset.
This script is intended to reproduce the preprocessing used by Adam Coates
et. al. in their work from the first half of 2011 on the CIFAR-10 and
STL-10 datasets.
"""
from __future__ import print_function
from pylearn2.utils import serial
from pylearn2.datasets import preprocessing
from pylearn2.datasets.cifar100 import CIFAR100
from pylearn2.utils import string
data_dir = string.preprocess('${PYLEARN2_DATA_PATH}')
print('Loading CIFAR-100 train dataset...')
data = CIFAR100(which_set='train')
print("Preparing output directory...")
patch_dir = data_dir + '/cifar100/cifar100_patches_8x8'
serial.mkdir(patch_dir)
README = open(patch_dir + '/README', 'w')
README.write("""
The .pkl files in this directory may be opened in python using
cPickle, pickle, or pylearn2.serial.load.
data.pkl contains a pylearn2 Dataset object defining an unlabeled
dataset of 2 million 8x8 approximately whitened, contrast-normalized
patches drawn uniformly at random from the CIFAR-100 train set.
preprocessor.pkl contains a pylearn2 Pipeline object that was used
to extract the patches and approximately whiten / contrast normalize
them. This object is necessary when extracting features for
supervised learning or test set classification, because the
extracted features must be computed using inputs that have been
whitened with the ZCA matrix learned and stored by this Pipeline.
They were created with the pylearn2 script make_cifar100_patches_8x8.py.
All other files in this directory, including this README, were
created by the same script and are necessary for the other files
to function correctly.
""")
README.close()
print("Preprocessing the data...")
pipeline = preprocessing.Pipeline()
pipeline.items.append(
preprocessing.ExtractPatches(patch_shape=(8, 8), num_patches=2*1000*1000))
pipeline.items.append(
preprocessing.GlobalContrastNormalization(sqrt_bias=10., use_std=True))
pipeline.items.append(preprocessing.ZCA())
data.apply_preprocessor(preprocessor=pipeline, can_fit=True)
data.use_design_loc(patch_dir + '/data.npy')
serial.save(patch_dir + '/data.pkl', data)
serial.save(patch_dir + '/preprocessor.pkl', pipeline)
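# Downstream sketch (illustration only, not executed by this script): the saved
# pipeline is meant to be reloaded so later inputs are processed with the same
# fitted ZCA; other_data stands in for another pylearn2 dataset:
#   pipeline = serial.load(patch_dir + '/preprocessor.pkl')
#   other_data.apply_preprocessor(preprocessor=pipeline, can_fit=False)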
| bsd-3-clause |
MRCSDZ/subtitols | includes/fckeditor/editor/filemanager/browser/default/connectors/py/connector.py | 11 | 22691 | #!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2007 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python.
Tested With:
Standard:
Python 2.3.3
Zope:
Zope Version: (Zope 2.8.1-final, python 2.3.5, linux2)
Python Version: 2.3.5 (#4, Mar 10 2005, 01:40:25)
[GCC 3.3.3 20040412 (Red Hat Linux 3.3.3-7)]
System Platform: linux2
"""
"""
Author Notes (04 December 2005):
This module has gone through quite a few phases of change. Obviously,
I am only supporting that part of the code that I use. Initially
I had the upload directory as a part of zope (ie. uploading files
directly into Zope), before realising that there were too many
complex intricacies within Zope to deal with. Zope is one ugly piece
of code. So I decided to complement Zope by an Apache server (which
I had running anyway, and doing nothing). So I mapped all uploads
from an arbitrary server directory to an arbitrary web directory.
All the FCKeditor uploading occurred this way, and I didn't have to
stuff around with fiddling with Zope objects and the like (which are
terribly complex and something you don't want to do - trust me).
Maybe a Zope expert can touch up the Zope components. In the end,
I had FCKeditor loaded in Zope (probably a bad idea as well), and
I replaced the connector.py with an alias to a server module.
Right now, all Zope components will simply remain as is because
I've had enough of Zope.
See notes right at the end of this file for how I aliased out of Zope.
Anyway, most of you probably won't use Zope, so things are pretty
simple in that regard.
Typically, SERVER_DIR is the root of WEB_DIR (not necessarily).
Most definitely, SERVER_USERFILES_DIR points to WEB_USERFILES_DIR.
"""
import cgi
import re
import os
import string
"""
escape
Converts the special characters '<', '>', and '&'.
RFC 1866 specifies that these characters be represented
in HTML as < > and & respectively. In Python
1.5 we use the new string.replace() function for speed.
"""
def escape(text, replace=string.replace):
text = replace(text, '&', '&') # must be done 1st
text = replace(text, '<', '<')
text = replace(text, '>', '>')
text = replace(text, '"', '"')
return text
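# For illustration: escape('<a href="x">') returns '&lt;a href=&quot;x&quot;&gt;'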
"""
getFCKeditorConnector
Creates a new instance of an FCKeditorConnector, and runs it
"""
def getFCKeditorConnector(context=None):
# Called from Zope. Passes the context through
connector = FCKeditorConnector(context=context)
return connector.run()
"""
FCKeditorRequest
A wrapper around the request object
Can handle normal CGI request, or a Zope request
Extend as required
"""
class FCKeditorRequest(object):
def __init__(self, context=None):
if (context is not None):
r = context.REQUEST
else:
r = cgi.FieldStorage()
self.context = context
self.request = r
def isZope(self):
if (self.context is not None):
return True
return False
def has_key(self, key):
return self.request.has_key(key)
def get(self, key, default=None):
value = None
if (self.isZope()):
value = self.request.get(key, default)
else:
if key in self.request.keys():
value = self.request[key].value
else:
value = default
return value
"""
FCKeditorConnector
The connector class
"""
class FCKeditorConnector(object):
# Configuration for FCKEditor
# can point to another server here, if linked correctly
#WEB_HOST = "http://127.0.0.1/"
WEB_HOST = ""
SERVER_DIR = "/var/www/html/"
WEB_USERFILES_FOLDER = WEB_HOST + "upload/"
SERVER_USERFILES_FOLDER = SERVER_DIR + "upload/"
# Allow access (Zope)
__allow_access_to_unprotected_subobjects__ = 1
# Class Attributes
parentFolderRe = re.compile("[\/][^\/]+[\/]?$")
"""
Constructor
"""
def __init__(self, context=None):
# The given root path will NOT be shown to the user
# Only the userFilesPath will be shown
# Instance Attributes
self.context = context
self.request = FCKeditorRequest(context=context)
self.rootPath = self.SERVER_DIR
self.userFilesFolder = self.SERVER_USERFILES_FOLDER
self.webUserFilesFolder = self.WEB_USERFILES_FOLDER
# Enables / Disables the connector
self.enabled = False # Set to True to enable this connector
# These are instance variables
self.zopeRootContext = None
self.zopeUploadContext = None
# Copied from php module =)
self.allowedExtensions = {
"File": None,
"Image": None,
"Flash": None,
"Media": None
}
self.deniedExtensions = {
"File": [ "html","htm","php","php2","php3","php4","php5","phtml","pwml","inc","asp","aspx","ascx","jsp","cfm","cfc","pl","bat","exe","com","dll","vbs","js","reg","cgi","htaccess","asis" ],
"Image": [ "html","htm","php","php2","php3","php4","php5","phtml","pwml","inc","asp","aspx","ascx","jsp","cfm","cfc","pl","bat","exe","com","dll","vbs","js","reg","cgi","htaccess","asis" ],
"Flash": [ "html","htm","php","php2","php3","php4","php5","phtml","pwml","inc","asp","aspx","ascx","jsp","cfm","cfc","pl","bat","exe","com","dll","vbs","js","reg","cgi","htaccess","asis" ],
"Media": [ "html","htm","php","php2","php3","php4","php5","phtml","pwml","inc","asp","aspx","ascx","jsp","cfm","cfc","pl","bat","exe","com","dll","vbs","js","reg","cgi","htaccess","asis" ]
}
"""
Zope specific functions
"""
def isZope(self):
# The context object is the zope object
if (self.context is not None):
return True
return False
def getZopeRootContext(self):
if self.zopeRootContext is None:
self.zopeRootContext = self.context.getPhysicalRoot()
return self.zopeRootContext
def getZopeUploadContext(self):
if self.zopeUploadContext is None:
folderNames = self.userFilesFolder.split("/")
c = self.getZopeRootContext()
for folderName in folderNames:
if (folderName <> ""):
c = c[folderName]
self.zopeUploadContext = c
return self.zopeUploadContext
"""
Generic manipulation functions
"""
def getUserFilesFolder(self):
return self.userFilesFolder
def getWebUserFilesFolder(self):
return self.webUserFilesFolder
def getAllowedExtensions(self, resourceType):
return self.allowedExtensions[resourceType]
def getDeniedExtensions(self, resourceType):
return self.deniedExtensions[resourceType]
def removeFromStart(self, string, char):
return string.lstrip(char)
def removeFromEnd(self, string, char):
return string.rstrip(char)
def convertToXmlAttribute(self, value):
if (value is None):
value = ""
return escape(value)
def convertToPath(self, path):
if (path[-1] <> "/"):
return path + "/"
else:
return path
def getUrlFromPath(self, resourceType, path):
if (resourceType is None) or (resourceType == ''):
url = "%s%s" % (
self.removeFromEnd(self.getUserFilesFolder(), '/'),
path
)
else:
url = "%s%s%s" % (
self.getUserFilesFolder(),
resourceType,
path
)
return url
def getWebUrlFromPath(self, resourceType, path):
if (resourceType is None) or (resourceType == ''):
url = "%s%s" % (
self.removeFromEnd(self.getWebUserFilesFolder(), '/'),
path
)
else:
url = "%s%s%s" % (
self.getWebUserFilesFolder(),
resourceType,
path
)
return url
def removeExtension(self, fileName):
index = fileName.rindex(".")
newFileName = fileName[0:index]
return newFileName
def getExtension(self, fileName):
index = fileName.rindex(".") + 1
fileExtension = fileName[index:]
return fileExtension
def getParentFolder(self, folderPath):
parentFolderPath = self.parentFolderRe.sub('', folderPath)
return parentFolderPath
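	# For illustration (path is made up):
	#   getParentFolder('/upload/Image/holiday/') -> '/upload/Image'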
"""
serverMapFolder
Purpose: works out the folder map on the server
"""
def serverMapFolder(self, resourceType, folderPath):
# Get the resource type directory
resourceTypeFolder = "%s%s/" % (
self.getUserFilesFolder(),
resourceType
)
# Ensure that the directory exists
self.createServerFolder(resourceTypeFolder)
# Return the resource type directory combined with the
# required path
return "%s%s" % (
resourceTypeFolder,
self.removeFromStart(folderPath, '/')
)
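	# For illustration (folder names are made up): with SERVER_USERFILES_FOLDER set
	# to '/var/www/html/upload/', serverMapFolder('Image', '/holiday/') ensures that
	# '/var/www/html/upload/Image/' exists and returns
	# '/var/www/html/upload/Image/holiday/'.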
"""
createServerFolder
Purpose: physically creates a folder on the server
"""
def createServerFolder(self, folderPath):
# Check if the parent exists
parentFolderPath = self.getParentFolder(folderPath)
if not(os.path.exists(parentFolderPath)):
errorMsg = self.createServerFolder(parentFolderPath)
if errorMsg is not None:
return errorMsg
# Check if this exists
if not(os.path.exists(folderPath)):
os.mkdir(folderPath)
os.chmod(folderPath, 0755)
errorMsg = None
else:
if os.path.isdir(folderPath):
errorMsg = None
else:
raise "createServerFolder: Non-folder of same name already exists"
return errorMsg
"""
getRootPath
Purpose: returns the root path on the server
"""
def getRootPath(self):
return self.rootPath
"""
setXmlHeaders
Purpose: to prepare the headers for the xml to return
"""
def setXmlHeaders(self):
#now = self.context.BS_get_now()
#yesterday = now - 1
self.setHeader("Content-Type", "text/xml")
#self.setHeader("Expires", yesterday)
#self.setHeader("Last-Modified", now)
#self.setHeader("Cache-Control", "no-store, no-cache, must-revalidate")
self.printHeaders()
return
def setHeader(self, key, value):
if (self.isZope()):
self.context.REQUEST.RESPONSE.setHeader(key, value)
else:
print "%s: %s" % (key, value)
return
def printHeaders(self):
# For non-Zope requests, we need to print an empty line
# to denote the end of headers
if (not(self.isZope())):
print ""
"""
createXmlFooter
Purpose: returns the xml header
"""
def createXmlHeader(self, command, resourceType, currentFolder):
self.setXmlHeaders()
s = ""
# Create the XML document header
s += """<?xml version="1.0" encoding="utf-8" ?>"""
# Create the main connector node
s += """<Connector command="%s" resourceType="%s">""" % (
command,
resourceType
)
# Add the current folder node
s += """<CurrentFolder path="%s" url="%s" />""" % (
self.convertToXmlAttribute(currentFolder),
self.convertToXmlAttribute(
self.getWebUrlFromPath(
resourceType,
currentFolder
)
),
)
return s
"""
createXmlFooter
Purpose: returns the xml footer
"""
def createXmlFooter(self):
s = """</Connector>"""
return s
"""
sendError
Purpose: in the event of an error, return an xml based error
"""
def sendError(self, number, text):
self.setXmlHeaders()
s = ""
# Create the XML document header
s += """<?xml version="1.0" encoding="utf-8" ?>"""
s += """<Connector>"""
s += """<Error number="%s" text="%s" />""" % (number, text)
s += """</Connector>"""
return s
"""
getFolders
	Purpose: command to receive a list of folders
"""
def getFolders(self, resourceType, currentFolder):
if (self.isZope()):
return self.getZopeFolders(resourceType, currentFolder)
else:
return self.getNonZopeFolders(resourceType, currentFolder)
def getZopeFolders(self, resourceType, currentFolder):
# Open the folders node
s = ""
s += """<Folders>"""
zopeFolder = self.findZopeFolder(resourceType, currentFolder)
for (name, o) in zopeFolder.objectItems(["Folder"]):
s += """<Folder name="%s" />""" % (
self.convertToXmlAttribute(name)
)
# Close the folders node
s += """</Folders>"""
return s
def getNonZopeFolders(self, resourceType, currentFolder):
# Map the virtual path to our local server
serverPath = self.serverMapFolder(resourceType, currentFolder)
# Open the folders node
s = ""
s += """<Folders>"""
for someObject in os.listdir(serverPath):
someObjectPath = os.path.join(serverPath, someObject)
if os.path.isdir(someObjectPath):
s += """<Folder name="%s" />""" % (
self.convertToXmlAttribute(someObject)
)
# Close the folders node
s += """</Folders>"""
return s
"""
getFoldersAndFiles
	Purpose: command to receive a list of folders and files
"""
def getFoldersAndFiles(self, resourceType, currentFolder):
if (self.isZope()):
return self.getZopeFoldersAndFiles(resourceType, currentFolder)
else:
return self.getNonZopeFoldersAndFiles(resourceType, currentFolder)
def getNonZopeFoldersAndFiles(self, resourceType, currentFolder):
# Map the virtual path to our local server
serverPath = self.serverMapFolder(resourceType, currentFolder)
# Open the folders / files node
folders = """<Folders>"""
files = """<Files>"""
for someObject in os.listdir(serverPath):
someObjectPath = os.path.join(serverPath, someObject)
if os.path.isdir(someObjectPath):
folders += """<Folder name="%s" />""" % (
self.convertToXmlAttribute(someObject)
)
elif os.path.isfile(someObjectPath):
size = os.path.getsize(someObjectPath)
files += """<File name="%s" size="%s" />""" % (
self.convertToXmlAttribute(someObject),
os.path.getsize(someObjectPath)
)
# Close the folders / files node
folders += """</Folders>"""
files += """</Files>"""
# Return it
s = folders + files
return s
def getZopeFoldersAndFiles(self, resourceType, currentFolder):
folders = self.getZopeFolders(resourceType, currentFolder)
files = self.getZopeFiles(resourceType, currentFolder)
s = folders + files
return s
def getZopeFiles(self, resourceType, currentFolder):
# Open the files node
s = ""
s += """<Files>"""
zopeFolder = self.findZopeFolder(resourceType, currentFolder)
for (name, o) in zopeFolder.objectItems(["File","Image"]):
s += """<File name="%s" size="%s" />""" % (
self.convertToXmlAttribute(name),
((o.get_size() / 1024) + 1)
)
# Close the files node
s += """</Files>"""
return s
def findZopeFolder(self, resourceType, folderName):
# returns the context of the resource / folder
zopeFolder = self.getZopeUploadContext()
folderName = self.removeFromStart(folderName, "/")
folderName = self.removeFromEnd(folderName, "/")
if (resourceType <> ""):
try:
zopeFolder = zopeFolder[resourceType]
except:
zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=resourceType, title=resourceType)
zopeFolder = zopeFolder[resourceType]
if (folderName <> ""):
folderNames = folderName.split("/")
for folderName in folderNames:
zopeFolder = zopeFolder[folderName]
return zopeFolder
"""
createFolder
Purpose: command to create a new folder
"""
def createFolder(self, resourceType, currentFolder):
if (self.isZope()):
return self.createZopeFolder(resourceType, currentFolder)
else:
return self.createNonZopeFolder(resourceType, currentFolder)
def createZopeFolder(self, resourceType, currentFolder):
# Find out where we are
zopeFolder = self.findZopeFolder(resourceType, currentFolder)
errorNo = 0
errorMsg = ""
if self.request.has_key("NewFolderName"):
newFolder = self.request.get("NewFolderName", None)
zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=newFolder, title=newFolder)
else:
errorNo = 102
error = """<Error number="%s" originalDescription="%s" />""" % (
errorNo,
self.convertToXmlAttribute(errorMsg)
)
return error
def createNonZopeFolder(self, resourceType, currentFolder):
errorNo = 0
errorMsg = ""
if self.request.has_key("NewFolderName"):
newFolder = self.request.get("NewFolderName", None)
currentFolderPath = self.serverMapFolder(
resourceType,
currentFolder
)
try:
newFolderPath = currentFolderPath + newFolder
errorMsg = self.createServerFolder(newFolderPath)
if (errorMsg is not None):
errorNo = 110
except:
errorNo = 103
else:
errorNo = 102
error = """<Error number="%s" originalDescription="%s" />""" % (
errorNo,
self.convertToXmlAttribute(errorMsg)
)
return error
"""
getFileName
	Purpose: helper function to extract the bare file name from a path
"""
def getFileName(self, filename):
for splitChar in ["/", "\\"]:
array = filename.split(splitChar)
if (len(array) > 1):
filename = array[-1]
return filename
"""
fileUpload
Purpose: command to upload files to server
"""
def fileUpload(self, resourceType, currentFolder):
if (self.isZope()):
return self.zopeFileUpload(resourceType, currentFolder)
else:
return self.nonZopeFileUpload(resourceType, currentFolder)
def zopeFileUpload(self, resourceType, currentFolder, count=None):
zopeFolder = self.findZopeFolder(resourceType, currentFolder)
file = self.request.get("NewFile", None)
fileName = self.getFileName(file.filename)
fileNameOnly = self.removeExtension(fileName)
fileExtension = self.getExtension(fileName).lower()
if (count):
nid = "%s.%s.%s" % (fileNameOnly, count, fileExtension)
else:
nid = fileName
title = nid
try:
zopeFolder.manage_addProduct['OFSP'].manage_addFile(
id=nid,
title=title,
file=file.read()
)
except:
if (count):
count += 1
else:
count = 1
self.zopeFileUpload(resourceType, currentFolder, count)
return
def nonZopeFileUpload(self, resourceType, currentFolder):
errorNo = 0
errorMsg = ""
if self.request.has_key("NewFile"):
# newFile has all the contents we need
newFile = self.request.get("NewFile", "")
# Get the file name
newFileName = newFile.filename
newFileNameOnly = self.removeExtension(newFileName)
newFileExtension = self.getExtension(newFileName).lower()
allowedExtensions = self.getAllowedExtensions(resourceType)
deniedExtensions = self.getDeniedExtensions(resourceType)
if (allowedExtensions is not None):
# Check for allowed
isAllowed = False
if (newFileExtension in allowedExtensions):
isAllowed = True
elif (deniedExtensions is not None):
# Check for denied
isAllowed = True
if (newFileExtension in deniedExtensions):
isAllowed = False
else:
# No extension limitations
isAllowed = True
if (isAllowed):
if (self.isZope()):
# Upload into zope
self.zopeFileUpload(resourceType, currentFolder)
else:
# Upload to operating system
# Map the virtual path to the local server path
currentFolderPath = self.serverMapFolder(
resourceType,
currentFolder
)
i = 0
while (True):
newFilePath = "%s%s" % (
currentFolderPath,
newFileName
)
if os.path.exists(newFilePath):
i += 1
newFilePath = "%s%s(%s).%s" % (
currentFolderPath,
newFileNameOnly,
i,
newFileExtension
)
errorNo = 201
break
else:
fileHandle = open(newFilePath,'w')
linecount = 0
while (1):
#line = newFile.file.readline()
line = newFile.readline()
if not line: break
fileHandle.write("%s" % line)
linecount += 1
os.chmod(newFilePath, 0777)
break
else:
newFileName = "Extension not allowed"
errorNo = 203
else:
newFileName = "No File"
errorNo = 202
string = """
<script type="text/javascript">
window.parent.frames["frmUpload"].OnUploadCompleted(%s,"%s");
</script>
""" % (
errorNo,
newFileName.replace('"',"'")
)
return string
def run(self):
s = ""
try:
# Check if this is disabled
if not(self.enabled):
return self.sendError(1, "This connector is disabled. Please check the connector configurations and try again")
# Make sure we have valid inputs
if not(
(self.request.has_key("Command")) and
(self.request.has_key("Type")) and
(self.request.has_key("CurrentFolder"))
):
return
# Get command
command = self.request.get("Command", None)
# Get resource type
resourceType = self.request.get("Type", None)
# folder syntax must start and end with "/"
currentFolder = self.request.get("CurrentFolder", None)
if (currentFolder[-1] <> "/"):
currentFolder += "/"
if (currentFolder[0] <> "/"):
currentFolder = "/" + currentFolder
# Check for invalid paths
if (".." in currentFolder):
return self.sendError(102, "")
# File upload doesn't have to return XML, so intercept
			# here
if (command == "FileUpload"):
return self.fileUpload(resourceType, currentFolder)
# Begin XML
s += self.createXmlHeader(command, resourceType, currentFolder)
# Execute the command
if (command == "GetFolders"):
f = self.getFolders
elif (command == "GetFoldersAndFiles"):
f = self.getFoldersAndFiles
elif (command == "CreateFolder"):
f = self.createFolder
else:
f = None
if (f is not None):
s += f(resourceType, currentFolder)
s += self.createXmlFooter()
except Exception, e:
s = "ERROR: %s" % e
return s
# Running from command line
if __name__ == '__main__':
# To test the output, uncomment the standard headers
#print "Content-Type: text/html"
#print ""
print getFCKeditorConnector()
"""
Running from zope, you will need to modify this connector.
If you have uploaded the FCKeditor into Zope (like me), you need to
move this connector out of Zope, and replace the "connector" with an
alias as below. The key to it is to pass the Zope context in, as
we then have a link to the Zope context.
## Script (Python) "connector.py"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=*args, **kws
##title=ALIAS
##
import Products.connector as connector
return connector.getFCKeditorConnector(context=context).run()
"""
| gpl-3.0 |
sguazt/prometheus | tools/giws/datatypes/stringDataGiws.py | 1 | 10567 | #!/usr/bin/python -u
# Copyright or Copr. INRIA/Scilab - Sylvestre LEDRU
#
# Sylvestre LEDRU - <[email protected]> <[email protected]>
#
# This software is a computer program whose purpose is to generate C++ wrapper
# for Java objects/methods.
#
# This software is governed by the CeCILL license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/ or redistribute the software under the terms of the CeCILL
# license as circulated by CEA, CNRS and INRIA at the following URL
# "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
#
# For more information, see the file COPYING
from datatypes.dataGiws import dataGiws
from configGiws import configGiws
from JNIFrameWork import JNIFrameWork
class stringDataGiws(dataGiws):
nativeType="char *"
callMethod="CallObjectMethod"
callStaticMethod="CallStaticObjectMethod"
temporaryVariableName="myStringBuffer"
def getTypeSignature(self):
return "Ljava/lang/String;"
def getJavaTypeSyntax(self):
if self.isArray():
return "jobjectArray"
else:
return "jstring"
def getRealJavaType(self):
return "java.lang.String"
def getDescription(self):
return "Java String"
def getNativeType(self, ForceNotArray=False, UseConst=False):
if self.isArray():
if UseConst:
pointer = " const*"
else:
pointer = "*"
return ("char" + pointer) + pointer * self.getDimensionArray()
else:
if UseConst:
pointer = " const*"
else:
pointer = "*"
return "char" + pointer
def __errorMemoryString(self, detachThread):
# Management of the error when not enought memory to create the string
if configGiws().getThrowsException():
errorMgntMemBis="""%sthrow %s::JniBadAllocException(curEnv);"""%(detachThread,configGiws().getExceptionFileName())
else:
errorMgntMemBis="""std::cerr << "Could not convert C string to Java UTF string, memory full." << std::endl;%s
exit(EXIT_FAILURE);"""%(detachThread)
return errorMgntMemBis
def specificPreProcessing(self, parameter, detachThread):
""" Overrides the preprocessing of the array """
name=parameter.getName()
		# Management of the error when not enough memory to create the string
if configGiws().getThrowsException():
errorMgntMem="""%sthrow %s::JniBadAllocException(curEnv);"""%(detachThread,configGiws().getExceptionFileName())
else:
errorMgntMem="""std::cerr << "Could not allocate Java string array, memory full." << std::endl;%s
exit(EXIT_FAILURE);"""%(detachThread)
errorMgntMemBis = self.__errorMemoryString(detachThread)
if self.isArray():
if self.getDimensionArray() == 1:
return """
// create java array of strings.
jobjectArray %s_ = curEnv->NewObjectArray( %sSize, stringArrayClass, NULL);
if (%s_ == NULL)
{
%s
}
// convert each char * to java strings and fill the java array.
for ( int i = 0; i < %sSize; i++)
{
jstring TempString = curEnv->NewStringUTF( %s[i] );
if (TempString == NULL)
{
%s
}
curEnv->SetObjectArrayElement( %s_, i, TempString);
// avoid keeping reference on too many strings
curEnv->DeleteLocalRef(TempString);
}"""%(name,name,name,errorMgntMem,name,name,errorMgntMemBis,name)
else:
return """
// create java array of array of strings.
jobjectArray %s_ = curEnv->NewObjectArray( %sSize, curEnv->FindClass("[Ljava/lang/String;"), NULL);
if (%s_ == NULL)
{
%s
}
for ( int i = 0; i < %sSize; i++)
{
jobjectArray %sLocal = curEnv->NewObjectArray( %sSizeCol, stringArrayClass, NULL);
// convert each char * to java strings and fill the java array.
for ( int j = 0; j < %sSizeCol; j++) {
jstring TempString = curEnv->NewStringUTF( %s[i][j] );
if (TempString == NULL)
{
%s
}
curEnv->SetObjectArrayElement( %sLocal, j, TempString);
// avoid keeping reference on too many strings
curEnv->DeleteLocalRef(TempString);
}
curEnv->SetObjectArrayElement(%s_, i, %sLocal);
curEnv->DeleteLocalRef(%sLocal);
}"""%(name,name,name,errorMgntMem,name,name,name,name,name,errorMgntMemBis,name,name,name,name)
else:
# Need to store is for the post processing (delete)
self.parameterName=name
tempName=name+"_"
return """
jstring %s = curEnv->NewStringUTF( %s );
if (%s != NULL && %s == NULL)
{
%s
}
"""%(tempName,name,name,tempName,errorMgntMemBis)
def specificPostProcessing(self, detachThread):
""" Called when we are returning a string or an array of string """
# We are doing an exception check here JUST in this case because
# in methodGiws::__createMethodBody we usually do it at the end
# of the method just after deleting the variable
		# but when dealing with strings, in this method, we are calling some
# methods which override the "exception engine" which drive the JNI
# engine crazy.
str=JNIFrameWork().getExceptionCheckProfile(detachThread)
str=str+"if (res != NULL) { "
if self.isArray():
strCommon=""
strDeclaration=""
if configGiws().getDisableReturnSize()==True:
strCommon+="int lenRow;"
else:
# The size of the array is returned as output argument of the function
strDeclaration="*"
strCommon+="""
%s lenRow = curEnv->GetArrayLength(res);
"""%(strDeclaration)
self.temporaryVariableName="arrayOfString"
if self.getDimensionArray() == 1:
str+=strCommon+"""
char **arrayOfString;
arrayOfString = new char *[%slenRow];
for (jsize i = 0; i < %slenRow; i++){
jstring resString = reinterpret_cast<jstring>(curEnv->GetObjectArrayElement(res, i));
const char *tempString = curEnv->GetStringUTFChars(resString, 0);
arrayOfString[i] = new char[strlen(tempString) + 1];
strcpy(arrayOfString[i], tempString);
curEnv->ReleaseStringUTFChars(resString, tempString);
curEnv->DeleteLocalRef(resString);
}
"""%(strDeclaration, strDeclaration)
return str
else:
if configGiws().getDisableReturnSize()==True:
str+="int lenCol;"
str+=strCommon+"""
char ***arrayOfString;
arrayOfString = new char **[%slenRow];
for (jsize i = 0; i < %slenRow; i++){ /* Line of the array */
jobjectArray resStringLine = reinterpret_cast<jobjectArray>(curEnv->GetObjectArrayElement(res, i));
%slenCol = curEnv->GetArrayLength(resStringLine);
arrayOfString[i]=new char*[%slenCol];
for (jsize j = 0; j < %slenCol; j++){
jstring resString = reinterpret_cast<jstring>(curEnv->GetObjectArrayElement(resStringLine, j));
const char *tempString = curEnv->GetStringUTFChars(resString, 0);
arrayOfString[i][j] = new char[strlen(tempString) + 1];
strcpy(arrayOfString[i][j], tempString);
curEnv->ReleaseStringUTFChars(resString, tempString);
curEnv->DeleteLocalRef(resString);
}
curEnv->DeleteLocalRef(resStringLine);
}
"""%(strDeclaration, strDeclaration, strDeclaration, strDeclaration, strDeclaration)
return str
else:
if hasattr(self,"parameterName"):
str+="""curEnv->DeleteLocalRef(%s);"""%(self.parameterName+"_")
str=str+"""
const char *tempString = curEnv->GetStringUTFChars(res, 0);
char * %s = new char[strlen(tempString) + 1];
strcpy(%s, tempString);
curEnv->ReleaseStringUTFChars(res, tempString);
curEnv->DeleteLocalRef(res);
"""%(self.temporaryVariableName, self.temporaryVariableName)
return str
def getReturnSyntax(self):
str=""
if self.isArray():
str = str + """
curEnv->DeleteLocalRef(res);
return arrayOfString;
"""
else:
str = str + """
return %s;
"""%(self.temporaryVariableName)
str = str + """ } else {
curEnv->DeleteLocalRef(res);
return NULL;
}"""
return str
| apache-2.0 |
vitan/django | tests/template_tests/syntax_tests/test_invalid_string.py | 46 | 2257 | from django.test import SimpleTestCase
from ..utils import setup
class InvalidStringTests(SimpleTestCase):
@setup({'invalidstr01': '{{ var|default:"Foo" }}'})
def test_invalidstr01(self):
output = self.engine.render_to_string('invalidstr01')
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, 'Foo')
@setup({'invalidstr02': '{{ var|default_if_none:"Foo" }}'})
def test_invalidstr02(self):
output = self.engine.render_to_string('invalidstr02')
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'invalidstr03': '{% for v in var %}({{ v }}){% endfor %}'})
def test_invalidstr03(self):
output = self.engine.render_to_string('invalidstr03')
self.assertEqual(output, '')
@setup({'invalidstr04': '{% if var %}Yes{% else %}No{% endif %}'})
def test_invalidstr04(self):
output = self.engine.render_to_string('invalidstr04')
self.assertEqual(output, 'No')
@setup({'invalidstr04_2': '{% if var|default:"Foo" %}Yes{% else %}No{% endif %}'})
def test_invalidstr04_2(self):
output = self.engine.render_to_string('invalidstr04_2')
self.assertEqual(output, 'Yes')
@setup({'invalidstr05': '{{ var }}'})
def test_invalidstr05(self):
output = self.engine.render_to_string('invalidstr05')
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'invalidstr06': '{{ var.prop }}'})
def test_invalidstr06(self):
output = self.engine.render_to_string('invalidstr06')
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'invalidstr07': '{% load i18n %}{% blocktrans %}{{ var }}{% endblocktrans %}'})
def test_invalidstr07(self):
output = self.engine.render_to_string('invalidstr07')
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
| bsd-3-clause |
ypid/series60-remote | pc/lib/log.py | 1 | 1490 | # -*- coding: utf-8 -*-
# Copyright (c) 2008 - 2009 Lukas Hetzenecker <[email protected]>
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import logging
class QtStreamHandler(logging.Handler):
def __init__(self, parent, main):
logging.Handler.__init__(self)
self.parent = parent
self.main = main
self.textWidget = parent
self.formater = logging.Formatter("%(message)s")
def setFormatter(self, format):
self.formater = format
def createLock(self):
self.mutex = QMutex()
def acquire(self):
self.mutex.lock()
def release(self):
self.mutex.unlock()
def emit(self,record):
self.textWidget.appendPlainText(self.formater.format(record))
self.textWidget.moveCursor(QTextCursor.StartOfLine)
self.textWidget.ensureCursorVisible()
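# Rough wiring sketch (widget and window names are placeholders):
#   handler = QtStreamHandler(logTextEdit, mainWindow)
#   handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
#   logging.getLogger().addHandler(handler)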
class QtOutput(object):
def __init__(self, parent, out=None, color=None):
self.textWidget = parent
self.out = out
self.color = color
def write(self, m):
self.textWidget.moveCursor(QTextCursor.End)
if self.color:
tc = self.textWidget.textColor()
self.textWidget.setTextColor(self.color)
self.textWidget.insertPlainText( m )
if self.color:
self.textWidget.setTextColor(tc)
if self.out:
if isinstance(m, unicode):
self.out.write(m.encode("utf8"))
else:
self.out.write(m)
| gpl-2.0 |
hmen89/odoo | addons/gamification/models/goal.py | 24 | 25742 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as DF
from openerp.tools.safe_eval import safe_eval
from openerp.tools.translate import _
import logging
import time
from datetime import date, datetime, timedelta
_logger = logging.getLogger(__name__)
class gamification_goal_definition(osv.Model):
"""Goal definition
A goal definition contains the way to evaluate an objective
Each module wanting to be able to set goals to the users needs to create
a new gamification_goal_definition
"""
_name = 'gamification.goal.definition'
_description = 'Gamification goal definition'
def _get_suffix(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, '')
for goal in self.browse(cr, uid, ids, context=context):
if goal.suffix and not goal.monetary:
res[goal.id] = goal.suffix
elif goal.monetary:
# use the current user's company currency
user = self.pool.get('res.users').browse(cr, uid, uid, context)
if goal.suffix:
res[goal.id] = "%s %s" % (user.company_id.currency_id.symbol, goal.suffix)
else:
res[goal.id] = user.company_id.currency_id.symbol
else:
res[goal.id] = ""
return res
_columns = {
'name': fields.char('Goal Definition', required=True, translate=True),
'description': fields.text('Goal Description'),
'monetary': fields.boolean('Monetary Value', help="The target and current value are defined in the company currency."),
'suffix': fields.char('Suffix', help="The unit of the target and current values", translate=True),
'full_suffix': fields.function(_get_suffix, type="char", string="Full Suffix", help="The currency and suffix field"),
'computation_mode': fields.selection([
('manually', 'Recorded manually'),
('count', 'Automatic: number of records'),
('sum', 'Automatic: sum on a field'),
('python', 'Automatic: execute a specific Python code'),
],
string="Computation Mode",
help="Defined how will be computed the goals. The result of the operation will be stored in the field 'Current'.",
required=True),
'display_mode': fields.selection([
('progress', 'Progressive (using numerical values)'),
('boolean', 'Exclusive (done or not-done)'),
],
string="Displayed as", required=True),
'model_id': fields.many2one('ir.model',
string='Model',
help='The model object for the field to evaluate'),
'field_id': fields.many2one('ir.model.fields',
string='Field to Sum',
help='The field containing the value to evaluate'),
'field_date_id': fields.many2one('ir.model.fields',
string='Date Field',
help='The date to use for the time period evaluated'),
'domain': fields.char("Filter Domain",
help="Domain for filtering records. General rule, not user depending, e.g. [('state', '=', 'done')]. The expression can contain reference to 'user' which is a browse record of the current user if not in batch mode.",
required=True),
'batch_mode': fields.boolean('Batch Mode',
help="Evaluate the expression in batch instead of once for each user"),
'batch_distinctive_field': fields.many2one('ir.model.fields',
string="Distinctive field for batch user",
help="In batch mode, this indicates which field distinct one user form the other, e.g. user_id, partner_id..."),
'batch_user_expression': fields.char("Evaluted expression for batch mode",
help="The value to compare with the distinctive field. The expression can contain reference to 'user' which is a browse record of the current user, e.g. user.id, user.partner_id.id..."),
'compute_code': fields.text('Python Code',
help="Python code to be executed for each user. 'result' should contains the new current value. Evaluated user can be access through object.user_id."),
'condition': fields.selection([
('higher', 'The higher the better'),
('lower', 'The lower the better')
],
string='Goal Performance',
            help='Defines how the current value is compared to the target value to decide whether the goal is reached',
required=True),
'action_id': fields.many2one('ir.actions.act_window', string="Action",
help="The action that will be called to update the goal value."),
'res_id_field': fields.char("ID Field of user",
help="The field name on the user profile (res.users) containing the value for res_id for action."),
}
_defaults = {
'condition': 'higher',
'computation_mode': 'manually',
'domain': "[]",
'monetary': False,
'display_mode': 'progress',
}
def number_following(self, cr, uid, model_name="mail.thread", context=None):
"""Return the number of 'model_name' objects the user is following
The model specified in 'model_name' must inherit from mail.thread
"""
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
return self.pool.get('mail.followers').search(cr, uid, [('res_model', '=', model_name), ('partner_id', '=', user.partner_id.id)], count=True, context=context)
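    # Usage sketch (the model name is only an example):
    #   number_following(cr, uid, 'crm.lead') counts the crm.lead records the
    #   current user's partner follows; any model inheriting mail.thread works.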
def _check_domain_validity(self, cr, uid, ids, context=None):
# take admin as should always be present
superuser = self.pool['res.users'].browse(cr, uid, SUPERUSER_ID, context=context)
for definition in self.browse(cr, uid, ids, context=context):
if definition.computation_mode not in ('count', 'sum'):
continue
obj = self.pool[definition.model_id.model]
try:
domain = safe_eval(definition.domain, {'user': superuser})
                # dummy search to make sure the domain is valid
obj.search(cr, uid, domain, context=context, count=True)
except (ValueError, SyntaxError), e:
msg = e.message or (e.msg + '\n' + e.text)
raise osv.except_osv(_('Error!'),_("The domain for the definition %s seems incorrect, please check it.\n\n%s" % (definition.name, msg)))
return True
def create(self, cr, uid, vals, context=None):
res_id = super(gamification_goal_definition, self).create(cr, uid, vals, context=context)
if vals.get('computation_mode') in ('count', 'sum'):
self._check_domain_validity(cr, uid, [res_id], context=context)
return res_id
def write(self, cr, uid, ids, vals, context=None):
res = super(gamification_goal_definition, self).write(cr, uid, ids, vals, context=context)
if vals.get('computation_mode', 'count') in ('count', 'sum') and (vals.get('domain') or vals.get('model_id')):
self._check_domain_validity(cr, uid, ids, context=context)
return res
class gamification_goal(osv.Model):
"""Goal instance for a user
An individual goal for a user on a specified time period"""
_name = 'gamification.goal'
_description = 'Gamification goal instance'
def _get_completion(self, cr, uid, ids, field_name, arg, context=None):
"""Return the percentage of completeness of the goal, between 0 and 100"""
res = dict.fromkeys(ids, 0.0)
for goal in self.browse(cr, uid, ids, context=context):
if goal.definition_condition == 'higher':
if goal.current >= goal.target_goal:
res[goal.id] = 100.0
else:
res[goal.id] = round(100.0 * goal.current / goal.target_goal, 2)
elif goal.current < goal.target_goal:
# a goal 'lower than' has only two values possible: 0 or 100%
res[goal.id] = 100.0
else:
res[goal.id] = 0.0
return res
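    # For illustration: a 'higher is better' goal with current=30 and
    # target_goal=120 reports 25.0; a 'lower is better' goal reports only 0.0
    # (current not yet below the target) or 100.0 (current below the target).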
def on_change_definition_id(self, cr, uid, ids, definition_id=False, context=None):
goal_definition = self.pool.get('gamification.goal.definition')
if not definition_id:
return {'value': {'definition_id': False}}
goal_definition = goal_definition.browse(cr, uid, definition_id, context=context)
return {'value': {'computation_mode': goal_definition.computation_mode, 'definition_condition': goal_definition.condition}}
_columns = {
'definition_id': fields.many2one('gamification.goal.definition', string='Goal Definition', required=True, ondelete="cascade"),
'user_id': fields.many2one('res.users', string='User', required=True),
'line_id': fields.many2one('gamification.challenge.line', string='Challenge Line', ondelete="cascade"),
'challenge_id': fields.related('line_id', 'challenge_id',
string="Challenge",
type='many2one',
relation='gamification.challenge',
store=True, readonly=True,
help="Challenge that generated the goal, assign challenge to users to generate goals with a value in this field."),
'start_date': fields.date('Start Date'),
'end_date': fields.date('End Date'), # no start and end = always active
'target_goal': fields.float('To Reach',
required=True,
track_visibility='always'), # no goal = global index
'current': fields.float('Current Value', required=True, track_visibility='always'),
'completeness': fields.function(_get_completion, type='float', string='Completeness'),
'state': fields.selection([
('draft', 'Draft'),
('inprogress', 'In progress'),
('reached', 'Reached'),
('failed', 'Failed'),
('canceled', 'Canceled'),
],
string='State',
required=True,
track_visibility='always'),
'to_update': fields.boolean('To update'),
'closed': fields.boolean('Closed goal', help="These goals will not be recomputed."),
'computation_mode': fields.related('definition_id', 'computation_mode', type='char', string="Computation mode"),
'remind_update_delay': fields.integer('Remind delay',
help="The number of days after which the user assigned to a manual goal will be reminded. Never reminded if no value is specified."),
'last_update': fields.date('Last Update',
help="In case of manual goal, reminders are sent if the goal as not been updated for a while (defined in challenge). Ignored in case of non-manual goal or goal not linked to a challenge."),
'definition_description': fields.related('definition_id', 'description', type='char', string='Definition Description', readonly=True),
'definition_condition': fields.related('definition_id', 'condition', type='char', string='Definition Condition', readonly=True),
'definition_suffix': fields.related('definition_id', 'full_suffix', type="char", string="Suffix", readonly=True),
'definition_display': fields.related('definition_id', 'display_mode', type="char", string="Display Mode", readonly=True),
}
_defaults = {
'current': 0,
'state': 'draft',
'start_date': fields.date.today,
}
_order = 'create_date desc, end_date desc, definition_id, id'
def _check_remind_delay(self, cr, uid, goal, context=None):
"""Verify if a goal has not been updated for some time and send a
reminder message of needed.
:return: data to write on the goal object
"""
if goal.remind_update_delay and goal.last_update:
delta_max = timedelta(days=goal.remind_update_delay)
last_update = datetime.strptime(goal.last_update, DF).date()
if date.today() - last_update > delta_max:
# generate a remind report
temp_obj = self.pool.get('email.template')
template_id = self.pool['ir.model.data'].get_object(cr, uid, 'gamification', 'email_template_goal_reminder', context)
body_html = temp_obj.render_template(cr, uid, template_id.body_html, 'gamification.goal', goal.id, context=context)
self.pool['mail.thread'].message_post(cr, uid, 0, body=body_html, partner_ids=[goal.user_id.partner_id.id], context=context, subtype='mail.mt_comment')
return {'to_update': True}
return {}
def update(self, cr, uid, ids, context=None):
"""Update the goals to recomputes values and change of states
If a manual goal is not updated for enough time, the user will be
reminded to do so (done only once, in 'inprogress' state).
If a goal reaches the target value, the status is set to reached
If the end date is passed (at least +1 day, time not considered) without
the target value being reached, the goal is set as failed."""
if context is None:
context = {}
commit = context.get('commit_gamification', False)
goals_by_definition = {}
all_goals = {}
for goal in self.browse(cr, uid, ids, context=context):
if goal.state in ('draft', 'canceled'):
# draft or canceled goals should not be recomputed
continue
goals_by_definition.setdefault(goal.definition_id, []).append(goal)
all_goals[goal.id] = goal
for definition, goals in goals_by_definition.items():
goals_to_write = dict((goal.id, {}) for goal in goals)
if definition.computation_mode == 'manually':
for goal in goals:
goals_to_write[goal.id].update(self._check_remind_delay(cr, uid, goal, context))
elif definition.computation_mode == 'python':
# TODO batch execution
for goal in goals:
# execute the chosen method
cxt = {
'self': self.pool.get('gamification.goal'),
'object': goal,
'pool': self.pool,
'cr': cr,
'context': dict(context), # copy context to prevent side-effects of eval
'uid': uid,
'date': date, 'datetime': datetime, 'timedelta': timedelta, 'time': time
}
code = definition.compute_code.strip()
safe_eval(code, cxt, mode="exec", nocopy=True)
                    # the result of the evaluated code is put in the 'result' local variable, propagated to the context
result = cxt.get('result')
if result is not None and type(result) in (float, int, long):
if result != goal.current:
goals_to_write[goal.id]['current'] = result
else:
_logger.exception(_('Invalid return content from the evaluation of code for definition %s' % definition.name))
else: # count or sum
obj = self.pool.get(definition.model_id.model)
field_date_name = definition.field_date_id and definition.field_date_id.name or False
if definition.computation_mode == 'count' and definition.batch_mode:
# batch mode, trying to do as much as possible in one request
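                    # Goals sharing the same (start_date, end_date) period are
                    # grouped into one subquery; a single read_group on the
                    # distinctive field then serves every goal of that period,
                    # and each goal is matched against the value produced by
                    # evaluating batch_user_expression for its user.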
general_domain = safe_eval(definition.domain)
field_name = definition.batch_distinctive_field.name
subqueries = {}
for goal in goals:
start_date = field_date_name and goal.start_date or False
end_date = field_date_name and goal.end_date or False
subqueries.setdefault((start_date, end_date), {}).update({goal.id:safe_eval(definition.batch_user_expression, {'user': goal.user_id})})
# the global query should be split by time periods (especially for recurrent goals)
for (start_date, end_date), query_goals in subqueries.items():
subquery_domain = list(general_domain)
subquery_domain.append((field_name, 'in', list(set(query_goals.values()))))
if start_date:
subquery_domain.append((field_date_name, '>=', start_date))
if end_date:
subquery_domain.append((field_date_name, '<=', end_date))
if field_name == 'id':
# grouping on id does not work and is similar to search anyway
user_ids = obj.search(cr, uid, subquery_domain, context=context)
user_values = [{'id': user_id, 'id_count': 1} for user_id in user_ids]
else:
user_values = obj.read_group(cr, uid, subquery_domain, fields=[field_name], groupby=[field_name], context=context)
# user_values has format of read_group: [{'partner_id': 42, 'partner_id_count': 3},...]
for goal in [g for g in goals if g.id in query_goals.keys()]:
for user_value in user_values:
queried_value = field_name in user_value and user_value[field_name] or False
if isinstance(queried_value, tuple) and len(queried_value) == 2 and isinstance(queried_value[0], (int, long)):
queried_value = queried_value[0]
if queried_value == query_goals[goal.id]:
new_value = user_value.get(field_name+'_count', goal.current)
if new_value != goal.current:
goals_to_write[goal.id]['current'] = new_value
else:
for goal in goals:
# eval the domain with user replaced by goal user object
domain = safe_eval(definition.domain, {'user': goal.user_id})
# add temporal clause(s) to the domain if fields are filled on the goal
if goal.start_date and field_date_name:
domain.append((field_date_name, '>=', goal.start_date))
if goal.end_date and field_date_name:
domain.append((field_date_name, '<=', goal.end_date))
if definition.computation_mode == 'sum':
field_name = definition.field_id.name
# TODO for master: group on user field in batch mode
res = obj.read_group(cr, uid, domain, [field_name], [], context=context)
new_value = res and res[0][field_name] or 0.0
else: # computation mode = count
new_value = obj.search(cr, uid, domain, context=context, count=True)
# avoid useless write if the new value is the same as the old one
if new_value != goal.current:
goals_to_write[goal.id]['current'] = new_value
for goal_id, value in goals_to_write.items():
if not value:
continue
goal = all_goals[goal_id]
# check goal target reached
if (goal.definition_id.condition == 'higher' and value.get('current', goal.current) >= goal.target_goal) \
or (goal.definition_id.condition == 'lower' and value.get('current', goal.current) <= goal.target_goal):
value['state'] = 'reached'
# check goal failure
elif goal.end_date and fields.date.today() > goal.end_date:
value['state'] = 'failed'
value['closed'] = True
if value:
self.write(cr, uid, [goal.id], value, context=context)
if commit:
cr.commit()
return True
def action_start(self, cr, uid, ids, context=None):
"""Mark a goal as started.
This should only be used when creating goals manually (in draft state)"""
self.write(cr, uid, ids, {'state': 'inprogress'}, context=context)
return self.update(cr, uid, ids, context=context)
def action_reach(self, cr, uid, ids, context=None):
"""Mark a goal as reached.
If the target goal condition is not met, the state will be reset to In
Progress at the next goal update until the end date."""
return self.write(cr, uid, ids, {'state': 'reached'}, context=context)
def action_fail(self, cr, uid, ids, context=None):
"""Set the state of the goal to failed.
A failed goal will be ignored in future checks."""
return self.write(cr, uid, ids, {'state': 'failed'}, context=context)
def action_cancel(self, cr, uid, ids, context=None):
"""Reset the completion after setting a goal as reached or failed.
        This only resets the current state; if the date and/or target criteria
match the conditions for a change of state, this will be applied at the
next goal update."""
return self.write(cr, uid, ids, {'state': 'inprogress'}, context=context)
def create(self, cr, uid, vals, context=None):
"""Overwrite the create method to add a 'no_remind_goal' field to True"""
context = dict(context or {})
context['no_remind_goal'] = True
return super(gamification_goal, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
"""Overwrite the write method to update the last_update field to today
If the current value is changed and the report frequency is set to On
change, a report is generated
"""
if context is None:
context = {}
vals['last_update'] = fields.date.today()
result = super(gamification_goal, self).write(cr, uid, ids, vals, context=context)
for goal in self.browse(cr, uid, ids, context=context):
if goal.state != "draft" and ('definition_id' in vals or 'user_id' in vals):
# avoid drag&drop in kanban view
raise osv.except_osv(_('Error!'), _('Can not modify the configuration of a started goal'))
if vals.get('current'):
if 'no_remind_goal' in context:
# new goals should not be reported
continue
if goal.challenge_id and goal.challenge_id.report_message_frequency == 'onchange':
self.pool.get('gamification.challenge').report_progress(cr, SUPERUSER_ID, goal.challenge_id, users=[goal.user_id], context=context)
return result
def get_action(self, cr, uid, goal_id, context=None):
"""Get the ir.action related to update the goal
In case of a manual goal, should return a wizard to update the value
        :return: action description in a dictionary
"""
goal = self.browse(cr, uid, goal_id, context=context)
if goal.definition_id.action_id:
            # open the action linked to the goal
action = goal.definition_id.action_id.read()[0]
if goal.definition_id.res_id_field:
current_user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
action['res_id'] = safe_eval(goal.definition_id.res_id_field, {'user': current_user})
# if one element to display, should see it in form mode if possible
action['views'] = [(view_id, mode) for (view_id, mode) in action['views'] if mode == 'form'] or action['views']
return action
if goal.computation_mode == 'manually':
# open a wizard window to update the value manually
action = {
'name': _("Update %s") % goal.definition_id.name,
'id': goal_id,
'type': 'ir.actions.act_window',
'views': [[False, 'form']],
'target': 'new',
'context': {'default_goal_id': goal_id, 'default_current': goal.current},
'res_model': 'gamification.goal.wizard'
}
return action
return False
| agpl-3.0 |
superdesk/superdesk-core | apps/ldap/users_service.py | 2 | 1273 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
from eve.utils import config
from superdesk.users import UsersService, UsersResource, is_admin # NOQA
logger = logging.getLogger(__name__)
class ADUsersService(UsersService):
"""
    Service class for UsersResource; it should be used when Active Directory (AD) authentication is active.
"""
readonly_fields = ["email", "first_name", "last_name"]
def on_fetched(self, doc):
super().on_fetched(doc)
for document in doc["_items"]:
self.set_defaults(document)
def on_fetched_item(self, doc):
super().on_fetched_item(doc)
self.set_defaults(doc)
def set_defaults(self, doc):
"""Set the readonly fields for LDAP user.
:param dict doc: user
"""
readonly = {}
user_attributes = config.LDAP_USER_ATTRIBUTES
for value in user_attributes.values():
if value in self.readonly_fields:
readonly[value] = True
doc["_readonly"] = readonly
| agpl-3.0 |
vlachoudis/sl4a | python/src/Lib/plat-irix5/IN.py | 66 | 3097 | # Generated by h2py from /usr/include/netinet/in.h
from warnings import warnpy3k
warnpy3k("the IN module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
# Included from sys/endian.h
LITTLE_ENDIAN = 1234
BIG_ENDIAN = 4321
PDP_ENDIAN = 3412
BYTE_ORDER = BIG_ENDIAN
BYTE_ORDER = LITTLE_ENDIAN
def ntohl(x): return (x)
def ntohs(x): return (x)
def htonl(x): return (x)
def htons(x): return (x)
def htonl(x): return ntohl(x)
def htons(x): return ntohs(x)
# Included from sys/bsd_types.h
# Included from sys/mkdev.h
ONBITSMAJOR = 7
ONBITSMINOR = 8
OMAXMAJ = 0x7f
OMAXMIN = 0xff
NBITSMAJOR = 14
NBITSMINOR = 18
MAXMAJ = 0x1ff
MAXMIN = 0x3ffff
OLDDEV = 0
NEWDEV = 1
MKDEV_VER = NEWDEV
def major(dev): return __major(MKDEV_VER, dev)
def minor(dev): return __minor(MKDEV_VER, dev)
# Included from sys/select.h
FD_SETSIZE = 1024
NBBY = 8
IPPROTO_IP = 0
IPPROTO_ICMP = 1
IPPROTO_IGMP = 2
IPPROTO_GGP = 3
IPPROTO_ENCAP = 4
IPPROTO_TCP = 6
IPPROTO_EGP = 8
IPPROTO_PUP = 12
IPPROTO_UDP = 17
IPPROTO_IDP = 22
IPPROTO_TP = 29
IPPROTO_XTP = 36
IPPROTO_HELLO = 63
IPPROTO_ND = 77
IPPROTO_EON = 80
IPPROTO_RAW = 255
IPPROTO_MAX = 256
IPPORT_RESERVED = 1024
IPPORT_USERRESERVED = 5000
IPPORT_MAXPORT = 65535
def IN_CLASSA(i): return (((long)(i) & 0x80000000) == 0)
IN_CLASSA_NET = 0xff000000
IN_CLASSA_NSHIFT = 24
IN_CLASSA_HOST = 0x00ffffff
IN_CLASSA_MAX = 128
def IN_CLASSB(i): return (((long)(i) & 0xc0000000) == 0x80000000)
IN_CLASSB_NET = 0xffff0000
IN_CLASSB_NSHIFT = 16
IN_CLASSB_HOST = 0x0000ffff
IN_CLASSB_MAX = 65536
def IN_CLASSC(i): return (((long)(i) & 0xe0000000) == 0xc0000000)
IN_CLASSC_NET = 0xffffff00
IN_CLASSC_NSHIFT = 8
IN_CLASSC_HOST = 0x000000ff
def IN_CLASSD(i): return (((long)(i) & 0xf0000000) == 0xe0000000)
IN_CLASSD_NET = 0xf0000000
IN_CLASSD_NSHIFT = 28
IN_CLASSD_HOST = 0x0fffffff
def IN_MULTICAST(i): return IN_CLASSD(i)
def IN_EXPERIMENTAL(i): return (((long)(i) & 0xf0000000) == 0xf0000000)
def IN_BADCLASS(i): return (((long)(i) & 0xf0000000) == 0xf0000000)
INADDR_ANY = 0x00000000
INADDR_BROADCAST = 0xffffffff
INADDR_LOOPBACK = 0x7F000001
INADDR_UNSPEC_GROUP = 0xe0000000
INADDR_ALLHOSTS_GROUP = 0xe0000001
INADDR_MAX_LOCAL_GROUP = 0xe00000ff
INADDR_NONE = 0xffffffff
IN_LOOPBACKNET = 127
IP_OPTIONS = 1
IP_MULTICAST_IF = 2
IP_MULTICAST_TTL = 3
IP_MULTICAST_LOOP = 4
IP_ADD_MEMBERSHIP = 5
IP_DROP_MEMBERSHIP = 6
IP_HDRINCL = 7
IP_TOS = 8
IP_TTL = 9
IP_RECVOPTS = 10
IP_RECVRETOPTS = 11
IP_RECVDSTADDR = 12
IP_RETOPTS = 13
IP_OPTIONS = 1
IP_HDRINCL = 2
IP_TOS = 3
IP_TTL = 4
IP_RECVOPTS = 5
IP_RECVRETOPTS = 6
IP_RECVDSTADDR = 7
IP_RETOPTS = 8
IP_MULTICAST_IF = 20
IP_MULTICAST_TTL = 21
IP_MULTICAST_LOOP = 22
IP_ADD_MEMBERSHIP = 23
IP_DROP_MEMBERSHIP = 24
IRIX4_IP_OPTIONS = 1
IRIX4_IP_MULTICAST_IF = 2
IRIX4_IP_MULTICAST_TTL = 3
IRIX4_IP_MULTICAST_LOOP = 4
IRIX4_IP_ADD_MEMBERSHIP = 5
IRIX4_IP_DROP_MEMBERSHIP = 6
IRIX4_IP_HDRINCL = 7
IRIX4_IP_TOS = 8
IRIX4_IP_TTL = 9
IRIX4_IP_RECVOPTS = 10
IRIX4_IP_RECVRETOPTS = 11
IRIX4_IP_RECVDSTADDR = 12
IRIX4_IP_RETOPTS = 13
IP_DEFAULT_MULTICAST_TTL = 1
IP_DEFAULT_MULTICAST_LOOP = 1
IP_MAX_MEMBERSHIPS = 20
| apache-2.0 |
l2isbad/netdata | collectors/python.d.plugin/python_modules/pyyaml2/composer.py | 4 | 4952 | # SPDX-License-Identifier: MIT
__all__ = ['Composer', 'ComposerError']
from error import MarkedYAMLError
from events import *
from nodes import *
class ComposerError(MarkedYAMLError):
pass
class Composer(object):
def __init__(self):
self.anchors = {}
def check_node(self):
# Drop the STREAM-START event.
if self.check_event(StreamStartEvent):
self.get_event()
# If there are more documents available?
return not self.check_event(StreamEndEvent)
def get_node(self):
# Get the root node of the next document.
if not self.check_event(StreamEndEvent):
return self.compose_document()
def get_single_node(self):
# Drop the STREAM-START event.
self.get_event()
# Compose a document if the stream is not empty.
document = None
if not self.check_event(StreamEndEvent):
document = self.compose_document()
# Ensure that the stream contains no more documents.
if not self.check_event(StreamEndEvent):
event = self.get_event()
raise ComposerError("expected a single document in the stream",
document.start_mark, "but found another document",
event.start_mark)
# Drop the STREAM-END event.
self.get_event()
return document
def compose_document(self):
# Drop the DOCUMENT-START event.
self.get_event()
# Compose the root node.
node = self.compose_node(None, None)
# Drop the DOCUMENT-END event.
self.get_event()
self.anchors = {}
return node
def compose_node(self, parent, index):
if self.check_event(AliasEvent):
event = self.get_event()
anchor = event.anchor
if anchor not in self.anchors:
raise ComposerError(None, None, "found undefined alias %r"
% anchor.encode('utf-8'), event.start_mark)
return self.anchors[anchor]
event = self.peek_event()
anchor = event.anchor
if anchor is not None:
if anchor in self.anchors:
raise ComposerError("found duplicate anchor %r; first occurence"
% anchor.encode('utf-8'), self.anchors[anchor].start_mark,
"second occurence", event.start_mark)
self.descend_resolver(parent, index)
if self.check_event(ScalarEvent):
node = self.compose_scalar_node(anchor)
elif self.check_event(SequenceStartEvent):
node = self.compose_sequence_node(anchor)
elif self.check_event(MappingStartEvent):
node = self.compose_mapping_node(anchor)
self.ascend_resolver()
return node
def compose_scalar_node(self, anchor):
event = self.get_event()
tag = event.tag
if tag is None or tag == u'!':
tag = self.resolve(ScalarNode, event.value, event.implicit)
node = ScalarNode(tag, event.value,
event.start_mark, event.end_mark, style=event.style)
if anchor is not None:
self.anchors[anchor] = node
return node
def compose_sequence_node(self, anchor):
start_event = self.get_event()
tag = start_event.tag
if tag is None or tag == u'!':
tag = self.resolve(SequenceNode, None, start_event.implicit)
node = SequenceNode(tag, [],
start_event.start_mark, None,
flow_style=start_event.flow_style)
if anchor is not None:
self.anchors[anchor] = node
index = 0
while not self.check_event(SequenceEndEvent):
node.value.append(self.compose_node(node, index))
index += 1
end_event = self.get_event()
node.end_mark = end_event.end_mark
return node
def compose_mapping_node(self, anchor):
start_event = self.get_event()
tag = start_event.tag
if tag is None or tag == u'!':
tag = self.resolve(MappingNode, None, start_event.implicit)
node = MappingNode(tag, [],
start_event.start_mark, None,
flow_style=start_event.flow_style)
if anchor is not None:
self.anchors[anchor] = node
while not self.check_event(MappingEndEvent):
#key_event = self.peek_event()
item_key = self.compose_node(node, None)
#if item_key in node.value:
# raise ComposerError("while composing a mapping", start_event.start_mark,
# "found duplicate key", key_event.start_mark)
item_value = self.compose_node(node, item_key)
#node.value[item_key] = item_value
node.value.append((item_key, item_value))
end_event = self.get_event()
node.end_mark = end_event.end_mark
return node
| gpl-3.0 |
usc-isi/essex-baremetal-support | nova/tests/notifier/test_list_notifier.py | 5 | 3462 | # Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import nova
from nova import log as logging
import nova.notifier.api
import nova.notifier.log_notifier
import nova.notifier.no_op_notifier
from nova.notifier import list_notifier
from nova import test
class NotifierListTestCase(test.TestCase):
"""Test case for notifications"""
def setUp(self):
super(NotifierListTestCase, self).setUp()
list_notifier._reset_drivers()
# Mock log to add one to exception_count when log.exception is called
def mock_exception(cls, *args):
self.exception_count += 1
self.exception_count = 0
list_notifier_log = logging.getLogger('nova.notifier.list_notifier')
self.stubs.Set(list_notifier_log, "exception", mock_exception)
# Mock no_op notifier to add one to notify_count when called.
def mock_notify(cls, *args):
self.notify_count += 1
self.notify_count = 0
self.stubs.Set(nova.notifier.no_op_notifier, 'notify', mock_notify)
# Mock log_notifier to raise RuntimeError when called.
def mock_notify2(cls, *args):
raise RuntimeError("Bad notifier.")
self.stubs.Set(nova.notifier.log_notifier, 'notify', mock_notify2)
def tearDown(self):
list_notifier._reset_drivers()
super(NotifierListTestCase, self).tearDown()
def test_send_notifications_successfully(self):
self.flags(notification_driver='nova.notifier.list_notifier',
list_notifier_drivers=['nova.notifier.no_op_notifier',
'nova.notifier.no_op_notifier'])
nova.notifier.api.notify('publisher_id', 'event_type',
nova.notifier.api.WARN, dict(a=3))
self.assertEqual(self.notify_count, 2)
self.assertEqual(self.exception_count, 0)
def test_send_notifications_with_errors(self):
self.flags(notification_driver='nova.notifier.list_notifier',
list_notifier_drivers=['nova.notifier.no_op_notifier',
'nova.notifier.log_notifier'])
nova.notifier.api.notify('publisher_id',
'event_type', nova.notifier.api.WARN, dict(a=3))
self.assertEqual(self.notify_count, 1)
self.assertEqual(self.exception_count, 1)
def test_when_driver_fails_to_import(self):
self.flags(notification_driver='nova.notifier.list_notifier',
list_notifier_drivers=['nova.notifier.no_op_notifier',
'nova.notifier.logo_notifier',
'fdsjgsdfhjkhgsfkj'])
nova.notifier.api.notify('publisher_id',
'event_type', nova.notifier.api.WARN, dict(a=3))
self.assertEqual(self.exception_count, 2)
self.assertEqual(self.notify_count, 1)
| apache-2.0 |
honghaoz/UW-Info-Session | UW-Info-Session-1.0/GAE Support/uw-info2/libs/requests/packages/chardet/langhungarianmodel.py | 2763 | 12536 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
Latin2_HungarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
46, 71, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,
175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,
191,192,193,194,195,196,197, 75,198,199,200,201,202,203,204,205,
79,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221, 51, 81,222, 78,223,224,225,226, 44,227,228,229, 61,230,231,
232,233,234, 58,235, 66, 59,236,237,238, 60, 69, 63,239,240,241,
82, 14, 74,242, 70, 80,243, 72,244, 15, 83, 77, 84, 30, 76, 85,
245,246,247, 25, 73, 42, 24,248,249,250, 31, 56, 29,251,252,253,
)
win1250HungarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
46, 72, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,
177,178,179,180, 78,181, 69,182,183,184,185,186,187,188,189,190,
191,192,193,194,195,196,197, 76,198,199,200,201,202,203,204,205,
81,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221, 51, 83,222, 80,223,224,225,226, 44,227,228,229, 61,230,231,
232,233,234, 58,235, 66, 59,236,237,238, 60, 70, 63,239,240,241,
84, 14, 75,242, 71, 82,243, 73,244, 15, 85, 79, 86, 30, 77, 87,
245,246,247, 25, 74, 42, 24,248,249,250, 31, 56, 29,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 94.7368%
# first 1024 sequences:5.2623%
# rest sequences: 0.8894%
# negative sequences: 0.0009%
HungarianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,2,3,3,1,1,2,2,2,2,2,1,2,
3,2,2,3,3,3,3,3,2,3,3,3,3,3,3,1,2,3,3,3,3,2,3,3,1,1,3,3,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
3,2,1,3,3,3,3,3,2,3,3,3,3,3,1,1,2,3,3,3,3,3,3,3,1,1,3,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,1,1,2,3,3,3,1,3,3,3,3,3,1,3,3,2,2,0,3,2,3,
0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,3,3,2,3,3,2,2,3,2,3,2,0,3,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,3,3,2,3,3,3,1,2,3,2,2,3,1,2,3,3,2,2,0,3,3,3,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,3,2,3,3,3,3,2,3,3,3,3,0,2,3,2,
0,0,0,1,1,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,3,3,2,1,3,2,2,3,2,1,3,2,2,1,0,3,3,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,2,2,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,3,2,2,3,1,1,3,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,1,3,3,3,3,3,2,2,1,3,3,3,0,1,1,2,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,2,0,3,2,3,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,1,3,2,2,2,3,1,1,3,3,1,1,0,3,3,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,2,3,3,3,3,3,1,2,3,2,2,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,1,3,3,2,2,1,3,3,3,1,1,3,1,2,3,2,3,2,2,2,1,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,2,2,3,2,1,0,3,2,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,1,0,3,3,3,3,0,2,3,0,0,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,2,2,2,2,3,3,0,1,2,3,2,3,2,2,3,2,1,2,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,3,3,3,3,3,1,2,3,3,3,2,1,2,3,3,2,2,2,3,2,3,3,1,3,3,1,1,0,2,3,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,1,2,2,2,2,3,3,3,1,1,1,3,3,1,1,3,1,1,3,2,1,2,3,1,1,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,1,2,1,1,3,3,1,1,1,1,3,3,1,1,2,2,1,2,1,1,2,2,1,1,0,2,2,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,1,1,2,1,1,3,3,1,0,1,1,3,3,2,0,1,1,2,3,1,0,2,2,1,0,0,1,3,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,2,1,3,3,3,3,3,1,2,3,2,3,3,2,1,1,3,2,3,2,1,2,2,0,1,2,1,0,0,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,2,2,2,3,1,2,2,1,1,3,3,0,3,2,1,2,3,2,1,3,3,1,1,0,2,1,3,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,2,3,3,3,2,1,1,3,3,1,1,1,2,2,3,2,3,2,2,2,1,0,2,2,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,0,3,3,3,3,3,0,0,3,3,2,3,0,0,0,2,3,3,1,0,1,2,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,2,3,3,3,3,3,1,2,3,3,2,2,1,1,0,3,3,2,2,1,2,2,1,0,2,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,2,1,3,1,2,3,3,2,2,1,1,2,2,1,1,1,1,3,2,1,1,1,1,2,1,0,1,2,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
2,3,3,1,1,1,1,1,3,3,3,0,1,1,3,3,1,1,1,1,1,2,2,0,3,1,1,2,0,2,1,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,1,0,1,2,1,2,2,0,1,2,3,1,2,0,0,0,2,1,1,1,1,1,2,0,0,1,1,0,0,0,0,
1,2,1,2,2,2,1,2,1,2,0,2,0,2,2,1,1,2,1,1,2,1,1,1,0,1,0,0,0,1,1,0,
1,1,1,2,3,2,3,3,0,1,2,2,3,1,0,1,0,2,1,2,2,0,1,1,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,3,3,2,2,1,0,0,3,2,3,2,0,0,0,1,1,3,0,0,1,1,0,0,2,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,1,0,1,3,2,3,1,1,1,0,1,1,1,1,1,3,1,0,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,1,2,2,2,1,0,1,2,3,3,2,0,0,0,2,1,1,1,2,1,1,1,0,1,1,1,0,0,0,
1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,2,1,1,1,1,1,1,0,1,1,1,0,0,1,1,
3,2,2,1,0,0,1,1,2,2,0,3,0,1,2,1,1,0,0,1,1,1,0,1,1,1,1,0,2,1,1,1,
2,2,1,1,1,2,1,2,1,1,1,1,1,1,1,2,1,1,1,2,3,1,1,1,1,1,1,1,1,1,0,1,
2,3,3,0,1,0,0,0,3,3,1,0,0,1,2,2,1,0,0,0,0,2,0,0,1,1,1,0,2,1,1,1,
2,1,1,1,1,1,1,2,1,1,0,1,1,0,1,1,1,0,1,2,1,1,0,1,1,1,1,1,1,1,0,1,
2,3,3,0,1,0,0,0,2,2,0,0,0,0,1,2,2,0,0,0,0,1,0,0,1,1,0,0,2,0,1,0,
2,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
3,2,2,0,1,0,1,0,2,3,2,0,0,1,2,2,1,0,0,1,1,1,0,0,2,1,0,1,2,2,1,1,
2,1,1,1,1,1,1,2,1,1,1,1,1,1,0,2,1,0,1,1,0,1,1,1,0,1,1,2,1,1,0,1,
2,2,2,0,0,1,0,0,2,2,1,1,0,0,2,1,1,0,0,0,1,2,0,0,2,1,0,0,2,1,1,1,
2,1,1,1,1,2,1,2,1,1,1,2,2,1,1,2,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,
1,2,3,0,0,0,1,0,3,2,1,0,0,1,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,2,1,
1,1,0,0,0,1,0,1,1,1,1,1,2,0,0,1,0,0,0,2,0,0,1,1,1,1,1,1,1,1,0,1,
3,0,0,2,1,2,2,1,0,0,2,1,2,2,0,0,0,2,1,1,1,0,1,1,0,0,1,1,2,0,0,0,
1,2,1,2,2,1,1,2,1,2,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,0,0,1,
1,3,2,0,0,0,1,0,2,2,2,0,0,0,2,2,1,0,0,0,0,3,1,1,1,1,0,0,2,1,1,1,
2,1,0,1,1,1,0,1,1,1,1,1,1,1,0,2,1,0,0,1,0,1,1,0,1,1,1,1,1,1,0,1,
2,3,2,0,0,0,1,0,2,2,0,0,0,0,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,1,0,
2,1,1,1,1,2,1,2,1,2,0,1,1,1,0,2,1,1,1,2,1,1,1,1,0,1,1,1,1,1,0,1,
3,1,1,2,2,2,3,2,1,1,2,2,1,1,0,1,0,2,2,1,1,1,1,1,0,0,1,1,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,0,0,0,0,0,2,2,0,0,0,0,2,2,1,0,0,0,1,1,0,0,1,2,0,0,2,1,1,1,
2,2,1,1,1,2,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,1,1,0,1,2,1,1,1,0,1,
1,0,0,1,2,3,2,1,0,0,2,0,1,1,0,0,0,1,1,1,1,0,1,1,0,0,1,0,0,0,0,0,
1,2,1,2,1,2,1,1,1,2,0,2,1,1,1,0,1,2,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
2,3,2,0,0,0,0,0,1,1,2,1,0,0,1,1,1,0,0,0,0,2,0,0,1,1,0,0,2,1,1,1,
2,1,1,1,1,1,1,2,1,0,1,1,1,1,0,2,1,1,1,1,1,1,0,1,0,1,1,1,1,1,0,1,
1,2,2,0,1,1,1,0,2,2,2,0,0,0,3,2,1,0,0,0,1,1,0,0,1,1,0,1,1,1,0,0,
1,1,0,1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,0,0,1,1,1,0,1,0,1,
2,1,0,2,1,1,2,2,1,1,2,1,1,1,0,0,0,1,1,0,1,1,1,1,0,0,1,1,1,0,0,0,
1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,1,0,
1,2,3,0,0,0,1,0,2,2,0,0,0,0,2,2,0,0,0,0,0,1,0,0,1,0,0,0,2,0,1,0,
2,1,1,1,1,1,0,2,0,0,0,1,2,1,1,1,1,0,1,2,0,1,0,1,0,1,1,1,0,1,0,1,
2,2,2,0,0,0,1,0,2,1,2,0,0,0,1,1,2,0,0,0,0,1,0,0,1,1,0,0,2,1,0,1,
2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
1,2,2,0,0,0,1,0,2,2,2,0,0,0,1,1,0,0,0,0,0,1,1,0,2,0,0,1,1,1,0,1,
1,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,0,0,1,1,0,1,0,1,1,1,1,1,0,0,0,1,
1,0,0,1,0,1,2,1,0,0,1,1,1,2,0,0,0,1,1,0,1,0,1,1,0,0,1,0,0,0,0,0,
0,2,1,2,1,1,1,1,1,2,0,2,0,1,1,0,1,2,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
2,1,1,0,1,2,0,0,1,1,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,0,0,0,2,1,0,1,
2,2,1,1,1,1,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,0,1,0,1,1,1,1,1,0,1,
1,2,2,0,0,0,0,0,1,1,0,0,0,0,2,1,0,0,0,0,0,2,0,0,2,2,0,0,2,0,0,1,
2,1,1,1,1,1,1,1,0,1,1,0,1,1,0,1,0,0,0,1,1,1,1,0,0,1,1,1,1,0,0,1,
1,1,2,0,0,3,1,0,2,1,1,1,0,0,1,1,1,0,0,0,1,1,0,0,0,1,0,0,1,0,1,0,
1,2,1,0,1,1,1,2,1,1,0,1,1,1,1,1,0,0,0,1,1,1,1,1,0,1,0,0,0,1,0,0,
2,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,2,0,0,0,
2,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,1,0,1,
2,1,1,1,2,1,1,1,0,1,1,2,1,0,0,0,0,1,1,1,1,0,1,0,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,1,1,1,1,0,0,1,1,2,1,0,0,0,1,1,0,0,0,1,1,0,0,1,0,1,0,0,0,
1,2,1,1,1,1,1,1,1,1,0,1,0,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
2,0,0,0,1,1,1,1,0,0,1,1,0,0,0,0,0,1,1,1,2,0,0,1,0,0,1,0,1,0,0,0,
0,1,1,1,1,1,1,1,1,2,0,1,1,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,0,0,2,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,
0,1,1,1,1,1,1,0,1,1,0,1,0,1,1,0,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,0,0,1,1,0,1,0,1,0,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,1,0,0,0,0,0,0,1,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,0,1,0,0,1,1,0,1,0,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
2,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,0,1,0,0,1,0,1,0,1,1,1,0,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,0,0,0,1,1,1,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,1,0,1,1,0,1,0,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
)
Latin2HungarianModel = {
'charToOrderMap': Latin2_HungarianCharToOrderMap,
'precedenceMatrix': HungarianLangModel,
'mTypicalPositiveRatio': 0.947368,
'keepEnglishLetter': True,
'charsetName': "ISO-8859-2"
}
Win1250HungarianModel = {
'charToOrderMap': win1250HungarianCharToOrderMap,
'precedenceMatrix': HungarianLangModel,
'mTypicalPositiveRatio': 0.947368,
'keepEnglishLetter': True,
'charsetName': "windows-1250"
}
# flake8: noqa
| mit |
virt-who/virt-who | virtwho/manager/subscriptionmanager/subscriptionmanager.py | 1 | 16260 | # -*- coding: utf-8 -*-
from __future__ import print_function
"""
Module for communication with subscription-manager, part of virt-who
Copyright (C) 2011 Radek Novacek <[email protected]>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
import os
import json
from six.moves.http_client import BadStatusLine
from six import string_types
import rhsm.connection as rhsm_connection
import rhsm.certificate as rhsm_certificate
import rhsm.config as rhsm_config
from virtwho.config import NotSetSentinel
from virtwho.manager import Manager, ManagerError, ManagerFatalError, ManagerThrottleError
from virtwho.virt import AbstractVirtReport
from virtwho.util import generate_correlation_id
class SubscriptionManagerError(ManagerError):
pass
class SubscriptionManagerUnregisteredError(ManagerFatalError):
pass
# Mapping between strings returned from getJob and report statuses
STATE_MAPPING = {
'FINISHED': AbstractVirtReport.STATE_FINISHED,
'CANCELED': AbstractVirtReport.STATE_CANCELED,
'FAILED': AbstractVirtReport.STATE_FAILED,
'RUNNING': AbstractVirtReport.STATE_PROCESSING,
'WAITING': AbstractVirtReport.STATE_PROCESSING,
'CREATED': AbstractVirtReport.STATE_PROCESSING,
}
class NamedOptions(object):
"""
Object used for compatibility with RHSM
"""
pass
class SubscriptionManager(Manager):
sm_type = "sam"
""" Class for interacting subscription-manager. """
def __init__(self, logger, options):
self.logger = logger
self.options = options
self.cert_uuid = None
self.rhsm_config = None
self.cert_file = None
self.key_file = None
self.readConfig()
self.connection = None
self.correlation_id = generate_correlation_id()
def readConfig(self):
""" Parse rhsm.conf in order to obtain consumer
certificate and key paths. """
self.rhsm_config = rhsm_config.initConfig(
rhsm_config.DEFAULT_CONFIG_PATH)
consumer_cert_dir = self.rhsm_config.get("rhsm", "consumerCertDir")
cert = 'cert.pem'
key = 'key.pem'
self.cert_file = os.path.join(consumer_cert_dir, cert)
self.key_file = os.path.join(consumer_cert_dir, key)
def _check_owner_lib(self, kwargs, config):
"""
        Try to check values of env and owner. These values have to be
equal to values obtained from Satellite server.
:param kwargs: dictionary possibly containing valid username and
password used for connection to rhsm
:param config: Configuration of virt-who
:return: None
"""
if config is None:
return
# Check 'owner' and 'env' only in situation, when these values
# are set and rhsm_username and rhsm_password are not set
if 'username' not in kwargs and 'password' not in kwargs and \
'owner' in config.keys() and 'env' in config.keys():
pass
else:
return
uuid = self.uuid()
consumer = self.connection.getConsumer(uuid)
if 'environment' in consumer:
environment = consumer['environment']
else:
return
if environment:
environment_name = environment['name']
owner = self.connection.getOwner(uuid)
owner_id = owner['key']
if config['owner'] != owner_id:
raise ManagerError(
"Cannot send data to: %s, because owner from configuration: %s is different" %
(owner_id, config['owner'])
)
if config['env'] != environment_name:
raise ManagerError(
"Cannot send data to: %s, because Satellite env: %s differs from configuration: %s" %
(owner_id, environment_name, config['env'])
)
def _connect(self, config=None):
""" Connect to the subscription-manager. """
kwargs = {
'host': self.rhsm_config.get('server', 'hostname'),
'ssl_port': int(self.rhsm_config.get('server', 'port')),
'handler': self.rhsm_config.get('server', 'prefix'),
'proxy_hostname': self.rhsm_config.get('server', 'proxy_hostname'),
'proxy_port': self.rhsm_config.get('server', 'proxy_port'),
'proxy_user': self.rhsm_config.get('server', 'proxy_user'),
'proxy_password': self.rhsm_config.get('server', 'proxy_password'),
'insecure': self.rhsm_config.get('server', 'insecure')
}
kwargs_to_config = {
'host': 'rhsm_hostname',
'ssl_port': 'rhsm_port',
'handler': 'rhsm_prefix',
'proxy_hostname': 'rhsm_proxy_hostname',
'proxy_port': 'rhsm_proxy_port',
'proxy_user': 'rhsm_proxy_user',
'proxy_password': 'rhsm_proxy_password',
'insecure': 'rhsm_insecure'
}
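        # Any rhsm_* option set in the virt-who configuration overrides the
        # corresponding value read from rhsm.conf above (applied in the loop
        # over kwargs below).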
rhsm_username = None
rhsm_password = None
if config:
try:
rhsm_username = config['rhsm_username']
rhsm_password = config['rhsm_password']
except KeyError:
pass
if rhsm_username == NotSetSentinel:
rhsm_username = None
if rhsm_password == NotSetSentinel:
rhsm_password = None
# Testing for None is necessary, it might be an empty string
for key, value in kwargs.items():
try:
from_config = config[kwargs_to_config[key]]
if from_config is not NotSetSentinel and from_config is \
not None:
                    if key == 'ssl_port':
from_config = int(from_config)
kwargs[key] = from_config
except KeyError:
continue
if rhsm_username and rhsm_password:
self.logger.debug("Authenticating with RHSM username %s", rhsm_username)
kwargs['username'] = rhsm_username
kwargs['password'] = rhsm_password
else:
self.logger.debug("Authenticating with certificate: %s", self.cert_file)
if not os.access(self.cert_file, os.R_OK):
raise SubscriptionManagerUnregisteredError(
"Unable to read certificate, system is not registered or you are not root")
kwargs['cert_file'] = self.cert_file
kwargs['key_file'] = self.key_file
self.logger.info("X-Correlation-ID: %s", self.correlation_id)
if self.correlation_id:
kwargs['correlation_id'] = self.correlation_id
self.connection = rhsm_connection.UEPConnection(**kwargs)
try:
if not self.connection.ping()['result']:
raise SubscriptionManagerError(
"Unable to obtain status from server, UEPConnection is likely not usable."
)
except rhsm_connection.RateLimitExceededException as e:
raise ManagerThrottleError(e.retry_after)
except BadStatusLine:
raise ManagerError("Communication with subscription manager interrupted")
self._check_owner_lib(kwargs, config)
return self.connection
def sendVirtGuests(self, report, options=None):
"""
Update consumer facts with info about virtual guests.
`guests` is a list of `Guest` instances (or it children).
"""
guests = report.guests
self._connect()
# Sort the list
guests.sort(key=lambda item: item.uuid)
serialized_guests = [guest.toDict() for guest in guests]
self.logger.info('Sending update in guests lists for config '
'"%s": %d guests found',
report.config.name, len(guests))
self.logger.debug("Domain info: %s", json.dumps(serialized_guests, indent=4))
# Send list of guest uuids to the server
try:
self.connection.updateConsumer(self.uuid(), guest_uuids=serialized_guests, hypervisor_id=report.hypervisor_id)
except rhsm_connection.GoneException:
raise ManagerError("Communication with subscription manager failed: consumer no longer exists")
except rhsm_connection.RateLimitExceededException as e:
raise ManagerThrottleError(e.retry_after)
report.state = AbstractVirtReport.STATE_FINISHED
def hypervisorCheckIn(self, report, options=None):
""" Send hosts to guests mapping to subscription manager. """
connection = self._connect(report.config)
is_async = self._is_rhsm_server_async(report, connection)
serialized_mapping = self._hypervisor_mapping(report, is_async, connection)
self.logger.debug("Host-to-guest mapping being sent to '{owner}': {mapping}".format(
owner=report.config['owner'],
mapping=json.dumps(serialized_mapping, indent=4)))
# All subclasses of ConfigSection use dictionary like notation,
# but RHSM uses attribute like notation
if options:
named_options = NamedOptions()
for key, value in options['global'].items():
setattr(named_options, key, value)
else:
named_options = None
try:
try:
result = self.connection.hypervisorCheckIn(
report.config['owner'],
report.config['env'],
serialized_mapping,
options=named_options) # pylint:disable=unexpected-keyword-arg
except TypeError:
# This is temporary workaround until the options parameter gets implemented
# in python-rhsm
self.logger.debug(
"hypervisorCheckIn method in python-rhsm doesn't understand options parameter, ignoring"
)
result = self.connection.hypervisorCheckIn(report.config['owner'], report.config['env'], serialized_mapping)
except BadStatusLine:
raise ManagerError("Communication with subscription manager interrupted")
except rhsm_connection.RateLimitExceededException as e:
raise ManagerThrottleError(e.retry_after)
except rhsm_connection.GoneException:
raise ManagerError("Communication with subscription manager failed: consumer no longer exists")
except rhsm_connection.ConnectionException as e:
if hasattr(e, 'code'):
raise ManagerError("Communication with subscription manager failed with code %d: %s" % (e.code, str(e)))
raise ManagerError("Communication with subscription manager failed: %s" % str(e))
if is_async is True:
report.state = AbstractVirtReport.STATE_CREATED
report.job_id = result['id']
else:
report.state = AbstractVirtReport.STATE_FINISHED
return result
def _is_rhsm_server_async(self, report, connection=None):
"""
        Check if server has capability 'hypervisors_async'.
"""
if connection is None:
self._connect(report.config)
self.logger.debug("Checking if server has capability 'hypervisor_async'")
is_async = hasattr(self.connection, 'has_capability') and self.connection.has_capability('hypervisors_async')
if is_async:
self.logger.debug("Server has capability 'hypervisors_async'")
else:
self.logger.debug("Server does not have 'hypervisors_async' capability")
return is_async
def _hypervisor_mapping(self, report, is_async, connection=None):
"""
        Return the hypervisor-to-guest mapping in the format expected by the server.
"""
if connection is None:
self._connect(report.config)
mapping = report.association
serialized_mapping = {}
ids_seen = []
if is_async:
hosts = []
# Transform the mapping into the async version
for hypervisor in mapping['hypervisors']:
if hypervisor.hypervisorId in ids_seen:
self.logger.warning("The hypervisor id '%s' is assigned to 2 different systems. "
"Only one will be recorded at the server." % hypervisor.hypervisorId)
hosts.append(hypervisor.toDict())
ids_seen.append(hypervisor.hypervisorId)
serialized_mapping = {'hypervisors': hosts}
else:
# Reformat the data from the mapping to make it fit with
# the old api.
for hypervisor in mapping['hypervisors']:
if hypervisor.hypervisorId in ids_seen:
self.logger.warning("The hypervisor id '%s' is assigned to 2 different systems. "
"Only one will be recorded at the server." % hypervisor.hypervisorId)
guests = [g.toDict() for g in hypervisor.guestIds]
serialized_mapping[hypervisor.hypervisorId] = guests
ids_seen.append(hypervisor.hypervisorId)
return serialized_mapping
def check_report_state(self, report):
# BZ 1554228
job_id = str(report.job_id)
self._connect(report.config)
self.logger.debug('Checking status of job %s', job_id)
try:
result = self.connection.getJob(job_id)
except BadStatusLine:
raise ManagerError("Communication with subscription manager interrupted")
except rhsm_connection.RateLimitExceededException as e:
raise ManagerThrottleError(e.retry_after)
except rhsm_connection.ConnectionException as e:
if hasattr(e, 'code'):
raise ManagerError("Communication with subscription manager failed with code %d: %s" % (e.code, str(e)))
raise ManagerError("Communication with subscription manager failed: %s" % str(e))
state = STATE_MAPPING.get(result['state'], AbstractVirtReport.STATE_FAILED)
report.state = state
if state not in (AbstractVirtReport.STATE_FINISHED,
AbstractVirtReport.STATE_CANCELED,
AbstractVirtReport.STATE_FAILED):
self.logger.debug('Job %s not finished', job_id)
else:
# log completed job status
result_data = result.get('resultData', {})
if not result_data:
self.logger.warning("Job status report without resultData: %s", result)
return
if isinstance(result_data, string_types):
self.logger.warning("Job status report encountered the following error: %s", result_data)
return
for fail in result_data.get('failedUpdate', []):
self.logger.error("Error during update list of guests: %s", str(fail))
self.logger.debug("Number of mappings unchanged: %d", len(result_data.get('unchanged', [])))
self.logger.info("Mapping for config \"%s\" updated", report.config.name)
def uuid(self):
""" Read consumer certificate and get consumer UUID from it. """
if not self.cert_uuid:
try:
certificate = rhsm_certificate.create_from_file(self.cert_file)
self.cert_uuid = certificate.subject["CN"]
except Exception as e:
raise SubscriptionManagerError("Unable to open certificate %s (%s):" % (self.cert_file, str(e)))
return self.cert_uuid
| gpl-2.0 |
sestrella/ansible | lib/ansible/modules/network/ios/ios_facts.py | 12 | 7398 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: ios_facts
version_added: "2.2"
author:
- "Peter Sprygada (@privateip)"
- "Sumit Jaiswal (@justjais)"
short_description: Collect facts from remote devices running Cisco IOS
description:
- Collects a base set of device facts from a remote device that
is running IOS. This module prepends all of the
base network fact keys with C(ansible_net_<fact>). The facts
module will always collect a base set of facts from the device
and can enable or disable collection of additional facts.
extends_documentation_fragment: ios
notes:
- Tested against IOS 15.6
options:
gather_subset:
description:
- When supplied, this argument restricts the facts collected
to a given subset.
- Possible values for this argument include
C(all), C(min), C(hardware), C(config), and C(interfaces).
- Specify a list of values to include a larger subset.
- Use a value with an initial C(!) to collect all facts except that subset.
required: false
default: '!config'
gather_network_resources:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all and the resources like interfaces, vlans etc.
Can specify a list of values to include a larger subset.
        Values can also be used with an initial C(!) to specify that
a specific subset should not be collected.
Valid subsets are 'all', 'interfaces', 'l2_interfaces', 'vlans',
'lag_interfaces', 'lacp', 'lacp_interfaces', 'lldp_global',
'lldp_interfaces', 'l3_interfaces'.
version_added: "2.9"
"""
EXAMPLES = """
- name: Gather all legacy facts
ios_facts:
gather_subset: all
- name: Gather only the config and default facts
ios_facts:
gather_subset:
- config
- name: Do not gather hardware facts
ios_facts:
gather_subset:
- "!hardware"
- name: Gather legacy and resource facts
ios_facts:
gather_subset: all
gather_network_resources: all
- name: Gather only the interfaces resource facts and no legacy facts
ios_facts:
gather_subset:
- '!all'
- '!min'
gather_network_resources:
- interfaces
- name: Gather interfaces resource and minimal legacy facts
ios_facts:
gather_subset: min
gather_network_resources: interfaces
- name: Gather L2 interfaces resource and minimal legacy facts
ios_facts:
gather_subset: min
gather_network_resources: l2_interfaces
- name: Gather L3 interfaces resource and minimal legacy facts
ios_facts:
gather_subset: min
gather_network_resources: l3_interfaces
"""
RETURN = """
ansible_net_gather_subset:
description: The list of fact subsets collected from the device
returned: always
type: list
ansible_net_gather_network_resources:
description: The list of fact for network resource subsets collected from the device
returned: when the resource is configured
type: list
# default
ansible_net_model:
description: The model name returned from the device
returned: always
type: str
ansible_net_serialnum:
description: The serial number of the remote device
returned: always
type: str
ansible_net_version:
description: The operating system version running on the remote device
returned: always
type: str
ansible_net_iostype:
description: The operating system type (IOS or IOS-XE) running on the remote device
returned: always
type: str
ansible_net_hostname:
description: The configured hostname of the device
returned: always
type: str
ansible_net_image:
description: The image file the device is running
returned: always
type: str
ansible_net_stacked_models:
description: The model names of each device in the stack
returned: when multiple devices are configured in a stack
type: list
ansible_net_stacked_serialnums:
description: The serial numbers of each device in the stack
returned: when multiple devices are configured in a stack
type: list
ansible_net_api:
description: The name of the transport
returned: always
type: str
ansible_net_python_version:
description: The Python version Ansible controller is using
returned: always
type: str
# hardware
ansible_net_filesystems:
description: All file system names available on the device
returned: when hardware is configured
type: list
ansible_net_filesystems_info:
description: A hash of all file systems containing info about each file system (e.g. free and total space)
returned: when hardware is configured
type: dict
ansible_net_memfree_mb:
description: The available free memory on the remote device in Mb
returned: when hardware is configured
type: int
ansible_net_memtotal_mb:
description: The total memory on the remote device in Mb
returned: when hardware is configured
type: int
# config
ansible_net_config:
description: The current active config from the device
returned: when config is configured
type: str
# interfaces
ansible_net_all_ipv4_addresses:
description: All IPv4 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_all_ipv6_addresses:
description: All IPv6 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_interfaces:
description: A hash of all interfaces running on the system
returned: when interfaces is configured
type: dict
ansible_net_neighbors:
description:
- The list of CDP and LLDP neighbors from the remote device. If both,
CDP and LLDP neighbor data is present on one port, CDP is preferred.
returned: when interfaces is configured
type: dict
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.ios.argspec.facts.facts import FactsArgs
from ansible.module_utils.network.ios.facts.facts import Facts
from ansible.module_utils.network.ios.ios import ios_argument_spec
def main():
""" Main entry point for AnsibleModule
"""
argument_spec = FactsArgs.argument_spec
argument_spec.update(ios_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = ['default value for `gather_subset` '
'will be changed to `min` from `!config` v2.11 onwards']
result = Facts(module).get_facts()
ansible_facts, additional_warnings = result
warnings.extend(additional_warnings)
module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
if __name__ == '__main__':
main()
| gpl-3.0 |
naiquevin/jinger | jinger/test/test_site.py | 1 | 1107 | # import unittest
import os
from jinger.site import create_empty_site, createdir
from jinger.test import DIR_PLAYGROUND, JingerPlaygroundTest
class SiteTest(JingerPlaygroundTest):
def test_create_dir(self):
mysite = createdir(DIR_PLAYGROUND, 'mysite')
self.assertTrue(os.path.exists(mysite))
# check that if the dir already exists, it raises an Exception
pass
def test_create_empty_site(self):
create_empty_site('mysite', DIR_PLAYGROUND)
newsite = os.path.join(DIR_PLAYGROUND, 'mysite')
os.path.exists(newsite)
os.path.exists(os.path.join(newsite, 'templates'))
os.path.exists(os.path.join(newsite, 'public'))
os.path.exists(os.path.join(newsite, 'config.json'))
create_empty_site('myothersite', DIR_PLAYGROUND, '_source', 'www')
newsite = os.path.join(DIR_PLAYGROUND, 'myothersite')
os.path.exists(newsite)
os.path.exists(os.path.join(newsite, '_source'))
os.path.exists(os.path.join(newsite, 'www'))
os.path.exists(os.path.join(newsite, 'config.json'))
| mit |
Blitzen/oauthlib | oauthlib/oauth1/rfc5849/endpoints/resource.py | 42 | 7083 | # -*- coding: utf-8 -*-
"""
oauthlib.oauth1.rfc5849.endpoints.resource
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of the resource protection provider logic of
OAuth 1.0 RFC 5849.
"""
from __future__ import absolute_import, unicode_literals
import logging
from .base import BaseEndpoint
from .. import errors
log = logging.getLogger(__name__)
class ResourceEndpoint(BaseEndpoint):
"""An endpoint responsible for protecting resources.
Typical use is to instantiate with a request validator and invoke the
``validate_protected_resource_request`` in a decorator around a view
function. If the request is valid, invoke and return the response of the
view. If invalid create and return an error response directly from the
decorator.
See :doc:`/oauth1/validator` for details on which validator methods to implement
for this endpoint.
An example decorator::
from functools import wraps
from your_validator import your_validator
from oauthlib.oauth1 import ResourceEndpoint
endpoint = ResourceEndpoint(your_validator)
def require_oauth(realms=None):
def decorator(f):
@wraps(f)
def wrapper(request, *args, **kwargs):
                    v, r = endpoint.validate_protected_resource_request(
request.url,
http_method=request.method,
body=request.data,
headers=request.headers,
realms=realms or [])
if v:
return f(*args, **kwargs)
else:
                        return abort(403)
                return wrapper
            return decorator
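
    A view can then be protected by applying the decorator; the view name
    and realm below are illustrative only::

        @require_oauth(realms=['users'])
        def my_view(request):
            return 'Protected resource'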
"""
def validate_protected_resource_request(self, uri, http_method='GET',
body=None, headers=None, realms=None):
"""Create a request token response, with a new request token if valid.
:param uri: The full URI of the token request.
:param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
:param body: The request body as a string.
:param headers: The request headers as a dict.
:param realms: A list of realms the resource is protected under.
This will be supplied to the ``validate_realms``
method of the request validator.
:returns: A tuple of 2 elements.
1. True if valid, False otherwise.
2. An oauthlib.common.Request object.
"""
try:
request = self._create_request(uri, http_method, body, headers)
except errors.OAuth1Error:
return False, None
try:
self._check_transport_security(request)
self._check_mandatory_parameters(request)
except errors.OAuth1Error:
return False, request
if not request.resource_owner_key:
return False, request
if not self.request_validator.check_access_token(
request.resource_owner_key):
return False, request
if not self.request_validator.validate_timestamp_and_nonce(
request.client_key, request.timestamp, request.nonce, request,
access_token=request.resource_owner_key):
return False, request
# The server SHOULD return a 401 (Unauthorized) status code when
# receiving a request with invalid client credentials.
# Note: This is postponed in order to avoid timing attacks, instead
# a dummy client is assigned and used to maintain near constant
# time request verification.
#
# Note that early exit would enable client enumeration
valid_client = self.request_validator.validate_client_key(
request.client_key, request)
if not valid_client:
request.client_key = self.request_validator.dummy_client
# The server SHOULD return a 401 (Unauthorized) status code when
# receiving a request with invalid or expired token.
# Note: This is postponed in order to avoid timing attacks, instead
# a dummy token is assigned and used to maintain near constant
# time request verification.
#
# Note that early exit would enable resource owner enumeration
valid_resource_owner = self.request_validator.validate_access_token(
request.client_key, request.resource_owner_key, request)
if not valid_resource_owner:
request.resource_owner_key = self.request_validator.dummy_access_token
# Note that `realm`_ is only used in authorization headers and how
# it should be interepreted is not included in the OAuth spec.
# However they could be seen as a scope or realm to which the
# client has access and as such every client should be checked
# to ensure it is authorized access to that scope or realm.
# .. _`realm`: http://tools.ietf.org/html/rfc2617#section-1.2
#
# Note that early exit would enable client realm access enumeration.
#
# The require_realm indicates this is the first step in the OAuth
# workflow where a client requests access to a specific realm.
# This first step (obtaining request token) need not require a realm
# and can then be identified by checking the require_resource_owner
        # flag and absence of realm.
#
# Clients obtaining an access token will not supply a realm and it will
# not be checked. Instead the previously requested realm should be
# transferred from the request token to the access token.
#
# Access to protected resources will always validate the realm but note
# that the realm is now tied to the access token and not provided by
# the client.
valid_realm = self.request_validator.validate_realms(request.client_key,
request.resource_owner_key, request, uri=request.uri,
realms=realms)
valid_signature = self._check_signature(request)
# We delay checking validity until the very end, using dummy values for
# calculations and fetching secrets/keys to ensure the flow of every
# request remains almost identical regardless of whether valid values
# have been supplied. This ensures near constant time execution and
# prevents malicious users from guessing sensitive information
v = all((valid_client, valid_resource_owner, valid_realm,
valid_signature))
if not v:
log.info("[Failure] request verification failed.")
log.info("Valid client: %s", valid_client)
log.info("Valid token: %s", valid_resource_owner)
log.info("Valid realm: %s", valid_realm)
log.info("Valid signature: %s", valid_signature)
return v, request
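    # Illustrative sketch (caller-side, not part of oauthlib itself): the returned
    # pair is typically unpacked, and the request object reused for logging or for
    # looking up the authenticated client, e.g.
    #
    #   valid, req = endpoint.validate_protected_resource_request(
    #       uri, http_method='GET', headers=headers, realms=['photos'])
    #   if not valid:
    #       deny_access()  # hypothetical helper in the calling application
    #
    # The 'photos' realm and deny_access() are assumed example values only.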
| bsd-3-clause |
sunlightlabs/openstates | scrapers/md/events.py | 2 | 4320 | import pytz
import dateutil.parser
import datetime
from urllib.parse import urlsplit, parse_qs
from utils import LXMLMixin
from openstates.scrape import Scraper, Event
class MDEventScraper(Scraper, LXMLMixin):
_TZ = pytz.timezone("US/Eastern")
chambers = {"upper": "Senate", "lower": ""}
date_format = "%B %d, %Y"
def scrape(self, chamber=None, start=None, end=None):
if start is None:
start_date = datetime.datetime.now().strftime(self.date_format)
else:
start_date = datetime.datetime.strptime(start, "%Y-%m-%d")
start_date = start_date.strftime(self.date_format)
# default to 30 days if no end
if end is None:
dtdelta = datetime.timedelta(days=30)
end_date = datetime.datetime.now() + dtdelta
end_date = end_date.strftime(self.date_format)
else:
end_date = datetime.datetime.strptime(end, "%Y-%m-%d")
end_date = end_date.strftime(self.date_format)
url = "http://mgaleg.maryland.gov/webmga/frmHearingSchedule.aspx?&range={} - {}"
url = url.format(start_date, end_date)
page = self.lxmlize(url)
if chamber is None:
yield from self.scrape_chamber(page, "upper")
yield from self.scrape_chamber(page, "lower")
else:
yield from self.scrape_chamber(page, chamber)
def scrape_chamber(self, page, chamber):
        xpath = '//div[@id="ContentPlaceHolder1_div{}SingleColumn"]/div'.format(
            self.chambers[chamber]
        )
com = None
rows = page.xpath(xpath)
for row in rows:
css = row.xpath("@class")[0]
if "CommitteeBanner" in css:
com = row.xpath("string(.//h3/a[1])").strip()
elif "CmteInfo" in css or "DayPanelSingleColumn" in css:
yield from self.parse_div(row, chamber, com)
def parse_div(self, row, chamber, com):
cal_link = row.xpath('.//a[.//span[@id="calendarmarker"]]/@href')[0]
# event_date = row.xpath('string(.//div[contains(@class,"ItemDate")])').strip()
title, location, start_date, end_date = self.parse_gcal(cal_link)
event = Event(
start_date=start_date, end_date=end_date, name=title, location_name=location
)
event.add_source("http://mgaleg.maryland.gov/webmga/frmHearingSchedule.aspx")
for item in row.xpath('.//div[@class="col-xs-12a Item"]'):
description = item.xpath("string(.)").strip()
agenda = event.add_agenda_item(description=description)
for item in row.xpath('.//div[contains(@class,"ItemContainer")]/a'):
description = item.xpath("string(.)").strip()
agenda = event.add_agenda_item(description=description)
event.add_document(
description,
item.xpath("@href")[0],
media_type="application/pdf",
on_duplicate="ignore",
)
for item in row.xpath(
'.//div[contains(@class,"ItemContainer")]' '[./div[@class="col-xs-1 Item"]]'
):
description = item.xpath("string(.)").strip()
agenda = event.add_agenda_item(description=description)
bill = item.xpath('.//div[@class="col-xs-1 Item"]/a/text()')[0].strip()
agenda.add_bill(bill)
video = row.xpath('.//a[./span[@class="OnDemand"]]')
if video:
event.add_media_link(
"Video of Hearing", video[0].xpath("@href")[0], "text/html"
)
if "subcommittee" in title.lower():
subcom = title.split("-")[0].strip()
event.add_participant(subcom, type="committee", note="host")
else:
event.add_participant(com, type="committee", note="host")
yield event
# Due to the convoluted HTML, it's easier just to parse the google cal links
def parse_gcal(self, url):
query = urlsplit(url).query
params = parse_qs(query)
dates = params["dates"][0].split("/")
start_date = self._TZ.localize(dateutil.parser.parse(dates[0]))
end_date = self._TZ.localize(dateutil.parser.parse(dates[1]))
return params["text"][0], params["location"][0], start_date, end_date
| gpl-3.0 |
sodafree/backend | build/ipython/build/lib.linux-i686-2.7/IPython/core/tests/test_oinspect.py | 3 | 8346 | """Tests for the object inspection functionality.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team.
#
# Distributed under the terms of the BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Stdlib imports
import os
import re
# Third-party imports
import nose.tools as nt
# Our own imports
from .. import oinspect
from IPython.core.magic import (Magics, magics_class, line_magic,
cell_magic, line_cell_magic,
register_line_magic, register_cell_magic,
register_line_cell_magic)
from IPython.external.decorator import decorator
from IPython.utils import py3compat
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
inspector = oinspect.Inspector()
ip = get_ipython()
#-----------------------------------------------------------------------------
# Local utilities
#-----------------------------------------------------------------------------
# WARNING: since this test checks the line number where a function is
# defined, if any code is inserted above, the following line will need to be
# updated. Do NOT insert any whitespace between the next line and the function
# definition below.
THIS_LINE_NUMBER = 48 # Put here the actual number of this line
def test_find_source_lines():
nt.assert_equal(oinspect.find_source_lines(test_find_source_lines),
THIS_LINE_NUMBER+1)
# A couple of utilities to ensure these tests work the same from a source or a
# binary install
def pyfile(fname):
return os.path.normcase(re.sub('.py[co]$', '.py', fname))
def match_pyfiles(f1, f2):
nt.assert_equal(pyfile(f1), pyfile(f2))
def test_find_file():
match_pyfiles(oinspect.find_file(test_find_file), os.path.abspath(__file__))
def test_find_file_decorated1():
@decorator
def noop1(f):
def wrapper():
return f(*a, **kw)
return wrapper
@noop1
def f(x):
"My docstring"
match_pyfiles(oinspect.find_file(f), os.path.abspath(__file__))
nt.assert_equal(f.__doc__, "My docstring")
def test_find_file_decorated2():
@decorator
def noop2(f, *a, **kw):
return f(*a, **kw)
@noop2
def f(x):
"My docstring 2"
match_pyfiles(oinspect.find_file(f), os.path.abspath(__file__))
nt.assert_equal(f.__doc__, "My docstring 2")
def test_find_file_magic():
run = ip.find_line_magic('run')
nt.assert_not_equal(oinspect.find_file(run), None)
# A few generic objects we can then inspect in the tests below
class Call(object):
"""This is the class docstring."""
def __init__(self, x, y=1):
"""This is the constructor docstring."""
def __call__(self, *a, **kw):
"""This is the call docstring."""
def method(self, x, z=2):
"""Some method's docstring"""
class OldStyle:
"""An old-style class for testing."""
pass
def f(x, y=2, *a, **kw):
"""A simple function."""
def g(y, z=3, *a, **kw):
pass # no docstring
@register_line_magic
def lmagic(line):
"A line magic"
@register_cell_magic
def cmagic(line, cell):
"A cell magic"
@register_line_cell_magic
def lcmagic(line, cell=None):
"A line/cell magic"
@magics_class
class SimpleMagics(Magics):
@line_magic
def Clmagic(self, cline):
"A class-based line magic"
@cell_magic
def Ccmagic(self, cline, ccell):
"A class-based cell magic"
@line_cell_magic
def Clcmagic(self, cline, ccell=None):
"A class-based line/cell magic"
def check_calltip(obj, name, call, docstring):
"""Generic check pattern all calltip tests will use"""
info = inspector.info(obj, name)
call_line, ds = oinspect.call_tip(info)
nt.assert_equal(call_line, call)
nt.assert_equal(ds, docstring)
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
def test_calltip_class():
check_calltip(Call, 'Call', 'Call(x, y=1)', Call.__init__.__doc__)
def test_calltip_instance():
c = Call(1)
check_calltip(c, 'c', 'c(*a, **kw)', c.__call__.__doc__)
def test_calltip_method():
c = Call(1)
check_calltip(c.method, 'c.method', 'c.method(x, z=2)', c.method.__doc__)
def test_calltip_function():
check_calltip(f, 'f', 'f(x, y=2, *a, **kw)', f.__doc__)
def test_calltip_function2():
check_calltip(g, 'g', 'g(y, z=3, *a, **kw)', '<no docstring>')
def test_calltip_builtin():
check_calltip(sum, 'sum', None, sum.__doc__)
def test_calltip_line_magic():
check_calltip(lmagic, 'lmagic', 'lmagic(line)', "A line magic")
def test_calltip_cell_magic():
check_calltip(cmagic, 'cmagic', 'cmagic(line, cell)', "A cell magic")
def test_calltip_line_cell_magic():
check_calltip(lcmagic, 'lcmagic', 'lcmagic(line, cell=None)',
"A line/cell magic")
def test_class_magics():
cm = SimpleMagics(ip)
ip.register_magics(cm)
check_calltip(cm.Clmagic, 'Clmagic', 'Clmagic(cline)',
"A class-based line magic")
check_calltip(cm.Ccmagic, 'Ccmagic', 'Ccmagic(cline, ccell)',
"A class-based cell magic")
check_calltip(cm.Clcmagic, 'Clcmagic', 'Clcmagic(cline, ccell=None)',
"A class-based line/cell magic")
def test_info():
"Check that Inspector.info fills out various fields as expected."
i = inspector.info(Call, oname='Call')
nt.assert_equal(i['type_name'], 'type')
    expected_class = str(type(type))  # <class 'type'> (Python 3) or <type 'type'> (Python 2)
    nt.assert_equal(i['base_class'], expected_class)
nt.assert_equal(i['string_form'], "<class 'IPython.core.tests.test_oinspect.Call'>")
fname = __file__
if fname.endswith(".pyc"):
fname = fname[:-1]
# case-insensitive comparison needed on some filesystems
# e.g. Windows:
nt.assert_equal(i['file'].lower(), fname.lower())
nt.assert_equal(i['definition'], 'Call(self, *a, **kw)\n')
nt.assert_equal(i['docstring'], Call.__doc__)
nt.assert_equal(i['source'], None)
nt.assert_true(i['isclass'])
nt.assert_equal(i['init_definition'], "Call(self, x, y=1)\n")
nt.assert_equal(i['init_docstring'], Call.__init__.__doc__)
i = inspector.info(Call, detail_level=1)
nt.assert_not_equal(i['source'], None)
nt.assert_equal(i['docstring'], None)
c = Call(1)
c.__doc__ = "Modified instance docstring"
i = inspector.info(c)
nt.assert_equal(i['type_name'], 'Call')
nt.assert_equal(i['docstring'], "Modified instance docstring")
nt.assert_equal(i['class_docstring'], Call.__doc__)
nt.assert_equal(i['init_docstring'], Call.__init__.__doc__)
nt.assert_equal(i['call_docstring'], c.__call__.__doc__)
# Test old-style classes, which for example may not have an __init__ method.
if not py3compat.PY3:
i = inspector.info(OldStyle)
nt.assert_equal(i['type_name'], 'classobj')
i = inspector.info(OldStyle())
nt.assert_equal(i['type_name'], 'instance')
nt.assert_equal(i['docstring'], OldStyle.__doc__)
def test_getdoc():
class A(object):
"""standard docstring"""
pass
class B(object):
"""standard docstring"""
def getdoc(self):
return "custom docstring"
class C(object):
"""standard docstring"""
def getdoc(self):
return None
a = A()
b = B()
c = C()
nt.assert_equal(oinspect.getdoc(a), "standard docstring")
nt.assert_equal(oinspect.getdoc(b), "custom docstring")
nt.assert_equal(oinspect.getdoc(c), "standard docstring")
def test_pdef():
# See gh-1914
def foo(): pass
inspector.pdef(foo, 'foo')
| bsd-3-clause |
jonathan-beard/edx-platform | lms/djangoapps/teams/migrations/0004_auto__add_field_courseteam_discussion_topic_id__add_field_courseteam_l.py | 46 | 6547 | # -*- coding: utf-8 -*-
import pytz
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'CourseTeam.last_activity_at'
db.add_column('teams_courseteam', 'last_activity_at',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2015, 8, 17, 0, 0).replace(tzinfo=pytz.utc)),
keep_default=False)
# Adding field 'CourseTeamMembership.last_activity_at'
db.add_column('teams_courseteammembership', 'last_activity_at',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2015, 8, 17, 0, 0).replace(tzinfo=pytz.utc)),
keep_default=False)
def backwards(self, orm):
# Deleting field 'CourseTeam.last_activity_at'
db.delete_column('teams_courseteam', 'last_activity_at')
# Deleting field 'CourseTeamMembership.last_activity_at'
db.delete_column('teams_courseteammembership', 'last_activity_at')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'teams.courseteam': {
'Meta': {'object_name': 'CourseTeam'},
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'discussion_topic_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'language': ('student.models.LanguageField', [], {'max_length': '16', 'blank': 'True'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'team_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'topic_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'db_index': 'True', 'related_name': "'teams'", 'symmetrical': 'False', 'through': "orm['teams.CourseTeamMembership']", 'to': "orm['auth.User']"})
},
'teams.courseteammembership': {
'Meta': {'unique_together': "(('user', 'team'),)", 'object_name': 'CourseTeamMembership'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'membership'", 'to': "orm['teams.CourseTeam']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['teams']
| agpl-3.0 |
beagles/neutron_hacking | neutron/services/firewall/agents/varmour/varmour_api.py | 20 | 4931 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 vArmour Networks Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Gary Duan, [email protected], vArmour Networks
import base64
import httplib2
from oslo.config import cfg
from neutron.openstack.common import jsonutils as json
from neutron.openstack.common import log as logging
from neutron.services.firewall.agents.varmour import varmour_utils as va_utils
OPTS = [
cfg.StrOpt('director', default='localhost',
help=_("vArmour director ip")),
cfg.StrOpt('director_port', default='443',
help=_("vArmour director port")),
cfg.StrOpt('username', default='varmour',
help=_("vArmour director username")),
cfg.StrOpt('password', default='varmour', secret=True,
help=_("vArmour director password")), ]
cfg.CONF.register_opts(OPTS, "vArmour")
LOG = logging.getLogger(__name__)
REST_URL_PREFIX = '/api/v1.0'
class vArmourAPIException(Exception):
message = _("An unknown exception.")
def __init__(self, **kwargs):
try:
self.err = self.message % kwargs
except Exception:
self.err = self.message
def __str__(self):
return self.err
class AuthenticationFailure(vArmourAPIException):
message = _("Invalid login credential.")
class vArmourRestAPI(object):
def __init__(self):
LOG.debug(_('vArmourRestAPI: started'))
self.user = cfg.CONF.vArmour.username
self.passwd = cfg.CONF.vArmour.password
self.server = cfg.CONF.vArmour.director
self.port = cfg.CONF.vArmour.director_port
self.timeout = 3
self.key = ''
def auth(self):
headers = {}
enc = base64.b64encode(self.user + ':' + self.passwd)
headers['Authorization'] = 'Basic ' + enc
resp = self.rest_api('POST', va_utils.REST_URL_AUTH, None, headers)
if resp and resp['status'] == 200:
self.key = resp['body']['auth']
return True
else:
raise AuthenticationFailure()
def commit(self):
self.rest_api('POST', va_utils.REST_URL_COMMIT)
def rest_api(self, method, url, body=None, headers=None):
url = REST_URL_PREFIX + url
if body:
body_data = json.dumps(body)
else:
body_data = ''
if not headers:
headers = {}
enc = base64.b64encode('%s:%s' % (self.user, self.key))
headers['Authorization'] = 'Basic ' + enc
LOG.debug(_("vArmourRestAPI: %(server)s %(port)s"),
{'server': self.server, 'port': self.port})
try:
action = "https://" + self.server + ":" + self.port + url
LOG.debug(_("vArmourRestAPI Sending: "
"%(method)s %(action)s %(headers)s %(body_data)s"),
{'method': method, 'action': action,
'headers': headers, 'body_data': body_data})
h = httplib2.Http(timeout=3,
disable_ssl_certificate_validation=True)
resp, resp_str = h.request(action, method,
body=body_data,
headers=headers)
LOG.debug(_("vArmourRestAPI Response: %(status)s %(resp_str)s"),
{'status': resp.status, 'resp_str': resp_str})
if resp.status == 200:
return {'status': resp.status,
'reason': resp.reason,
'body': json.loads(resp_str)}
except Exception:
LOG.error(_('vArmourRestAPI: Could not establish HTTP connection'))
def del_cfg_objs(self, url, prefix):
resp = self.rest_api('GET', url)
if resp and resp['status'] == 200:
olist = resp['body']['response']
if not olist:
return
for o in olist:
if o.startswith(prefix):
self.rest_api('DELETE', url + '/"name:%s"' % o)
self.commit()
def count_cfg_objs(self, url, prefix):
count = 0
resp = self.rest_api('GET', url)
if resp and resp['status'] == 200:
for o in resp['body']['response']:
if o.startswith(prefix):
count += 1
return count
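    # Illustrative sketch (assumed calling pattern, placeholder names): the
    # firewall agent typically authenticates once per session and then issues
    # REST calls followed by a commit, e.g.
    #
    #   api = vArmourRestAPI()
    #   api.auth()
    #   api.rest_api('POST', some_rule_url, body=rule_dict)
    #   api.commit()
    #
    # some_rule_url and rule_dict are placeholders; real URLs come from
    # varmour_utils constants such as va_utils.REST_URL_COMMIT used above.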
| apache-2.0 |
eerwitt/tensorflow | tensorflow/python/saved_model/main_op_impl.py | 25 | 2164 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SavedModel main op implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops as tf_data_flow_ops
from tensorflow.python.ops import variables
def main_op():
"""Returns a main op to init variables and tables.
Returns the main op including the group of ops that initializes all
variables, initializes local variables and initialize all tables.
Returns:
The set of ops to be run as part of the main op upon the load operation.
"""
init = variables.global_variables_initializer()
init_local = variables.local_variables_initializer()
init_tables = tf_data_flow_ops.tables_initializer()
return control_flow_ops.group(init, init_local, init_tables)
def main_op_with_restore(restore_op_name):
"""Returns a main op to init variables, tables and restore the graph.
Returns the main op including the group of ops that initializes all
variables, initialize local variables, initialize all tables and the restore
op name.
Args:
restore_op_name: Name of the op to use to restore the graph.
Returns:
The set of ops to be run as part of the main op upon the load operation.
"""
with ops.control_dependencies([main_op()]):
main_op_with_restore = control_flow_ops.group(restore_op_name)
return main_op_with_restore
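# Illustrative sketch (assumed export-side usage, not part of this module): the
# op returned by main_op() is normally handed to the SavedModel builder, e.g.
#
#   builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
#   builder.add_meta_graph_and_variables(
#       sess, [tf.saved_model.tag_constants.SERVING], main_op=main_op())
#   builder.save()
#
# export_dir and sess are placeholders for an export path and a live session.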
| apache-2.0 |
twobob/buildroot-kindle | output/build/host-python-2.7.2/Tools/bgen/bgen/bgenType.py | 44 | 9507 | """Type classes and a modest collection of standard types."""
from bgenOutput import *
class Type:
"""Define the various things you can do with a C type.
Most methods are intended to be extended or overridden.
"""
def __init__(self, typeName, fmt):
"""Call with the C name and getargs format for the type.
Example: int = Type("int", "i")
"""
self.typeName = typeName
self.fmt = fmt
def declare(self, name, reference=False):
"""Declare a variable of the type with a given name.
Example: int.declare('spam') prints "int spam;"
"""
for decl in self.getArgDeclarations(name, reference):
Output("%s;", decl)
for decl in self.getAuxDeclarations(name):
Output("%s;", decl)
def getArgDeclarations(self, name, reference=False, constmode=False, outmode=False):
"""Return the main part of the declarations for this type: the items
that will be passed as arguments in the C/C++ function call."""
if reference:
ref = "&"
else:
ref = ""
if constmode:
const = "const "
else:
const = ""
if outmode:
out = "*"
else:
out = ""
return ["%s%s%s%s %s" % (const, self.typeName, ref, out, name)]
def getAuxDeclarations(self, name):
"""Return any auxiliary declarations needed for implementing this
type, such as helper variables used to hold sizes, etc. These declarations
are not part of the C/C++ function call interface."""
return []
def getargs(self):
return self.getargsFormat(), self.getargsArgs()
def getargsFormat(self):
"""Return the format for this type for use with PyArg_Parse().
Example: int.getargsFormat() returns the string "i".
(getargs is a very old name for PyArg_Parse, hence the name of this method).
"""
return self.fmt
def getargsArgs(self, name):
"""Return an argument for use with PyArg_Parse().
Example: int.getargsArgs("spam") returns the string "&spam".
"""
return "&" + name
def getargsPreCheck(self, name):
"""Perform any actions needed before calling getargs().
This could include declaring temporary variables and such.
"""
def getargsCheck(self, name):
"""Perform any needed post-[new]getargs() checks.
This is type-dependent; the default does not check for errors.
An example would be a check for a maximum string length, or it
could do post-getargs() copying or conversion."""
def passInput(self, name):
"""Return an argument for passing a variable into a call.
Example: int.passInput("spam") returns the string "spam".
"""
return name
def passOutput(self, name):
"""Return an argument for returning a variable out of a call.
Example: int.passOutput("spam") returns the string "&spam".
"""
return "&" + name
def passReference(self, name):
"""Return an argument for C++ pass-by-reference.
Default is to call passInput().
"""
return self.passInput(name)
def errorCheck(self, name):
"""Check for an error returned in the variable.
This is type-dependent; the default does not check for errors.
An example would be a check for a NULL pointer.
If an error is found, the generated routine should
raise an exception and return NULL.
XXX There should be a way to add error clean-up code.
"""
Output("/* XXX no err check for %s %s */", self.typeName, name)
def mkvalue(self):
return self.mkvalueFormat(), self.mkvalueArgs()
def mkvalueFormat(self):
"""Return the format for this type for use with Py_BuildValue().
This is normally the same as getargsFormat() but it is
a separate function to allow future divergence.
(mkvalue is a very old name for Py_BuildValue, hence the name of this
method).
"""
return self.getargsFormat()
def mkvalueArgs(self, name):
"""Return an argument for use with Py_BuildValue().
Example: int.mkvalueArgs("spam") returns the string "spam".
"""
return name
def mkvaluePreCheck(self, name):
"""Perform any actions needed before calling mkvalue().
This could include declaring temporary variables and such.
"""
def cleanup(self, name):
"""Clean up if necessary.
This is normally empty; it may deallocate buffers etc.
"""
pass
class ByAddressType(Type):
"Simple type that is also passed by address for input"
def passInput(self, name):
return "&%s" % name
# Sometimes it's useful to define a type that's only usable as input or output parameter
class InputOnlyMixIn:
"Mix-in class to boobytrap passOutput"
def passOutput(self, name):
raise RuntimeError, "Type '%s' can only be used for input parameters" % self.typeName
class InputOnlyType(InputOnlyMixIn, Type):
"Same as Type, but only usable for input parameters -- passOutput is boobytrapped"
class OutputOnlyMixIn:
"Mix-in class to boobytrap passInput"
def passInput(self, name):
raise RuntimeError, "Type '%s' can only be used for output parameters" % self.typeName
class OutputOnlyType(OutputOnlyMixIn, Type):
"Same as Type, but only usable for output parameters -- passInput is boobytrapped"
# A modest collection of standard C types.
void = None
char = Type("char", "c")
short = Type("short", "h")
unsigned_short = Type("unsigned short", "H")
int = Type("int", "i")
long = Type("long", "l")
unsigned_long = Type("unsigned long", "l")
float = Type("float", "f")
double = Type("double", "d")
# The most common use of character pointers is a null-terminated string.
# For input, this is easy. For output, and for other uses of char *,
# see the module bgenBuffer.
stringptr = InputOnlyType("char*", "s")
unicodestringptr = InputOnlyType("wchar_t *", "u")
# Some Python related types.
objectptr = Type("PyObject*", "O")
stringobjectptr = Type("PyStringObject*", "S")
# Etc.
class FakeType(InputOnlyType):
"""A type that is not represented in the Python version of the interface.
Instantiate with a value to pass in the call.
"""
def __init__(self, substitute):
self.substitute = substitute
self.typeName = None # Don't show this argument in __doc__ string
def getArgDeclarations(self, name, reference=False, constmode=False, outmode=False):
return []
def getAuxDeclarations(self, name, reference=False):
return []
def getargsFormat(self):
return ""
def getargsArgs(self, name):
return None
def passInput(self, name):
return self.substitute
class OpaqueType(Type):
"""A type represented by an opaque object type, always passed by address.
Instantiate with the type name and the names of the new and convert procs.
If fewer than three arguments are passed, the second argument is used
to derive the new and convert procs by appending _New and _Convert; it
defaults to the first argument.
"""
def __init__(self, name, arg = None, extra = None):
self.typeName = name
if extra is None:
# Two arguments (name, usetype) or one (name)
arg = arg or name
self.new = arg + '_New'
self.convert = arg + '_Convert'
else:
# Three arguments (name, new, convert)
self.new = arg
self.convert = extra
def getargsFormat(self):
return "O&"
def getargsArgs(self, name):
return "%s, &%s" % (self.convert, name)
def passInput(self, name):
return "&%s" % name
def mkvalueFormat(self):
return "O&"
def mkvalueArgs(self, name):
return "%s, &%s" % (self.new, name)
class OpaqueByValueType(OpaqueType):
"""A type represented by an opaque object type, on input passed BY VALUE.
Instantiate with the type name, and optionally an object type name whose
New/Convert functions will be used.
"""
def passInput(self, name):
return name
def mkvalueArgs(self, name):
return "%s, %s" % (self.new, name)
class OpaqueByRefType(OpaqueType):
"""An opaque object type, passed by reference.
Instantiate with the type name, and optionally an object type name whose
New/Convert functions will be used.
"""
def passInput(self, name):
return name
# def passOutput(self, name):
# return name
def mkvalueFormat(self):
return "O"
def mkvalueArgs(self, name):
return "%s(%s)" % (self.new, name)
class OpaqueByValueStructType(OpaqueByValueType):
"""Similar to OpaqueByValueType, but we also pass this to mkvalue by
address, in stead of by value.
"""
def mkvalueArgs(self, name):
return "%s, &%s" % (self.new, name)
class OpaqueArrayType(OpaqueByValueType):
"""A type represented by an opaque object type, with ARRAY passing semantics.
Instantiate with the type name, and optional an object type name whose
New/Convert functions will be used.
"""
def getargsArgs(self, name):
return "%s, %s" % (self.convert, name)
def passOutput(self, name):
return name
| gpl-2.0 |
DMSC-Instrument-Data/lewis | setup.py | 2 | 2713 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# *********************************************************************
# lewis - a library for creating hardware device simulators
# Copyright (C) 2016-2017 European Spallation Source ERIC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# *********************************************************************
from setuptools import setup, find_packages
# as suggested on http://python-packaging.readthedocs.io/en/latest/metadata.html
def readme():
with open('README.rst') as f:
return f.read()
setup(
name='lewis',
version='1.2.0',
description='LeWIS - Let\'s Write Intricate Simulators!',
long_description=readme(),
url='https://github.com/DMSC-Instrument-Data/lewis',
author='Michael Hart, Michael Wedel, Owen Arnold',
author_email='[email protected]',
license='GPL v3',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'Topic :: Software Development :: Libraries',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
],
keywords='hardware simulation controls',
packages=find_packages(where='src'),
package_dir={'': 'src'},
install_requires=['six', 'pyzmq', 'json-rpc', 'semantic_version',
'PyYAML', 'scanf>=1.4.1'],
extras_require={
'epics': ['pcaspy'],
'dev': ['flake8', 'mock>=1.0.1', 'sphinx>=1.4.5', 'sphinx_rtd_theme',
'pytest', 'pytest-cov', 'coverage', 'tox'],
},
entry_points={
'console_scripts': [
'lewis=lewis.scripts.run:run_simulation',
'lewis-control=lewis.scripts.control:control_simulation'
],
},
)
| gpl-3.0 |
archf/ansible | test/units/module_utils/test_network_common.py | 31 | 5437 | # -*- coding: utf-8 -*-
#
# (c) 2017 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.module_utils.network_common import to_list, sort_list
from ansible.module_utils.network_common import dict_diff, dict_merge
from ansible.module_utils.network_common import conditional, Template
class TestModuleUtilsNetworkCommon(unittest.TestCase):
def test_to_list(self):
for scalar in ('string', 1, True, False, None):
self.assertTrue(isinstance(to_list(scalar), list))
for container in ([1, 2, 3], {'one': 1}):
self.assertTrue(isinstance(to_list(container), list))
test_list = [1, 2, 3]
self.assertNotEqual(id(test_list), id(to_list(test_list)))
def test_sort(self):
data = [3, 1, 2]
self.assertEqual([1, 2, 3], sort_list(data))
string_data = '123'
self.assertEqual(string_data, sort_list(string_data))
def test_dict_diff(self):
base = dict(obj2=dict(), b1=True, b2=False, b3=False,
one=1, two=2, three=3, obj1=dict(key1=1, key2=2),
l1=[1, 3], l2=[1, 2, 3], l4=[4],
nested=dict(n1=dict(n2=2)))
other = dict(b1=True, b2=False, b3=True, b4=True,
one=1, three=4, four=4, obj1=dict(key1=2),
l1=[2, 1], l2=[3, 2, 1], l3=[1],
nested=dict(n1=dict(n2=2, n3=3)))
result = dict_diff(base, other)
# string assertions
self.assertNotIn('one', result)
self.assertNotIn('two', result)
self.assertEqual(result['three'], 4)
self.assertEqual(result['four'], 4)
# dict assertions
self.assertIn('obj1', result)
self.assertIn('key1', result['obj1'])
self.assertNotIn('key2', result['obj1'])
# list assertions
self.assertEqual(result['l1'], [2, 1])
self.assertNotIn('l2', result)
self.assertEqual(result['l3'], [1])
self.assertNotIn('l4', result)
# nested assertions
self.assertIn('obj1', result)
self.assertEqual(result['obj1']['key1'], 2)
self.assertNotIn('key2', result['obj1'])
# bool assertions
self.assertNotIn('b1', result)
self.assertNotIn('b2', result)
self.assertTrue(result['b3'])
self.assertTrue(result['b4'])
def test_dict_merge(self):
base = dict(obj2=dict(), b1=True, b2=False, b3=False,
one=1, two=2, three=3, obj1=dict(key1=1, key2=2),
l1=[1, 3], l2=[1, 2, 3], l4=[4],
nested=dict(n1=dict(n2=2)))
other = dict(b1=True, b2=False, b3=True, b4=True,
one=1, three=4, four=4, obj1=dict(key1=2),
l1=[2, 1], l2=[3, 2, 1], l3=[1],
nested=dict(n1=dict(n2=2, n3=3)))
result = dict_merge(base, other)
# string assertions
self.assertIn('one', result)
self.assertIn('two', result)
self.assertEqual(result['three'], 4)
self.assertEqual(result['four'], 4)
# dict assertions
self.assertIn('obj1', result)
self.assertIn('key1', result['obj1'])
self.assertIn('key2', result['obj1'])
# list assertions
self.assertEqual(result['l1'], [1, 2, 3])
self.assertIn('l2', result)
self.assertEqual(result['l3'], [1])
self.assertIn('l4', result)
# nested assertions
self.assertIn('obj1', result)
self.assertEqual(result['obj1']['key1'], 2)
self.assertIn('key2', result['obj1'])
# bool assertions
self.assertIn('b1', result)
self.assertIn('b2', result)
self.assertTrue(result['b3'])
self.assertTrue(result['b4'])
def test_conditional(self):
self.assertTrue(conditional(10, 10))
self.assertTrue(conditional('10', '10'))
self.assertTrue(conditional('foo', 'foo'))
self.assertTrue(conditional(True, True))
self.assertTrue(conditional(False, False))
self.assertTrue(conditional(None, None))
self.assertTrue(conditional("ge(1)", 1))
self.assertTrue(conditional("gt(1)", 2))
self.assertTrue(conditional("le(2)", 2))
self.assertTrue(conditional("lt(3)", 2))
self.assertTrue(conditional("eq(1)", 1))
self.assertTrue(conditional("neq(0)", 1))
self.assertTrue(conditional("min(1)", 1))
self.assertTrue(conditional("max(1)", 1))
self.assertTrue(conditional("exactly(1)", 1))
def test_template(self):
tmpl = Template()
self.assertEqual('foo', tmpl('{{ test }}', {'test': 'foo'}))
| gpl-3.0 |
kaiserroll14/301finalproject | main/osx/main/requests/packages/__init__.py | 838 | 1384 | '''
Debian and other distributions "unbundle" requests' vendored dependencies, and
rewrite all imports to use the global versions of ``urllib3`` and ``chardet``.
The problem with this is that not only requests itself imports those
dependencies, but third-party code outside of the distros' control too.
In reaction to these problems, the distro maintainers replaced
``requests.packages`` with a magical "stub module" that imports the correct
modules. The implementations were varying in quality and all had severe
problems. For example, a symlink (or hardlink) that links the correct modules
into place introduces problems regarding object identity, since you now have
two modules in `sys.modules` with the same API, but different identities::
requests.packages.urllib3 is not urllib3
With version ``2.5.2``, requests started to maintain its own stub, so that
distro-specific breakage would be reduced to a minimum, even though the whole
issue is not requests' fault in the first place. See
https://github.com/kennethreitz/requests/pull/2375 for the corresponding pull
request.
'''
from __future__ import absolute_import
import sys
try:
from . import urllib3
except ImportError:
import urllib3
sys.modules['%s.urllib3' % __name__] = urllib3
try:
from . import chardet
except ImportError:
import chardet
sys.modules['%s.chardet' % __name__] = chardet
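# Illustrative consequence of the aliasing above, in the "unbundled" case where
# the vendored copy is missing and the system-wide package is used instead:
#
#   import urllib3
#   import requests.packages.urllib3
#   assert requests.packages.urllib3 is urllib3  # same module object
#
# When the vendored copy *is* present, requests.packages.urllib3 refers to that
# bundled module instead, and the assertion above would not necessarily hold.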
| gpl-3.0 |
kata198/usrsvc | usrsvcmod/Monitoring/ActivityFile.py | 1 | 3670 | '''
Copyright (c) 2016 Tim Savannah All Rights Reserved.
This software is licensed under the terms of the GPLv3.
This may change at my discretion, retroactively, and without notice.
You should have received a copy of this with the source distribution as a file titled, LICENSE.
The most current license can be found at:
https://github.com/kata198/usrsvc/LICENSE
This location may need to be changed at some point in the future, in which case
you are may email Tim Savannah <kata198 at gmail dot com>, or find them on the
current website intended for distribution of usrsvc.
ActivityFileMonitor - Asserts that a specific file or directory should be modified within a certain threshold
'''
# vim:set ts=4 shiftwidth=4 softtabstop=4 expandtab :
import os
import time
from func_timeout import FunctionTimedOut
from . import MonitoringBase
from ..logging import logMsg, logErr
# TODO: We need to implement the check here as launching and joining on a thread, so that we don't lockup all monitoring if someone
# uses an NFS file on a disconnected device or anything else that will result in an indefinite uninterruptable ("D") state.
class ActivityFileMonitor(MonitoringBase):
'''
ActivityFileMonitor - Class for doing activity file monitoring
'''
def __init__(self, programName, activityFile, activityFileLimit):
MonitoringBase.__init__(self)
self.programName = programName
self.activityFile = activityFile
self.activityFileLimit = activityFileLimit
@classmethod
def createFromConfig(cls, programConfig):
if not programConfig.Monitoring.activityfile:
return None
return cls(programConfig.name, programConfig.Monitoring.activityfile, programConfig.Monitoring.activityfile_limit)
def shouldRestart(self, program=None):
'''
Returns True if activity file has not been modified within the threshold specified by activityfile_limit (should restart), otherwise False.
@param program - unused.
'''
activityFile = self.activityFile
activityFileLimit = self.activityFileLimit
programName = self.programName
if not activityFile:
# Yes this is checked twice if created through createFromConfig, but it may be called otherwise so better safe.
return False
try:
# If activity file is not present, this is a fail and we restart.
if not os.path.exists(activityFile):
self.setReason('Restarting %s because activity file ( %s ) does not exist\n' %(programName, activityFile,))
return True
# Gather the mtime and see if we are past the threshold
lastModified = os.stat(activityFile).st_mtime
now = time.time()
threshold = float(now - self.activityFileLimit)
if lastModified < threshold:
self.setReason('Restarting %s because it has not modified activity file ( %s ) in %.4f seconds. Limit is %d seconds.\n' %(programName, activityFile, float(now - lastModified), activityFileLimit) )
return True
except FunctionTimedOut:
logErr('MONITOR: ActivityFile timed out on %s\n' %(programName,))
raise
except Exception as e:
# If we got an exception, just log and try again next round.
logErr('Got an exception in activity file monitoring. Not restarting program. Program="%s" activityfile="%s"\nlocals: %s\n' %(programName, activityFile, str(locals())))
return False
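    # Illustrative sketch (option names mirror createFromConfig above; the values
    # are assumptions): a program whose config sets something like
    #
    #   activityfile = /var/run/myprog/heartbeat
    #   activityfile_limit = 300
    #
    # in its Monitoring section will be flagged for restart by shouldRestart()
    # once the heartbeat file is missing or older than 300 seconds.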
# vim:set ts=4 shiftwidth=4 softtabstop=4 expandtab :
| lgpl-2.1 |
johnsensible/django-sendfile | examples/protected_downloads/settings.py | 4 | 2706 | # Django settings for protected_downloads project.
import os.path
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_ROOT, 'download.db'),
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'n309^dwk=@+g72ko--8vjyz&1v0u%xf#*0=wzr=2n#f3hb0a=l'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
ROOT_URLCONF = 'protected_downloads.urls'
TEMPLATE_DIRS = (
os.path.join(PROJECT_ROOT, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'download',
'sendfile',
)
# SENDFILE settings
SENDFILE_BACKEND = 'sendfile.backends.development'
#SENDFILE_BACKEND = 'sendfile.backends.xsendfile'
#SENDFILE_BACKEND = 'sendfile.backends.nginx'
SENDFILE_ROOT = os.path.join(PROJECT_ROOT, 'protected')
SENDFILE_URL = '/protected'
| bsd-3-clause |
stefanoteso/musm-adt17 | musm/pc.py | 1 | 4018 | import numpy as np
import gurobipy as gurobi
from .problem import Problem
class PC(Problem):
_ATTRIBUTES = [
('cpu', 37),
('hd', 10),
('manufacturer', 8),
('ram', 10),
('monitor', 8),
('pctype', 3),
]
_ATTR_TO_COSTS = {
'pctype': [50, 0, 80],
'manufacturer': [100, 0, 100, 50, 0, 0, 50, 50],
'cpu' : [
1.4*100, 1.4*130, 1.1*70, 1.1*90, 1.2*80, 1.2*50, 1.2*60, 1.2*80,
1.2*90, 1.2*100, 1.2*110, 1.2*120, 1.2*130, 1.2*140, 1.2*170,
1.5*50, 1.5*60, 1.5*80, 1.5*90, 1.5*100, 1.5*110, 1.5*130, 1.5*150,
1.5*160, 1.5*170, 1.5*180, 1.5*220, 1.4*27, 1.4*30, 1.4*40, 1.4*45,
1.4*50, 1.4*55, 1.4*60, 1.4*70, 1.6*70, 1.6*73,
],
'monitor': [
0.6*100, 0.6*104, 0.6*120, 0.6*133, 0.6*140, 0.6*150, 0.6*170,
0.6*210
],
'ram': [
0.8*64, 0.8*128, 0.8*160, 0.8*192, 0.8*256, 0.8*320, 0.8*384,
0.8*512, 0.8*1024, 0.8*2048
],
'hd': [
4*8, 4*10, 4*12, 4*15, 4*20, 4*30, 4*40, 4*60, 4*80, 4*120
],
}
def __init__(self, **kwargs):
super().__init__(sum(attr[1] for attr in self._ATTRIBUTES))
self.cost_matrix = np.hstack([
np.array(self._ATTR_TO_COSTS[attr], dtype=float)
for attr, _ in self._ATTRIBUTES
]).reshape((1, -1)) / 2754.4
def _add_constraints(self, model, x):
base, offs = 0, {}
for attr, size in self._ATTRIBUTES:
offs[attr] = base
x_attr = [x[z] for z in range(base, base + size)]
model.addConstr(gurobi.quicksum(x_attr) == 1)
base += size
def implies(head, body):
# NOTE here we subtract 1 from head and body bits because the bit
# numbers in the constraints were computed starting from one, to
# work in MiniZinc, while Gurobi expects them to start from zero
head = 1 - x[head - 1]
body = gurobi.quicksum([x[i - 1] for i in body])
return model.addConstr(head + body >= 1)
# Manufacturer -> Type
implies(offs['manufacturer'] + 2, [offs['pctype'] + i for i in [1, 2]])
implies(offs['manufacturer'] + 4, [offs['pctype'] + 1])
implies(offs['manufacturer'] + 6, [offs['pctype'] + 2])
implies(offs['manufacturer'] + 7, [offs['pctype'] + i for i in [1, 3]])
# Manufacturer -> CPU
implies(offs['manufacturer'] + 1, [offs['cpu'] + i for i in range(28, 37+1)])
implies(offs['manufacturer'] + 2, [offs['cpu'] + i for i in list(range(1, 4+1)) + list(range(6, 27+1))])
implies(offs['manufacturer'] + 7, [offs['cpu'] + i for i in list(range(1, 4+1)) + list(range(6, 27+1))])
implies(offs['manufacturer'] + 4, [offs['cpu'] + i for i in range(5, 27+1)])
implies(offs['manufacturer'] + 3, [offs['cpu'] + i for i in range(6, 27+1)])
implies(offs['manufacturer'] + 5, [offs['cpu'] + i for i in range(6, 27+1)])
implies(offs['manufacturer'] + 8, [offs['cpu'] + i for i in range(6, 27+1)])
implies(offs['manufacturer'] + 6, [offs['cpu'] + i for i in range(16, 27+1)])
# Type -> RAM
implies(offs['pctype'] + 1, [offs['ram'] + i for i in range(1, 9+1)])
implies(offs['pctype'] + 2, [offs['ram'] + i for i in [2, 5, 8, 9]])
implies(offs['pctype'] + 3, [offs['ram'] + i for i in [5, 8, 9, 10]])
# Type -> HD
implies(offs['pctype'] + 1, [offs['hd'] + i for i in range(1, 6+1)])
implies(offs['pctype'] + 2, [offs['hd'] + i for i in range(5, 10+1)])
implies(offs['pctype'] + 3, [offs['hd'] + i for i in range(5, 10+1)])
# Type -> Monitor
implies(offs['pctype'] + 1, [offs['monitor'] + i for i in range(1, 6+1)])
implies(offs['pctype'] + 2, [offs['monitor'] + i for i in range(6, 8+1)])
implies(offs['pctype'] + 3, [offs['monitor'] + i for i in range(6, 8+1)])
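        # Illustrative reading of one constraint above (bit numbers are 1-based,
        # as the NOTE in implies() explains): the call
        #   implies(offs['pctype'] + 1, [offs['hd'] + i for i in range(1, 6 + 1)])
        # expands to the linear constraint
        #   (1 - x[pctype_1]) + (x[hd_1] + ... + x[hd_6]) >= 1
        # i.e. "if the first PC type is selected, the hard disk must be one of
        # the first six options". Which concrete type/disk each bit denotes is an
        # assumption; the model only exposes attribute names and sizes.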
| mit |
procangroup/edx-platform | openedx/core/djangoapps/programs/signals.py | 11 | 1873 | """
This module contains signals / handlers related to programs.
"""
import logging
from django.dispatch import receiver
from openedx.core.djangoapps.signals.signals import COURSE_CERT_AWARDED
LOGGER = logging.getLogger(__name__)
@receiver(COURSE_CERT_AWARDED)
def handle_course_cert_awarded(sender, user, course_key, mode, status, **kwargs): # pylint: disable=unused-argument
"""
If programs is enabled and a learner is awarded a course certificate,
schedule a celery task to process any programs certificates for which
the learner may now be eligible.
Args:
sender:
class of the object instance that sent this signal
user:
django.contrib.auth.User - the user to whom a cert was awarded
course_key:
refers to the course run for which the cert was awarded
mode:
mode / certificate type, e.g. "verified"
status:
either "downloadable" or "generating"
Returns:
None
"""
# Import here instead of top of file since this module gets imported before
# the credentials app is loaded, resulting in a Django deprecation warning.
from openedx.core.djangoapps.credentials.models import CredentialsApiConfig
# Avoid scheduling new tasks if certification is disabled.
if not CredentialsApiConfig.current().is_learner_issuance_enabled:
return
# schedule background task to process
LOGGER.debug(
'handling COURSE_CERT_AWARDED: username=%s, course_key=%s, mode=%s, status=%s',
user,
course_key,
mode,
status,
)
# import here, because signal is registered at startup, but items in tasks are not yet able to be loaded
from openedx.core.djangoapps.programs.tasks.v1.tasks import award_program_certificates
award_program_certificates.delay(user.username)
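# Illustrative sketch (assumed sender-side code, not part of this module): the
# certificates app emits the signal roughly like
#
#   COURSE_CERT_AWARDED.send_robust(
#       sender=GeneratedCertificate, user=user, course_key=course_key,
#       mode='verified', status='downloadable')
#
# which Django routes to handle_course_cert_awarded() above; the sender class and
# the argument values shown are assumptions for illustration.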
| agpl-3.0 |
hbrunn/OpenUpgrade | addons/hr_holidays/tests/test_holidays_flow.py | 44 | 10276 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp.addons.hr_holidays.tests.common import TestHrHolidaysBase
from openerp.exceptions import AccessError
from openerp.osv.orm import except_orm
from openerp.tools import mute_logger
class TestHolidaysFlow(TestHrHolidaysBase):
@mute_logger('openerp.addons.base.ir.ir_model', 'openerp.osv.orm')
def test_00_leave_request_flow(self):
""" Testing leave request flow """
cr, uid = self.cr, self.uid
def _check_holidays_status(holiday_status, ml, lt, rl, vrl):
self.assertEqual(holiday_status.max_leaves, ml,
'hr_holidays: wrong type days computation')
self.assertEqual(holiday_status.leaves_taken, lt,
'hr_holidays: wrong type days computation')
self.assertEqual(holiday_status.remaining_leaves, rl,
'hr_holidays: wrong type days computation')
self.assertEqual(holiday_status.virtual_remaining_leaves, vrl,
'hr_holidays: wrong type days computation')
# HrUser creates some holiday statuses -> crash because only HrManagers should do this
with self.assertRaises(AccessError):
self.holidays_status_dummy = self.hr_holidays_status.create(cr, self.user_hruser_id, {
'name': 'UserCheats',
'limit': True,
})
# HrManager creates some holiday statuses
self.holidays_status_0 = self.hr_holidays_status.create(cr, self.user_hrmanager_id, {
'name': 'WithMeetingType',
'limit': True,
'categ_id': self.registry('calendar.event.type').create(cr, self.user_hrmanager_id, {'name': 'NotLimitedMeetingType'}),
})
self.holidays_status_1 = self.hr_holidays_status.create(cr, self.user_hrmanager_id, {
'name': 'NotLimited',
'limit': True,
})
self.holidays_status_2 = self.hr_holidays_status.create(cr, self.user_hrmanager_id, {
'name': 'Limited',
'limit': False,
'double_validation': True,
})
# --------------------------------------------------
# Case1: unlimited type of leave request
# --------------------------------------------------
# Employee creates a leave request for another employee -> should crash
with self.assertRaises(except_orm):
self.hr_holidays.create(cr, self.user_employee_id, {
'name': 'Hol10',
'employee_id': self.employee_hruser_id,
'holiday_status_id': self.holidays_status_1,
'date_from': (datetime.today() - relativedelta(days=1)),
'date_to': datetime.today(),
'number_of_days_temp': 1,
})
# Employee creates a leave request in a no-limit category
hol1_id = self.hr_holidays.create(cr, self.user_employee_id, {
'name': 'Hol11',
'employee_id': self.employee_emp_id,
'holiday_status_id': self.holidays_status_1,
'date_from': (datetime.today() - relativedelta(days=1)),
'date_to': datetime.today(),
'number_of_days_temp': 1,
})
hol1 = self.hr_holidays.browse(cr, self.user_hruser_id, hol1_id)
self.assertEqual(hol1.state, 'confirm', 'hr_holidays: newly created leave request should be in confirm state')
# Employee validates its leave request -> should not work
self.hr_holidays.signal_validate(cr, self.user_employee_id, [hol1_id])
hol1.refresh()
self.assertEqual(hol1.state, 'confirm', 'hr_holidays: employee should not be able to validate its own leave request')
# HrUser validates the employee leave request
self.hr_holidays.signal_validate(cr, self.user_hrmanager_id, [hol1_id])
hol1.refresh()
self.assertEqual(hol1.state, 'validate', 'hr_holidays: validates leave request should be in validate state')
# --------------------------------------------------
# Case2: limited type of leave request
# --------------------------------------------------
# Employee creates a new leave request at the same time -> crash, avoid interlapping
with self.assertRaises(except_orm):
self.hr_holidays.create(cr, self.user_employee_id, {
'name': 'Hol21',
'employee_id': self.employee_emp_id,
'holiday_status_id': self.holidays_status_1,
'date_from': (datetime.today() - relativedelta(days=1)).strftime('%Y-%m-%d %H:%M'),
'date_to': datetime.today(),
'number_of_days_temp': 1,
})
# Employee creates a leave request in a limited category -> crash, not enough days left
with self.assertRaises(except_orm):
self.hr_holidays.create(cr, self.user_employee_id, {
'name': 'Hol22',
'employee_id': self.employee_emp_id,
'holiday_status_id': self.holidays_status_2,
'date_from': (datetime.today() + relativedelta(days=0)).strftime('%Y-%m-%d %H:%M'),
'date_to': (datetime.today() + relativedelta(days=1)),
'number_of_days_temp': 1,
})
# Clean transaction
self.hr_holidays.unlink(cr, uid, self.hr_holidays.search(cr, uid, [('name', 'in', ['Hol21', 'Hol22'])]))
# HrUser allocates some leaves to the employee
aloc1_id = self.hr_holidays.create(cr, self.user_hruser_id, {
'name': 'Days for limited category',
'employee_id': self.employee_emp_id,
'holiday_status_id': self.holidays_status_2,
'type': 'add',
'number_of_days_temp': 2,
})
# HrUser validates the allocation request
self.hr_holidays.signal_validate(cr, self.user_hruser_id, [aloc1_id])
self.hr_holidays.signal_second_validate(cr, self.user_hruser_id, [aloc1_id])
# Checks Employee has effectively some days left
hol_status_2 = self.hr_holidays_status.browse(cr, self.user_employee_id, self.holidays_status_2)
_check_holidays_status(hol_status_2, 2.0, 0.0, 2.0, 2.0)
# Employee creates a leave request in the limited category, now that he has some days left
hol2_id = self.hr_holidays.create(cr, self.user_employee_id, {
'name': 'Hol22',
'employee_id': self.employee_emp_id,
'holiday_status_id': self.holidays_status_2,
'date_from': (datetime.today() + relativedelta(days=2)).strftime('%Y-%m-%d %H:%M'),
'date_to': (datetime.today() + relativedelta(days=3)),
'number_of_days_temp': 1,
})
hol2 = self.hr_holidays.browse(cr, self.user_hruser_id, hol2_id)
# Check left days: - 1 virtual remaining day
hol_status_2.refresh()
_check_holidays_status(hol_status_2, 2.0, 0.0, 2.0, 1.0)
# HrUser validates the first step
self.hr_holidays.signal_validate(cr, self.user_hruser_id, [hol2_id])
hol2.refresh()
self.assertEqual(hol2.state, 'validate1',
'hr_holidays: first validation should lead to validate1 state')
# HrUser validates the second step
self.hr_holidays.signal_second_validate(cr, self.user_hruser_id, [hol2_id])
hol2.refresh()
self.assertEqual(hol2.state, 'validate',
'hr_holidays: second validation should lead to validate state')
# Check left days: - 1 day taken
hol_status_2.refresh()
_check_holidays_status(hol_status_2, 2.0, 1.0, 1.0, 1.0)
# HrManager finds an error: he refuses the leave request
self.hr_holidays.signal_refuse(cr, self.user_hrmanager_id, [hol2_id])
hol2.refresh()
self.assertEqual(hol2.state, 'refuse',
'hr_holidays: refuse should lead to refuse state')
# Check left days: 2 days left again
hol_status_2.refresh()
_check_holidays_status(hol_status_2, 2.0, 0.0, 2.0, 2.0)
# Annoyed, HrUser tries to fix its error and tries to reset the leave request -> does not work, only HrManager
self.hr_holidays.signal_reset(cr, self.user_hruser_id, [hol2_id])
self.assertEqual(hol2.state, 'refuse',
'hr_holidays: hr_user should not be able to reset a refused leave request')
# HrManager resets the request
self.hr_holidays.signal_reset(cr, self.user_hrmanager_id, [hol2_id])
hol2.refresh()
self.assertEqual(hol2.state, 'draft',
'hr_holidays: resetting should lead to draft state')
# HrManager changes the date and put too much days -> crash when confirming
self.hr_holidays.write(cr, self.user_hrmanager_id, [hol2_id], {
'date_from': (datetime.today() + relativedelta(days=4)).strftime('%Y-%m-%d %H:%M'),
'date_to': (datetime.today() + relativedelta(days=7)),
'number_of_days_temp': 4,
})
with self.assertRaises(except_orm):
self.hr_holidays.signal_confirm(cr, self.user_hrmanager_id, [hol2_id])
| agpl-3.0 |