file_name
large_stringlengths 4
140
| prefix
large_stringlengths 0
39k
| suffix
large_stringlengths 0
36.1k
| middle
large_stringlengths 0
29.4k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
version.py | #!/usr/bin/python3
"""Script to determine the Pywikibot version (tag, revision and date).
.. versionchanged:: 7.0
version script was moved to the framework scripts folder
"""
#
# (C) Pywikibot team, 2007-2021
#
# Distributed under the terms of the MIT license.
#
import codecs
import os
import sys
import pywikibot
from pywikibot.version import get_toolforge_hostname, getversion
class DummyModule:
"""Fake module instance."""
__version__ = 'n/a'
try:
import setuptools
except ImportError:
setuptools = DummyModule()
try:
import mwparserfromhell
except ImportError:
mwparserfromhell = DummyModule()
try:
import wikitextparser
except ImportError:
wikitextparser = DummyModule()
try:
import requests
except ImportError:
requests = DummyModule()
WMF_CACERT = 'MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs'
def | (*args: str) -> None:
"""Print pywikibot version and important settings."""
pywikibot.output('Pywikibot: ' + getversion())
pywikibot.output('Release version: ' + pywikibot.__version__)
pywikibot.output('setuptools version: ' + setuptools.__version__)
pywikibot.output('mwparserfromhell version: '
+ mwparserfromhell.__version__)
pywikibot.output('wikitextparser version: ' + wikitextparser.__version__)
pywikibot.output('requests version: ' + requests.__version__)
has_wikimedia_cert = False
if (not hasattr(requests, 'certs')
or not hasattr(requests.certs, 'where')
or not callable(requests.certs.where)):
pywikibot.output(' cacerts: not defined')
elif not os.path.isfile(requests.certs.where()):
pywikibot.output(' cacerts: {} (missing)'.format(
requests.certs.where()))
else:
pywikibot.output(' cacerts: ' + requests.certs.where())
with codecs.open(requests.certs.where(), 'r', 'utf-8') as cert_file:
text = cert_file.read()
if WMF_CACERT in text:
has_wikimedia_cert = True
pywikibot.output(' certificate test: {}'
.format('ok' if has_wikimedia_cert else 'not ok'))
if not has_wikimedia_cert:
pywikibot.output(' Please reinstall requests!')
pywikibot.output('Python: ' + sys.version)
toolforge_env_hostname = get_toolforge_hostname()
if toolforge_env_hostname:
pywikibot.output('Toolforge hostname: ' + toolforge_env_hostname)
# check environment settings
settings = {key for key in os.environ if key.startswith('PYWIKIBOT')}
settings.update(['PYWIKIBOT_DIR', 'PYWIKIBOT_DIR_PWB',
'PYWIKIBOT_NO_USER_CONFIG'])
for environ_name in sorted(settings):
pywikibot.output(
'{}: {}'.format(environ_name,
os.environ.get(environ_name, 'Not set') or "''"))
pywikibot.output('Config base dir: ' + pywikibot.config.base_dir)
for family, usernames in pywikibot.config.usernames.items():
if not usernames:
continue
pywikibot.output('Usernames for family {!r}:'.format(family))
for lang, username in usernames.items():
pywikibot.output('\t{}: {}'.format(lang, username))
if __name__ == '__main__':
main()
| main | identifier_name |
version.py | #!/usr/bin/python3
"""Script to determine the Pywikibot version (tag, revision and date).
.. versionchanged:: 7.0
version script was moved to the framework scripts folder
"""
#
# (C) Pywikibot team, 2007-2021
#
# Distributed under the terms of the MIT license.
#
import codecs
import os
import sys
import pywikibot
from pywikibot.version import get_toolforge_hostname, getversion
class DummyModule:
"""Fake module instance."""
__version__ = 'n/a'
try:
import setuptools
except ImportError:
setuptools = DummyModule()
try:
import mwparserfromhell
except ImportError:
mwparserfromhell = DummyModule()
try:
import wikitextparser
except ImportError:
wikitextparser = DummyModule()
try:
import requests
except ImportError:
requests = DummyModule()
WMF_CACERT = 'MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs'
def main(*args: str) -> None:
|
if __name__ == '__main__':
main()
| """Print pywikibot version and important settings."""
pywikibot.output('Pywikibot: ' + getversion())
pywikibot.output('Release version: ' + pywikibot.__version__)
pywikibot.output('setuptools version: ' + setuptools.__version__)
pywikibot.output('mwparserfromhell version: '
+ mwparserfromhell.__version__)
pywikibot.output('wikitextparser version: ' + wikitextparser.__version__)
pywikibot.output('requests version: ' + requests.__version__)
has_wikimedia_cert = False
if (not hasattr(requests, 'certs')
or not hasattr(requests.certs, 'where')
or not callable(requests.certs.where)):
pywikibot.output(' cacerts: not defined')
elif not os.path.isfile(requests.certs.where()):
pywikibot.output(' cacerts: {} (missing)'.format(
requests.certs.where()))
else:
pywikibot.output(' cacerts: ' + requests.certs.where())
with codecs.open(requests.certs.where(), 'r', 'utf-8') as cert_file:
text = cert_file.read()
if WMF_CACERT in text:
has_wikimedia_cert = True
pywikibot.output(' certificate test: {}'
.format('ok' if has_wikimedia_cert else 'not ok'))
if not has_wikimedia_cert:
pywikibot.output(' Please reinstall requests!')
pywikibot.output('Python: ' + sys.version)
toolforge_env_hostname = get_toolforge_hostname()
if toolforge_env_hostname:
pywikibot.output('Toolforge hostname: ' + toolforge_env_hostname)
# check environment settings
settings = {key for key in os.environ if key.startswith('PYWIKIBOT')}
settings.update(['PYWIKIBOT_DIR', 'PYWIKIBOT_DIR_PWB',
'PYWIKIBOT_NO_USER_CONFIG'])
for environ_name in sorted(settings):
pywikibot.output(
'{}: {}'.format(environ_name,
os.environ.get(environ_name, 'Not set') or "''"))
pywikibot.output('Config base dir: ' + pywikibot.config.base_dir)
for family, usernames in pywikibot.config.usernames.items():
if not usernames:
continue
pywikibot.output('Usernames for family {!r}:'.format(family))
for lang, username in usernames.items():
pywikibot.output('\t{}: {}'.format(lang, username)) | identifier_body |
challenge.py | # -*- coding: utf-8 -*-
# privacyIDEA is a fork of LinOTP
#
# 2014-12-07 Cornelius Kölbel <[email protected]>
#
# Copyright (C) 2014 Cornelius Kölbel
# License: AGPLv3
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
This is a helper module for the challenges database table.
It is used by the lib.tokenclass
The method is tested in test_lib_challenges
"""
import logging
import six
from .log import log_with
from ..models import Challenge
from privacyidea.lib.error import ParameterError
log = logging.getLogger(__name__)
@log_with(log)
def get_challenges(serial=None, transaction_id=None, challenge=None):
"""
This returns a list of database challenge objects.
:param serial: challenges for this very serial number
:param transaction_id: challenges with this very transaction id
:param challenge: The challenge to be found | """
sql_query = Challenge.query
if serial is not None:
# filter for serial
sql_query = sql_query.filter(Challenge.serial == serial)
if transaction_id is not None:
# filter for transaction id
sql_query = sql_query.filter(Challenge.transaction_id ==
transaction_id)
if challenge is not None:
# filter for this challenge
sql_query = sql_query.filter(Challenge.challenge == challenge)
challenges = sql_query.all()
return challenges
@log_with(log)
def get_challenges_paginate(serial=None, transaction_id=None,
sortby=Challenge.timestamp,
sortdir="asc", psize=15, page=1):
"""
This function is used to retrieve a challenge list, that can be displayed in
the Web UI. It supports pagination.
Each retrieved page will also contain a "next" and a "prev", indicating
the next or previous page. If either does not exist, it is None.
:param serial: The serial of the token
:param transaction_id: The transaction_id of the challenge
:param sortby: Sort by a Challenge DB field. The default is
Challenge.timestamp.
:type sortby: A Challenge column or a string.
:param sortdir: Can be "asc" (default) or "desc"
:type sortdir: basestring
:param psize: The size of the page
:type psize: int
:param page: The number of the page to view. Starts with 1 ;-)
:type page: int
:return: dict with challenges, prev, next and count
:rtype: dict
"""
sql_query = _create_challenge_query(serial=serial,
transaction_id=transaction_id)
if isinstance(sortby, six.string_types):
# convert the string to a Challenge column
cols = Challenge.__table__.columns
sortby = cols.get(sortby)
if sortdir == "desc":
sql_query = sql_query.order_by(sortby.desc())
else:
sql_query = sql_query.order_by(sortby.asc())
pagination = sql_query.paginate(page, per_page=psize,
error_out=False)
challenges = pagination.items
prev = None
if pagination.has_prev:
prev = page-1
next = None
if pagination.has_next:
next = page + 1
challenge_list = []
for challenge in challenges:
challenge_dict = challenge.get()
challenge_list.append(challenge_dict)
ret = {"challenges": challenge_list,
"prev": prev,
"next": next,
"current": page,
"count": pagination.total}
return ret
def _create_challenge_query(serial=None, transaction_id=None):
"""
This function create the sql query for fetching transaction_ids. It is
used by get_challenge_paginate.
:return: An SQLAlchemy sql query
"""
sql_query = Challenge.query
if serial is not None and serial.strip("*"):
# filter for serial
if "*" in serial:
# match with "like"
sql_query = sql_query.filter(Challenge.serial.like(serial.replace(
"*", "%")))
else:
# exact match
sql_query = sql_query.filter(Challenge.serial == serial)
if transaction_id is not None and transaction_id.strip("*"):
# filter for serial
if "*" in transaction_id:
# match with "like"
sql_query = sql_query.filter(Challenge.transaction_id.like(
transaction_id.replace(
"*", "%")))
else:
# exact match
sql_query = sql_query.filter(Challenge.transaction_id == transaction_id)
return sql_query
def extract_answered_challenges(challenges):
"""
Given a list of challenge objects, extract and return a list of *answered* challenge.
A challenge is answered if it is not expired yet *and* if its ``otp_valid`` attribute
is set to True.
:param challenges: a list of challenge objects
:return: a list of answered challenge objects
"""
answered_challenges = []
for challenge in challenges:
# check if we are still in time.
if challenge.is_valid():
_, status = challenge.get_otp_status()
if status is True:
answered_challenges.append(challenge)
return answered_challenges | :return: list of objects | random_line_split |
challenge.py | # -*- coding: utf-8 -*-
# privacyIDEA is a fork of LinOTP
#
# 2014-12-07 Cornelius Kölbel <[email protected]>
#
# Copyright (C) 2014 Cornelius Kölbel
# License: AGPLv3
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
This is a helper module for the challenges database table.
It is used by the lib.tokenclass
The method is tested in test_lib_challenges
"""
import logging
import six
from .log import log_with
from ..models import Challenge
from privacyidea.lib.error import ParameterError
log = logging.getLogger(__name__)
@log_with(log)
def get_challenges(serial=None, transaction_id=None, challenge=None):
"""
This returns a list of database challenge objects.
:param serial: challenges for this very serial number
:param transaction_id: challenges with this very transaction id
:param challenge: The challenge to be found
:return: list of objects
"""
sql_query = Challenge.query
if serial is not None:
# filter for serial
sql_query = sql_query.filter(Challenge.serial == serial)
if transaction_id is not None:
# filter for transaction id
sql_query = sql_query.filter(Challenge.transaction_id ==
transaction_id)
if challenge is not None:
# filter for this challenge
sql_query = sql_query.filter(Challenge.challenge == challenge)
challenges = sql_query.all()
return challenges
@log_with(log)
def ge | erial=None, transaction_id=None,
sortby=Challenge.timestamp,
sortdir="asc", psize=15, page=1):
"""
This function is used to retrieve a challenge list, that can be displayed in
the Web UI. It supports pagination.
Each retrieved page will also contain a "next" and a "prev", indicating
the next or previous page. If either does not exist, it is None.
:param serial: The serial of the token
:param transaction_id: The transaction_id of the challenge
:param sortby: Sort by a Challenge DB field. The default is
Challenge.timestamp.
:type sortby: A Challenge column or a string.
:param sortdir: Can be "asc" (default) or "desc"
:type sortdir: basestring
:param psize: The size of the page
:type psize: int
:param page: The number of the page to view. Starts with 1 ;-)
:type page: int
:return: dict with challenges, prev, next and count
:rtype: dict
"""
sql_query = _create_challenge_query(serial=serial,
transaction_id=transaction_id)
if isinstance(sortby, six.string_types):
# convert the string to a Challenge column
cols = Challenge.__table__.columns
sortby = cols.get(sortby)
if sortdir == "desc":
sql_query = sql_query.order_by(sortby.desc())
else:
sql_query = sql_query.order_by(sortby.asc())
pagination = sql_query.paginate(page, per_page=psize,
error_out=False)
challenges = pagination.items
prev = None
if pagination.has_prev:
prev = page-1
next = None
if pagination.has_next:
next = page + 1
challenge_list = []
for challenge in challenges:
challenge_dict = challenge.get()
challenge_list.append(challenge_dict)
ret = {"challenges": challenge_list,
"prev": prev,
"next": next,
"current": page,
"count": pagination.total}
return ret
def _create_challenge_query(serial=None, transaction_id=None):
"""
This function create the sql query for fetching transaction_ids. It is
used by get_challenge_paginate.
:return: An SQLAlchemy sql query
"""
sql_query = Challenge.query
if serial is not None and serial.strip("*"):
# filter for serial
if "*" in serial:
# match with "like"
sql_query = sql_query.filter(Challenge.serial.like(serial.replace(
"*", "%")))
else:
# exact match
sql_query = sql_query.filter(Challenge.serial == serial)
if transaction_id is not None and transaction_id.strip("*"):
# filter for serial
if "*" in transaction_id:
# match with "like"
sql_query = sql_query.filter(Challenge.transaction_id.like(
transaction_id.replace(
"*", "%")))
else:
# exact match
sql_query = sql_query.filter(Challenge.transaction_id == transaction_id)
return sql_query
def extract_answered_challenges(challenges):
"""
Given a list of challenge objects, extract and return a list of *answered* challenge.
A challenge is answered if it is not expired yet *and* if its ``otp_valid`` attribute
is set to True.
:param challenges: a list of challenge objects
:return: a list of answered challenge objects
"""
answered_challenges = []
for challenge in challenges:
# check if we are still in time.
if challenge.is_valid():
_, status = challenge.get_otp_status()
if status is True:
answered_challenges.append(challenge)
return answered_challenges
| t_challenges_paginate(s | identifier_name |
challenge.py | # -*- coding: utf-8 -*-
# privacyIDEA is a fork of LinOTP
#
# 2014-12-07 Cornelius Kölbel <[email protected]>
#
# Copyright (C) 2014 Cornelius Kölbel
# License: AGPLv3
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
This is a helper module for the challenges database table.
It is used by the lib.tokenclass
The method is tested in test_lib_challenges
"""
import logging
import six
from .log import log_with
from ..models import Challenge
from privacyidea.lib.error import ParameterError
log = logging.getLogger(__name__)
@log_with(log)
def get_challenges(serial=None, transaction_id=None, challenge=None):
"""
This returns a list of database challenge objects.
:param serial: challenges for this very serial number
:param transaction_id: challenges with this very transaction id
:param challenge: The challenge to be found
:return: list of objects
"""
sql_query = Challenge.query
if serial is not None:
# filter for serial
sql_query = sql_query.filter(Challenge.serial == serial)
if transaction_id is not None:
# filter for transaction id
sql_query = sql_query.filter(Challenge.transaction_id ==
transaction_id)
if challenge is not None:
# filter for this challenge
sql_query = sql_query.filter(Challenge.challenge == challenge)
challenges = sql_query.all()
return challenges
@log_with(log)
def get_challenges_paginate(serial=None, transaction_id=None,
sortby=Challenge.timestamp,
sortdir="asc", psize=15, page=1):
"""
This function is used to retrieve a challenge list, that can be displayed in
the Web UI. It supports pagination.
Each retrieved page will also contain a "next" and a "prev", indicating
the next or previous page. If either does not exist, it is None.
:param serial: The serial of the token
:param transaction_id: The transaction_id of the challenge
:param sortby: Sort by a Challenge DB field. The default is
Challenge.timestamp.
:type sortby: A Challenge column or a string.
:param sortdir: Can be "asc" (default) or "desc"
:type sortdir: basestring
:param psize: The size of the page
:type psize: int
:param page: The number of the page to view. Starts with 1 ;-)
:type page: int
:return: dict with challenges, prev, next and count
:rtype: dict
"""
sql_query = _create_challenge_query(serial=serial,
transaction_id=transaction_id)
if isinstance(sortby, six.string_types):
# convert the string to a Challenge column
cols = Challenge.__table__.columns
sortby = cols.get(sortby)
if sortdir == "desc":
sql_query = sql_query.order_by(sortby.desc())
else:
sql_query = sql_query.order_by(sortby.asc())
pagination = sql_query.paginate(page, per_page=psize,
error_out=False)
challenges = pagination.items
prev = None
if pagination.has_prev:
prev = page-1
next = None
if pagination.has_next:
next = page + 1
challenge_list = []
for challenge in challenges:
challenge_dict = challenge.get()
challenge_list.append(challenge_dict)
ret = {"challenges": challenge_list,
"prev": prev,
"next": next,
"current": page,
"count": pagination.total}
return ret
def _create_challenge_query(serial=None, transaction_id=None):
"""
This function create the sql query for fetching transaction_ids. It is
used by get_challenge_paginate.
:return: An SQLAlchemy sql query
"""
sql_query = Challenge.query
if serial is not None and serial.strip("*"):
# filter for serial
if "*" in serial:
# match with "like"
sq | else:
# exact match
sql_query = sql_query.filter(Challenge.serial == serial)
if transaction_id is not None and transaction_id.strip("*"):
# filter for serial
if "*" in transaction_id:
# match with "like"
sql_query = sql_query.filter(Challenge.transaction_id.like(
transaction_id.replace(
"*", "%")))
else:
# exact match
sql_query = sql_query.filter(Challenge.transaction_id == transaction_id)
return sql_query
def extract_answered_challenges(challenges):
"""
Given a list of challenge objects, extract and return a list of *answered* challenge.
A challenge is answered if it is not expired yet *and* if its ``otp_valid`` attribute
is set to True.
:param challenges: a list of challenge objects
:return: a list of answered challenge objects
"""
answered_challenges = []
for challenge in challenges:
# check if we are still in time.
if challenge.is_valid():
_, status = challenge.get_otp_status()
if status is True:
answered_challenges.append(challenge)
return answered_challenges
| l_query = sql_query.filter(Challenge.serial.like(serial.replace(
"*", "%")))
| conditional_block |
challenge.py | # -*- coding: utf-8 -*-
# privacyIDEA is a fork of LinOTP
#
# 2014-12-07 Cornelius Kölbel <[email protected]>
#
# Copyright (C) 2014 Cornelius Kölbel
# License: AGPLv3
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
This is a helper module for the challenges database table.
It is used by the lib.tokenclass
The method is tested in test_lib_challenges
"""
import logging
import six
from .log import log_with
from ..models import Challenge
from privacyidea.lib.error import ParameterError
log = logging.getLogger(__name__)
@log_with(log)
def get_challenges(serial=None, transaction_id=None, challenge=None):
"""
This returns a list of database challenge objects.
:param serial: challenges for this very serial number
:param transaction_id: challenges with this very transaction id
:param challenge: The challenge to be found
:return: list of objects
"""
sql_query = Challenge.query
if serial is not None:
# filter for serial
sql_query = sql_query.filter(Challenge.serial == serial)
if transaction_id is not None:
# filter for transaction id
sql_query = sql_query.filter(Challenge.transaction_id ==
transaction_id)
if challenge is not None:
# filter for this challenge
sql_query = sql_query.filter(Challenge.challenge == challenge)
challenges = sql_query.all()
return challenges
@log_with(log)
def get_challenges_paginate(serial=None, transaction_id=None,
sortby=Challenge.timestamp,
sortdir="asc", psize=15, page=1):
"" |
def _create_challenge_query(serial=None, transaction_id=None):
"""
This function create the sql query for fetching transaction_ids. It is
used by get_challenge_paginate.
:return: An SQLAlchemy sql query
"""
sql_query = Challenge.query
if serial is not None and serial.strip("*"):
# filter for serial
if "*" in serial:
# match with "like"
sql_query = sql_query.filter(Challenge.serial.like(serial.replace(
"*", "%")))
else:
# exact match
sql_query = sql_query.filter(Challenge.serial == serial)
if transaction_id is not None and transaction_id.strip("*"):
# filter for serial
if "*" in transaction_id:
# match with "like"
sql_query = sql_query.filter(Challenge.transaction_id.like(
transaction_id.replace(
"*", "%")))
else:
# exact match
sql_query = sql_query.filter(Challenge.transaction_id == transaction_id)
return sql_query
def extract_answered_challenges(challenges):
"""
Given a list of challenge objects, extract and return a list of *answered* challenge.
A challenge is answered if it is not expired yet *and* if its ``otp_valid`` attribute
is set to True.
:param challenges: a list of challenge objects
:return: a list of answered challenge objects
"""
answered_challenges = []
for challenge in challenges:
# check if we are still in time.
if challenge.is_valid():
_, status = challenge.get_otp_status()
if status is True:
answered_challenges.append(challenge)
return answered_challenges
| "
This function is used to retrieve a challenge list, that can be displayed in
the Web UI. It supports pagination.
Each retrieved page will also contain a "next" and a "prev", indicating
the next or previous page. If either does not exist, it is None.
:param serial: The serial of the token
:param transaction_id: The transaction_id of the challenge
:param sortby: Sort by a Challenge DB field. The default is
Challenge.timestamp.
:type sortby: A Challenge column or a string.
:param sortdir: Can be "asc" (default) or "desc"
:type sortdir: basestring
:param psize: The size of the page
:type psize: int
:param page: The number of the page to view. Starts with 1 ;-)
:type page: int
:return: dict with challenges, prev, next and count
:rtype: dict
"""
sql_query = _create_challenge_query(serial=serial,
transaction_id=transaction_id)
if isinstance(sortby, six.string_types):
# convert the string to a Challenge column
cols = Challenge.__table__.columns
sortby = cols.get(sortby)
if sortdir == "desc":
sql_query = sql_query.order_by(sortby.desc())
else:
sql_query = sql_query.order_by(sortby.asc())
pagination = sql_query.paginate(page, per_page=psize,
error_out=False)
challenges = pagination.items
prev = None
if pagination.has_prev:
prev = page-1
next = None
if pagination.has_next:
next = page + 1
challenge_list = []
for challenge in challenges:
challenge_dict = challenge.get()
challenge_list.append(challenge_dict)
ret = {"challenges": challenge_list,
"prev": prev,
"next": next,
"current": page,
"count": pagination.total}
return ret
| identifier_body |
http.py | # encoding: utf-8
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski ([email protected])
#
# MIMICS THE requests API (http://docs.python-requests.org/en/latest/)
# DEMANDS data IS A JSON-SERIALIZABLE STRUCTURE
# WITH ADDED default_headers THAT CAN BE SET USING mo_logs.settings
# EG
# {"debug.constants":{
# "pyLibrary.env.http.default_headers":{"From":"[email protected]"}
# }}
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from copy import copy
from mmap import mmap
from numbers import Number
from tempfile import TemporaryFile
from requests import sessions, Response
import mo_json
from pyLibrary import convert
from mo_logs.exceptions import Except
from mo_logs import Log
from mo_dots import Data, coalesce, wrap, set_default, unwrap
from pyLibrary.env.big_data import safe_size, ibytes2ilines, icompressed2ibytes
from mo_math import Math
from jx_python import jx
from mo_threads import Thread, Lock
from mo_threads import Till
from mo_times.durations import Duration
DEBUG = False
FILE_SIZE_LIMIT = 100 * 1024 * 1024
MIN_READ_SIZE = 8 * 1024
ZIP_REQUEST = False
default_headers = Data() # TODO: MAKE THIS VARIABLE A SPECIAL TYPE OF EXPECTED MODULE PARAMETER SO IT COMPLAINS IF NOT SET
default_timeout = 600
_warning_sent = False
def request(method, url, zip=None, retry=None, **kwargs):
"""
JUST LIKE requests.request() BUT WITH DEFAULT HEADERS AND FIXES
DEMANDS data IS ONE OF:
* A JSON-SERIALIZABLE STRUCTURE, OR
* LIST OF JSON-SERIALIZABLE STRUCTURES, OR
* None
Parameters
* zip - ZIP THE REQUEST BODY, IF BIG ENOUGH
* json - JSON-SERIALIZABLE STRUCTURE
* retry - {"times": x, "sleep": y} STRUCTURE
THE BYTE_STRINGS (b"") ARE NECESSARY TO PREVENT httplib.py FROM **FREAKING OUT**
IT APPEARS requests AND httplib.py SIMPLY CONCATENATE STRINGS BLINDLY, WHICH
INCLUDES url AND headers
"""
global _warning_sent
if not default_headers and not _warning_sent:
_warning_sent = True
Log.warning(
"The pyLibrary.env.http module was meant to add extra "
"default headers to all requests, specifically the 'Referer' "
"header with a URL to the project. Use the `pyLibrary.debug.constants.set()` "
"function to set `pyLibrary.env.http.default_headers`"
)
if isinstance(url, list):
# TRY MANY URLS
failures = []
for remaining, u in jx.countdown(url):
try:
response = request(method, u, zip=zip, retry=retry, **kwargs)
if Math.round(response.status_code, decimal=-2) not in [400, 500]:
return response
if not remaining:
return response
except Exception as e:
e = Except.wrap(e)
failures.append(e)
Log.error("Tried {{num}} urls", num=len(url), cause=failures)
if b"session" in kwargs:
session = kwargs[b"session"]
del kwargs[b"session"]
else:
session = sessions.Session()
session.headers.update(default_headers)
if zip is None:
zip = ZIP_REQUEST
if isinstance(url, unicode):
# httplib.py WILL **FREAK OUT** IF IT SEES ANY UNICODE
url = url.encode("ascii")
_to_ascii_dict(kwargs)
timeout = kwargs[b'timeout'] = coalesce(kwargs.get(b'timeout'), default_timeout)
if retry == None:
retry = Data(times=1, sleep=0)
elif isinstance(retry, Number):
retry = Data(times=retry, sleep=1)
else:
retry = wrap(retry)
if isinstance(retry.sleep, Duration):
retry.sleep = retry.sleep.seconds
set_default(retry, {"times": 1, "sleep": 0})
if b'json' in kwargs:
kwargs[b'data'] = convert.value2json(kwargs[b'json']).encode("utf8")
del kwargs[b'json']
try:
headers = kwargs[b"headers"] = unwrap(coalesce(wrap(kwargs)[b"headers"], {}))
set_default(headers, {b"accept-encoding": b"compress, gzip"})
if zip and len(coalesce(kwargs.get(b"data"))) > 1000:
compressed = convert.bytes2zip(kwargs[b"data"])
headers[b'content-encoding'] = b'gzip'
kwargs[b"data"] = compressed
_to_ascii_dict(headers)
else:
_to_ascii_dict(headers)
except Exception as e:
Log.error("Request setup failure on {{url}}", url=url, cause=e)
errors = []
for r in range(retry.times):
if r:
Till(seconds=retry.sleep).wait()
try:
if DEBUG:
Log.note("http {{method}} to {{url}}", method=method, url=url)
return session.request(method=method, url=url, **kwargs)
except Exception as e:
errors.append(Except.wrap(e))
if " Read timed out." in errors[0]:
Log.error("Tried {{times}} times: Timeout failure (timeout was {{timeout}}", timeout=timeout, times=retry.times, cause=errors[0])
else:
Log.error("Tried {{times}} times: Request failure of {{url}}", url=url, times=retry.times, cause=errors[0])
def _to_ascii_dict(headers):
if headers is None:
return
for k, v in copy(headers).items():
if isinstance(k, unicode):
del headers[k]
if isinstance(v, unicode):
headers[k.encode("ascii")] = v.encode("ascii")
else:
headers[k.encode("ascii")] = v
elif isinstance(v, unicode):
headers[k] = v.encode("ascii")
def get(url, **kwargs):
kwargs.setdefault(b'allow_redirects', True)
kwargs[b"stream"] = True
return HttpResponse(request(b'get', url, **kwargs))
def get_json(url, **kwargs):
"""
ASSUME RESPONSE IN IN JSON
"""
response = get(url, **kwargs)
c = response.all_content
return mo_json.json2value(convert.utf82unicode(c))
def options(url, **kwargs):
kwargs.setdefault(b'allow_redirects', True)
kwargs[b"stream"] = True
return HttpResponse(request(b'options', url, **kwargs))
def head(url, **kwargs):
kwargs.setdefault(b'allow_redirects', False)
kwargs[b"stream"] = True
return HttpResponse(request(b'head', url, **kwargs))
def post(url, **kwargs):
kwargs[b"stream"] = True
return HttpResponse(request(b'post', url, **kwargs))
def delete(url, **kwargs):
return HttpResponse(request(b'delete', url, **kwargs))
def post_json(url, **kwargs):
"""
ASSUME RESPONSE IN IN JSON
"""
if b"json" in kwargs:
kwargs[b"data"] = convert.unicode2utf8(convert.value2json(kwargs[b"json"]))
elif b'data' in kwargs:
kwargs[b"data"] = convert.unicode2utf8(convert.value2json(kwargs[b"data"]))
else:
Log.error("Expecting `json` parameter")
response = post(url, **kwargs)
c = response.content
try:
details = mo_json.json2value(convert.utf82unicode(c))
except Exception as e:
Log.error("Unexpected return value {{content}}", content=c, cause=e)
if response.status_code not in [200, 201]:
Log.error("Bad response", cause=Except.wrap(details))
return details
def put(url, **kwargs):
return HttpResponse(request(b'put', url, **kwargs))
def patch(url, **kwargs):
kwargs[b"stream"] = True
return HttpResponse(request(b'patch', url, **kwargs))
def delete(url, **kwargs):
kwargs[b"stream"] = True
return HttpResponse(request(b'delete', url, **kwargs))
class HttpResponse(Response):
def __new__(cls, resp):
resp.__class__ = HttpResponse
return resp
def __init__(self, resp):
pass
self._cached_content = None
@property
def all_content(self):
# response.content WILL LEAK MEMORY (?BECAUSE OF PYPY"S POOR HANDLING OF GENERATORS?)
# THE TIGHT, SIMPLE, LOOP TO FILL blocks PREVENTS THAT LEAK
if self._content is not False:
self._cached_content = self._content
elif self._cached_content is None:
def read(size):
if self.raw._fp.fp is not None:
return self.raw.read(amt=size, decode_content=True)
else:
self.close()
return None
self._cached_content = safe_size(Data(read=read))
if hasattr(self._cached_content, "read"):
self._cached_content.seek(0)
return self._cached_content
@property
def all_lines(self):
return self.get_all_lines()
def get_all_lines(self, encoding="utf8", flexible=False):
try:
iterator = self.raw.stream(4096, decode_content=False)
if self.headers.get('content-encoding') == 'gzip':
return ibytes2ilines(icompressed2ibytes(iterator), encoding=encoding, flexible=flexible)
elif self.headers.get('content-type') == 'application/zip':
return ibytes2ilines(icompressed2ibytes(iterator), encoding=encoding, flexible=flexible)
elif self.url.endswith(".gz"):
return ibytes2ilines(icompressed2ibytes(iterator), encoding=encoding, flexible=flexible)
else:
return ibytes2ilines(iterator, encoding=encoding, flexible=flexible, closer=self.close)
except Exception as e:
Log.error("Can not read content", cause=e)
class Generator_usingStream(object):
"""
A BYTE GENERATOR USING A STREAM, AND BUFFERING IT FOR RE-PLAY
"""
def __init__(self, stream, length, _shared=None):
"""
:param stream: THE STREAM WE WILL GET THE BYTES FROM
:param length: THE MAX NUMBER OF BYTES WE ARE EXPECTING
:param _shared: FOR INTERNAL USE TO SHARE THE BUFFER
:return:
"""
self.position = 0
file_ = TemporaryFile()
if not _shared:
|
else:
self.shared = _shared
self.shared.ref_count += 1
def __iter__(self):
return Generator_usingStream(None, self.shared.length, self.shared)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def next(self):
if self.position >= self.shared.length:
raise StopIteration
end = min(self.position + MIN_READ_SIZE, self.shared.length)
s = self.shared
with s.locker:
while end > s.done_read:
data = s.stream.read(MIN_READ_SIZE)
s.buffer.write(data)
s.done_read += MIN_READ_SIZE
if s.done_read >= s.length:
s.done_read = s.length
s.stream.close()
try:
return s.buffer[self.position:end]
finally:
self.position = end
def close(self):
with self.shared.locker:
if self.shared:
s, self.shared = self.shared, None
s.ref_count -= 1
if s.ref_count==0:
try:
s.stream.close()
except Exception:
pass
try:
s.buffer.close()
except Exception:
pass
try:
s.file.close()
except Exception:
pass
def __del__(self):
self.close()
| self.shared = Data(
length=length,
locker=Lock(),
stream=stream,
done_read=0,
file=file_,
buffer=mmap(file_.fileno(), length)
) | conditional_block |
http.py | # encoding: utf-8
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski ([email protected])
#
# MIMICS THE requests API (http://docs.python-requests.org/en/latest/)
# DEMANDS data IS A JSON-SERIALIZABLE STRUCTURE
# WITH ADDED default_headers THAT CAN BE SET USING mo_logs.settings
# EG
# {"debug.constants":{
# "pyLibrary.env.http.default_headers":{"From":"[email protected]"}
# }}
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from copy import copy
from mmap import mmap
from numbers import Number
from tempfile import TemporaryFile
from requests import sessions, Response
import mo_json
from pyLibrary import convert
from mo_logs.exceptions import Except
from mo_logs import Log
from mo_dots import Data, coalesce, wrap, set_default, unwrap
from pyLibrary.env.big_data import safe_size, ibytes2ilines, icompressed2ibytes
from mo_math import Math
from jx_python import jx
from mo_threads import Thread, Lock
from mo_threads import Till
from mo_times.durations import Duration
DEBUG = False
FILE_SIZE_LIMIT = 100 * 1024 * 1024
MIN_READ_SIZE = 8 * 1024
ZIP_REQUEST = False
default_headers = Data() # TODO: MAKE THIS VARIABLE A SPECIAL TYPE OF EXPECTED MODULE PARAMETER SO IT COMPLAINS IF NOT SET
default_timeout = 600
_warning_sent = False
def request(method, url, zip=None, retry=None, **kwargs):
"""
JUST LIKE requests.request() BUT WITH DEFAULT HEADERS AND FIXES
DEMANDS data IS ONE OF:
* A JSON-SERIALIZABLE STRUCTURE, OR
* LIST OF JSON-SERIALIZABLE STRUCTURES, OR
* None
Parameters
* zip - ZIP THE REQUEST BODY, IF BIG ENOUGH
* json - JSON-SERIALIZABLE STRUCTURE
* retry - {"times": x, "sleep": y} STRUCTURE
THE BYTE_STRINGS (b"") ARE NECESSARY TO PREVENT httplib.py FROM **FREAKING OUT**
IT APPEARS requests AND httplib.py SIMPLY CONCATENATE STRINGS BLINDLY, WHICH
INCLUDES url AND headers
"""
global _warning_sent
if not default_headers and not _warning_sent:
_warning_sent = True
Log.warning(
"The pyLibrary.env.http module was meant to add extra "
"default headers to all requests, specifically the 'Referer' "
"header with a URL to the project. Use the `pyLibrary.debug.constants.set()` "
"function to set `pyLibrary.env.http.default_headers`"
)
if isinstance(url, list):
# TRY MANY URLS
failures = []
for remaining, u in jx.countdown(url):
try:
response = request(method, u, zip=zip, retry=retry, **kwargs)
if Math.round(response.status_code, decimal=-2) not in [400, 500]:
return response
if not remaining:
return response
except Exception as e:
e = Except.wrap(e)
failures.append(e)
Log.error("Tried {{num}} urls", num=len(url), cause=failures)
if b"session" in kwargs:
session = kwargs[b"session"]
del kwargs[b"session"]
else:
session = sessions.Session()
session.headers.update(default_headers)
if zip is None:
zip = ZIP_REQUEST
if isinstance(url, unicode):
# httplib.py WILL **FREAK OUT** IF IT SEES ANY UNICODE
url = url.encode("ascii")
_to_ascii_dict(kwargs)
timeout = kwargs[b'timeout'] = coalesce(kwargs.get(b'timeout'), default_timeout)
if retry == None:
retry = Data(times=1, sleep=0)
elif isinstance(retry, Number):
retry = Data(times=retry, sleep=1)
else:
retry = wrap(retry)
if isinstance(retry.sleep, Duration):
retry.sleep = retry.sleep.seconds
set_default(retry, {"times": 1, "sleep": 0})
if b'json' in kwargs:
kwargs[b'data'] = convert.value2json(kwargs[b'json']).encode("utf8")
del kwargs[b'json']
try:
headers = kwargs[b"headers"] = unwrap(coalesce(wrap(kwargs)[b"headers"], {}))
set_default(headers, {b"accept-encoding": b"compress, gzip"})
if zip and len(coalesce(kwargs.get(b"data"))) > 1000:
compressed = convert.bytes2zip(kwargs[b"data"])
headers[b'content-encoding'] = b'gzip'
kwargs[b"data"] = compressed
_to_ascii_dict(headers)
else:
_to_ascii_dict(headers)
except Exception as e:
Log.error("Request setup failure on {{url}}", url=url, cause=e)
errors = []
for r in range(retry.times):
if r:
Till(seconds=retry.sleep).wait()
try:
if DEBUG:
Log.note("http {{method}} to {{url}}", method=method, url=url)
return session.request(method=method, url=url, **kwargs)
except Exception as e:
errors.append(Except.wrap(e))
if " Read timed out." in errors[0]:
Log.error("Tried {{times}} times: Timeout failure (timeout was {{timeout}}", timeout=timeout, times=retry.times, cause=errors[0])
else:
Log.error("Tried {{times}} times: Request failure of {{url}}", url=url, times=retry.times, cause=errors[0])
def _to_ascii_dict(headers):
if headers is None:
return
for k, v in copy(headers).items():
if isinstance(k, unicode):
del headers[k]
if isinstance(v, unicode):
headers[k.encode("ascii")] = v.encode("ascii")
else:
headers[k.encode("ascii")] = v
elif isinstance(v, unicode):
headers[k] = v.encode("ascii")
def get(url, **kwargs):
|
def get_json(url, **kwargs):
"""
ASSUME RESPONSE IN IN JSON
"""
response = get(url, **kwargs)
c = response.all_content
return mo_json.json2value(convert.utf82unicode(c))
def options(url, **kwargs):
kwargs.setdefault(b'allow_redirects', True)
kwargs[b"stream"] = True
return HttpResponse(request(b'options', url, **kwargs))
def head(url, **kwargs):
kwargs.setdefault(b'allow_redirects', False)
kwargs[b"stream"] = True
return HttpResponse(request(b'head', url, **kwargs))
def post(url, **kwargs):
kwargs[b"stream"] = True
return HttpResponse(request(b'post', url, **kwargs))
def delete(url, **kwargs):
return HttpResponse(request(b'delete', url, **kwargs))
def post_json(url, **kwargs):
"""
ASSUME RESPONSE IN IN JSON
"""
if b"json" in kwargs:
kwargs[b"data"] = convert.unicode2utf8(convert.value2json(kwargs[b"json"]))
elif b'data' in kwargs:
kwargs[b"data"] = convert.unicode2utf8(convert.value2json(kwargs[b"data"]))
else:
Log.error("Expecting `json` parameter")
response = post(url, **kwargs)
c = response.content
try:
details = mo_json.json2value(convert.utf82unicode(c))
except Exception as e:
Log.error("Unexpected return value {{content}}", content=c, cause=e)
if response.status_code not in [200, 201]:
Log.error("Bad response", cause=Except.wrap(details))
return details
def put(url, **kwargs):
return HttpResponse(request(b'put', url, **kwargs))
def patch(url, **kwargs):
kwargs[b"stream"] = True
return HttpResponse(request(b'patch', url, **kwargs))
def delete(url, **kwargs):
kwargs[b"stream"] = True
return HttpResponse(request(b'delete', url, **kwargs))
class HttpResponse(Response):
def __new__(cls, resp):
resp.__class__ = HttpResponse
return resp
def __init__(self, resp):
pass
self._cached_content = None
@property
def all_content(self):
# response.content WILL LEAK MEMORY (?BECAUSE OF PYPY"S POOR HANDLING OF GENERATORS?)
# THE TIGHT, SIMPLE, LOOP TO FILL blocks PREVENTS THAT LEAK
if self._content is not False:
self._cached_content = self._content
elif self._cached_content is None:
def read(size):
if self.raw._fp.fp is not None:
return self.raw.read(amt=size, decode_content=True)
else:
self.close()
return None
self._cached_content = safe_size(Data(read=read))
if hasattr(self._cached_content, "read"):
self._cached_content.seek(0)
return self._cached_content
@property
def all_lines(self):
return self.get_all_lines()
def get_all_lines(self, encoding="utf8", flexible=False):
try:
iterator = self.raw.stream(4096, decode_content=False)
if self.headers.get('content-encoding') == 'gzip':
return ibytes2ilines(icompressed2ibytes(iterator), encoding=encoding, flexible=flexible)
elif self.headers.get('content-type') == 'application/zip':
return ibytes2ilines(icompressed2ibytes(iterator), encoding=encoding, flexible=flexible)
elif self.url.endswith(".gz"):
return ibytes2ilines(icompressed2ibytes(iterator), encoding=encoding, flexible=flexible)
else:
return ibytes2ilines(iterator, encoding=encoding, flexible=flexible, closer=self.close)
except Exception as e:
Log.error("Can not read content", cause=e)
class Generator_usingStream(object):
"""
A BYTE GENERATOR USING A STREAM, AND BUFFERING IT FOR RE-PLAY
"""
def __init__(self, stream, length, _shared=None):
"""
:param stream: THE STREAM WE WILL GET THE BYTES FROM
:param length: THE MAX NUMBER OF BYTES WE ARE EXPECTING
:param _shared: FOR INTERNAL USE TO SHARE THE BUFFER
:return:
"""
self.position = 0
file_ = TemporaryFile()
if not _shared:
self.shared = Data(
length=length,
locker=Lock(),
stream=stream,
done_read=0,
file=file_,
buffer=mmap(file_.fileno(), length)
)
else:
self.shared = _shared
self.shared.ref_count += 1
def __iter__(self):
return Generator_usingStream(None, self.shared.length, self.shared)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def next(self):
if self.position >= self.shared.length:
raise StopIteration
end = min(self.position + MIN_READ_SIZE, self.shared.length)
s = self.shared
with s.locker:
while end > s.done_read:
data = s.stream.read(MIN_READ_SIZE)
s.buffer.write(data)
s.done_read += MIN_READ_SIZE
if s.done_read >= s.length:
s.done_read = s.length
s.stream.close()
try:
return s.buffer[self.position:end]
finally:
self.position = end
def close(self):
with self.shared.locker:
if self.shared:
s, self.shared = self.shared, None
s.ref_count -= 1
if s.ref_count==0:
try:
s.stream.close()
except Exception:
pass
try:
s.buffer.close()
except Exception:
pass
try:
s.file.close()
except Exception:
pass
def __del__(self):
self.close()
| kwargs.setdefault(b'allow_redirects', True)
kwargs[b"stream"] = True
return HttpResponse(request(b'get', url, **kwargs)) | identifier_body |
http.py | # encoding: utf-8
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski ([email protected])
#
# MIMICS THE requests API (http://docs.python-requests.org/en/latest/)
# DEMANDS data IS A JSON-SERIALIZABLE STRUCTURE
# WITH ADDED default_headers THAT CAN BE SET USING mo_logs.settings
# EG
# {"debug.constants":{
# "pyLibrary.env.http.default_headers":{"From":"[email protected]"}
# }}
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from copy import copy
from mmap import mmap
from numbers import Number
from tempfile import TemporaryFile
from requests import sessions, Response
import mo_json
from pyLibrary import convert
from mo_logs.exceptions import Except
from mo_logs import Log
from mo_dots import Data, coalesce, wrap, set_default, unwrap
from pyLibrary.env.big_data import safe_size, ibytes2ilines, icompressed2ibytes
from mo_math import Math
from jx_python import jx
from mo_threads import Thread, Lock
from mo_threads import Till
from mo_times.durations import Duration
DEBUG = False
FILE_SIZE_LIMIT = 100 * 1024 * 1024
MIN_READ_SIZE = 8 * 1024
ZIP_REQUEST = False
default_headers = Data() # TODO: MAKE THIS VARIABLE A SPECIAL TYPE OF EXPECTED MODULE PARAMETER SO IT COMPLAINS IF NOT SET
default_timeout = 600
_warning_sent = False
def request(method, url, zip=None, retry=None, **kwargs):
"""
JUST LIKE requests.request() BUT WITH DEFAULT HEADERS AND FIXES
DEMANDS data IS ONE OF:
* A JSON-SERIALIZABLE STRUCTURE, OR
* LIST OF JSON-SERIALIZABLE STRUCTURES, OR
* None
Parameters
* zip - ZIP THE REQUEST BODY, IF BIG ENOUGH
* json - JSON-SERIALIZABLE STRUCTURE
* retry - {"times": x, "sleep": y} STRUCTURE
THE BYTE_STRINGS (b"") ARE NECESSARY TO PREVENT httplib.py FROM **FREAKING OUT**
IT APPEARS requests AND httplib.py SIMPLY CONCATENATE STRINGS BLINDLY, WHICH
INCLUDES url AND headers
"""
global _warning_sent
if not default_headers and not _warning_sent:
_warning_sent = True
Log.warning(
"The pyLibrary.env.http module was meant to add extra "
"default headers to all requests, specifically the 'Referer' "
"header with a URL to the project. Use the `pyLibrary.debug.constants.set()` "
"function to set `pyLibrary.env.http.default_headers`"
)
if isinstance(url, list):
# TRY MANY URLS
failures = []
for remaining, u in jx.countdown(url):
try:
response = request(method, u, zip=zip, retry=retry, **kwargs)
if Math.round(response.status_code, decimal=-2) not in [400, 500]:
return response
if not remaining:
return response
except Exception as e:
e = Except.wrap(e)
failures.append(e)
Log.error("Tried {{num}} urls", num=len(url), cause=failures)
if b"session" in kwargs:
session = kwargs[b"session"]
del kwargs[b"session"]
else:
session = sessions.Session()
session.headers.update(default_headers)
if zip is None:
zip = ZIP_REQUEST
if isinstance(url, unicode):
# httplib.py WILL **FREAK OUT** IF IT SEES ANY UNICODE
url = url.encode("ascii")
_to_ascii_dict(kwargs)
timeout = kwargs[b'timeout'] = coalesce(kwargs.get(b'timeout'), default_timeout)
if retry == None:
retry = Data(times=1, sleep=0)
elif isinstance(retry, Number):
retry = Data(times=retry, sleep=1)
else:
retry = wrap(retry)
if isinstance(retry.sleep, Duration):
retry.sleep = retry.sleep.seconds
set_default(retry, {"times": 1, "sleep": 0})
if b'json' in kwargs:
kwargs[b'data'] = convert.value2json(kwargs[b'json']).encode("utf8")
del kwargs[b'json']
try:
headers = kwargs[b"headers"] = unwrap(coalesce(wrap(kwargs)[b"headers"], {}))
set_default(headers, {b"accept-encoding": b"compress, gzip"})
if zip and len(coalesce(kwargs.get(b"data"))) > 1000:
compressed = convert.bytes2zip(kwargs[b"data"])
headers[b'content-encoding'] = b'gzip'
kwargs[b"data"] = compressed
_to_ascii_dict(headers)
else:
_to_ascii_dict(headers)
except Exception as e:
Log.error("Request setup failure on {{url}}", url=url, cause=e)
errors = []
for r in range(retry.times):
if r:
Till(seconds=retry.sleep).wait()
try:
if DEBUG:
Log.note("http {{method}} to {{url}}", method=method, url=url)
return session.request(method=method, url=url, **kwargs)
except Exception as e:
errors.append(Except.wrap(e))
if " Read timed out." in errors[0]:
Log.error("Tried {{times}} times: Timeout failure (timeout was {{timeout}}", timeout=timeout, times=retry.times, cause=errors[0])
else:
Log.error("Tried {{times}} times: Request failure of {{url}}", url=url, times=retry.times, cause=errors[0])
def _to_ascii_dict(headers):
if headers is None:
return
for k, v in copy(headers).items():
if isinstance(k, unicode):
del headers[k]
if isinstance(v, unicode):
headers[k.encode("ascii")] = v.encode("ascii")
else:
headers[k.encode("ascii")] = v
elif isinstance(v, unicode):
headers[k] = v.encode("ascii")
def get(url, **kwargs):
kwargs.setdefault(b'allow_redirects', True)
kwargs[b"stream"] = True
return HttpResponse(request(b'get', url, **kwargs))
def get_json(url, **kwargs):
"""
ASSUME RESPONSE IN IN JSON
"""
response = get(url, **kwargs)
c = response.all_content
return mo_json.json2value(convert.utf82unicode(c))
def options(url, **kwargs):
kwargs.setdefault(b'allow_redirects', True)
kwargs[b"stream"] = True
return HttpResponse(request(b'options', url, **kwargs))
def head(url, **kwargs):
kwargs.setdefault(b'allow_redirects', False)
kwargs[b"stream"] = True
return HttpResponse(request(b'head', url, **kwargs))
def post(url, **kwargs):
kwargs[b"stream"] = True
return HttpResponse(request(b'post', url, **kwargs))
def delete(url, **kwargs):
return HttpResponse(request(b'delete', url, **kwargs))
def post_json(url, **kwargs):
"""
ASSUME RESPONSE IN IN JSON
"""
if b"json" in kwargs:
kwargs[b"data"] = convert.unicode2utf8(convert.value2json(kwargs[b"json"]))
elif b'data' in kwargs:
kwargs[b"data"] = convert.unicode2utf8(convert.value2json(kwargs[b"data"]))
else: | c = response.content
try:
details = mo_json.json2value(convert.utf82unicode(c))
except Exception as e:
Log.error("Unexpected return value {{content}}", content=c, cause=e)
if response.status_code not in [200, 201]:
Log.error("Bad response", cause=Except.wrap(details))
return details
def put(url, **kwargs):
return HttpResponse(request(b'put', url, **kwargs))
def patch(url, **kwargs):
kwargs[b"stream"] = True
return HttpResponse(request(b'patch', url, **kwargs))
def delete(url, **kwargs):
kwargs[b"stream"] = True
return HttpResponse(request(b'delete', url, **kwargs))
class HttpResponse(Response):
def __new__(cls, resp):
resp.__class__ = HttpResponse
return resp
def __init__(self, resp):
pass
self._cached_content = None
@property
def all_content(self):
# response.content WILL LEAK MEMORY (?BECAUSE OF PYPY"S POOR HANDLING OF GENERATORS?)
# THE TIGHT, SIMPLE, LOOP TO FILL blocks PREVENTS THAT LEAK
if self._content is not False:
self._cached_content = self._content
elif self._cached_content is None:
def read(size):
if self.raw._fp.fp is not None:
return self.raw.read(amt=size, decode_content=True)
else:
self.close()
return None
self._cached_content = safe_size(Data(read=read))
if hasattr(self._cached_content, "read"):
self._cached_content.seek(0)
return self._cached_content
@property
def all_lines(self):
return self.get_all_lines()
def get_all_lines(self, encoding="utf8", flexible=False):
try:
iterator = self.raw.stream(4096, decode_content=False)
if self.headers.get('content-encoding') == 'gzip':
return ibytes2ilines(icompressed2ibytes(iterator), encoding=encoding, flexible=flexible)
elif self.headers.get('content-type') == 'application/zip':
return ibytes2ilines(icompressed2ibytes(iterator), encoding=encoding, flexible=flexible)
elif self.url.endswith(".gz"):
return ibytes2ilines(icompressed2ibytes(iterator), encoding=encoding, flexible=flexible)
else:
return ibytes2ilines(iterator, encoding=encoding, flexible=flexible, closer=self.close)
except Exception as e:
Log.error("Can not read content", cause=e)
class Generator_usingStream(object):
"""
A BYTE GENERATOR USING A STREAM, AND BUFFERING IT FOR RE-PLAY
"""
def __init__(self, stream, length, _shared=None):
"""
:param stream: THE STREAM WE WILL GET THE BYTES FROM
:param length: THE MAX NUMBER OF BYTES WE ARE EXPECTING
:param _shared: FOR INTERNAL USE TO SHARE THE BUFFER
:return:
"""
self.position = 0
file_ = TemporaryFile()
if not _shared:
self.shared = Data(
length=length,
locker=Lock(),
stream=stream,
done_read=0,
file=file_,
buffer=mmap(file_.fileno(), length)
)
else:
self.shared = _shared
self.shared.ref_count += 1
def __iter__(self):
return Generator_usingStream(None, self.shared.length, self.shared)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def next(self):
if self.position >= self.shared.length:
raise StopIteration
end = min(self.position + MIN_READ_SIZE, self.shared.length)
s = self.shared
with s.locker:
while end > s.done_read:
data = s.stream.read(MIN_READ_SIZE)
s.buffer.write(data)
s.done_read += MIN_READ_SIZE
if s.done_read >= s.length:
s.done_read = s.length
s.stream.close()
try:
return s.buffer[self.position:end]
finally:
self.position = end
def close(self):
with self.shared.locker:
if self.shared:
s, self.shared = self.shared, None
s.ref_count -= 1
if s.ref_count==0:
try:
s.stream.close()
except Exception:
pass
try:
s.buffer.close()
except Exception:
pass
try:
s.file.close()
except Exception:
pass
def __del__(self):
self.close() | Log.error("Expecting `json` parameter")
response = post(url, **kwargs) | random_line_split |
http.py | # encoding: utf-8
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski ([email protected])
#
# MIMICS THE requests API (http://docs.python-requests.org/en/latest/)
# DEMANDS data IS A JSON-SERIALIZABLE STRUCTURE
# WITH ADDED default_headers THAT CAN BE SET USING mo_logs.settings
# EG
# {"debug.constants":{
# "pyLibrary.env.http.default_headers":{"From":"[email protected]"}
# }}
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from copy import copy
from mmap import mmap
from numbers import Number
from tempfile import TemporaryFile
from requests import sessions, Response
import mo_json
from pyLibrary import convert
from mo_logs.exceptions import Except
from mo_logs import Log
from mo_dots import Data, coalesce, wrap, set_default, unwrap
from pyLibrary.env.big_data import safe_size, ibytes2ilines, icompressed2ibytes
from mo_math import Math
from jx_python import jx
from mo_threads import Thread, Lock
from mo_threads import Till
from mo_times.durations import Duration
DEBUG = False
FILE_SIZE_LIMIT = 100 * 1024 * 1024
MIN_READ_SIZE = 8 * 1024
ZIP_REQUEST = False
default_headers = Data() # TODO: MAKE THIS VARIABLE A SPECIAL TYPE OF EXPECTED MODULE PARAMETER SO IT COMPLAINS IF NOT SET
default_timeout = 600
_warning_sent = False
def request(method, url, zip=None, retry=None, **kwargs):
"""
JUST LIKE requests.request() BUT WITH DEFAULT HEADERS AND FIXES
DEMANDS data IS ONE OF:
* A JSON-SERIALIZABLE STRUCTURE, OR
* LIST OF JSON-SERIALIZABLE STRUCTURES, OR
* None
Parameters
* zip - ZIP THE REQUEST BODY, IF BIG ENOUGH
* json - JSON-SERIALIZABLE STRUCTURE
* retry - {"times": x, "sleep": y} STRUCTURE
THE BYTE_STRINGS (b"") ARE NECESSARY TO PREVENT httplib.py FROM **FREAKING OUT**
IT APPEARS requests AND httplib.py SIMPLY CONCATENATE STRINGS BLINDLY, WHICH
INCLUDES url AND headers
"""
global _warning_sent
if not default_headers and not _warning_sent:
_warning_sent = True
Log.warning(
"The pyLibrary.env.http module was meant to add extra "
"default headers to all requests, specifically the 'Referer' "
"header with a URL to the project. Use the `pyLibrary.debug.constants.set()` "
"function to set `pyLibrary.env.http.default_headers`"
)
if isinstance(url, list):
# TRY MANY URLS
failures = []
for remaining, u in jx.countdown(url):
try:
response = request(method, u, zip=zip, retry=retry, **kwargs)
if Math.round(response.status_code, decimal=-2) not in [400, 500]:
return response
if not remaining:
return response
except Exception as e:
e = Except.wrap(e)
failures.append(e)
Log.error("Tried {{num}} urls", num=len(url), cause=failures)
if b"session" in kwargs:
session = kwargs[b"session"]
del kwargs[b"session"]
else:
session = sessions.Session()
session.headers.update(default_headers)
if zip is None:
zip = ZIP_REQUEST
if isinstance(url, unicode):
# httplib.py WILL **FREAK OUT** IF IT SEES ANY UNICODE
url = url.encode("ascii")
_to_ascii_dict(kwargs)
timeout = kwargs[b'timeout'] = coalesce(kwargs.get(b'timeout'), default_timeout)
if retry == None:
retry = Data(times=1, sleep=0)
elif isinstance(retry, Number):
retry = Data(times=retry, sleep=1)
else:
retry = wrap(retry)
if isinstance(retry.sleep, Duration):
retry.sleep = retry.sleep.seconds
set_default(retry, {"times": 1, "sleep": 0})
if b'json' in kwargs:
kwargs[b'data'] = convert.value2json(kwargs[b'json']).encode("utf8")
del kwargs[b'json']
try:
headers = kwargs[b"headers"] = unwrap(coalesce(wrap(kwargs)[b"headers"], {}))
set_default(headers, {b"accept-encoding": b"compress, gzip"})
if zip and len(coalesce(kwargs.get(b"data"))) > 1000:
compressed = convert.bytes2zip(kwargs[b"data"])
headers[b'content-encoding'] = b'gzip'
kwargs[b"data"] = compressed
_to_ascii_dict(headers)
else:
_to_ascii_dict(headers)
except Exception as e:
Log.error("Request setup failure on {{url}}", url=url, cause=e)
errors = []
for r in range(retry.times):
if r:
Till(seconds=retry.sleep).wait()
try:
if DEBUG:
Log.note("http {{method}} to {{url}}", method=method, url=url)
return session.request(method=method, url=url, **kwargs)
except Exception as e:
errors.append(Except.wrap(e))
if " Read timed out." in errors[0]:
Log.error("Tried {{times}} times: Timeout failure (timeout was {{timeout}}", timeout=timeout, times=retry.times, cause=errors[0])
else:
Log.error("Tried {{times}} times: Request failure of {{url}}", url=url, times=retry.times, cause=errors[0])
def _to_ascii_dict(headers):
if headers is None:
return
for k, v in copy(headers).items():
if isinstance(k, unicode):
del headers[k]
if isinstance(v, unicode):
headers[k.encode("ascii")] = v.encode("ascii")
else:
headers[k.encode("ascii")] = v
elif isinstance(v, unicode):
headers[k] = v.encode("ascii")
def get(url, **kwargs):
kwargs.setdefault(b'allow_redirects', True)
kwargs[b"stream"] = True
return HttpResponse(request(b'get', url, **kwargs))
def get_json(url, **kwargs):
"""
ASSUME RESPONSE IN IN JSON
"""
response = get(url, **kwargs)
c = response.all_content
return mo_json.json2value(convert.utf82unicode(c))
def options(url, **kwargs):
kwargs.setdefault(b'allow_redirects', True)
kwargs[b"stream"] = True
return HttpResponse(request(b'options', url, **kwargs))
def head(url, **kwargs):
kwargs.setdefault(b'allow_redirects', False)
kwargs[b"stream"] = True
return HttpResponse(request(b'head', url, **kwargs))
def post(url, **kwargs):
kwargs[b"stream"] = True
return HttpResponse(request(b'post', url, **kwargs))
def delete(url, **kwargs):
return HttpResponse(request(b'delete', url, **kwargs))
def post_json(url, **kwargs):
"""
ASSUME RESPONSE IN IN JSON
"""
if b"json" in kwargs:
kwargs[b"data"] = convert.unicode2utf8(convert.value2json(kwargs[b"json"]))
elif b'data' in kwargs:
kwargs[b"data"] = convert.unicode2utf8(convert.value2json(kwargs[b"data"]))
else:
Log.error("Expecting `json` parameter")
response = post(url, **kwargs)
c = response.content
try:
details = mo_json.json2value(convert.utf82unicode(c))
except Exception as e:
Log.error("Unexpected return value {{content}}", content=c, cause=e)
if response.status_code not in [200, 201]:
Log.error("Bad response", cause=Except.wrap(details))
return details
def put(url, **kwargs):
return HttpResponse(request(b'put', url, **kwargs))
def patch(url, **kwargs):
kwargs[b"stream"] = True
return HttpResponse(request(b'patch', url, **kwargs))
def delete(url, **kwargs):
kwargs[b"stream"] = True
return HttpResponse(request(b'delete', url, **kwargs))
class HttpResponse(Response):
def __new__(cls, resp):
resp.__class__ = HttpResponse
return resp
def __init__(self, resp):
pass
self._cached_content = None
@property
def all_content(self):
# response.content WILL LEAK MEMORY (?BECAUSE OF PYPY"S POOR HANDLING OF GENERATORS?)
# THE TIGHT, SIMPLE, LOOP TO FILL blocks PREVENTS THAT LEAK
if self._content is not False:
self._cached_content = self._content
elif self._cached_content is None:
def read(size):
if self.raw._fp.fp is not None:
return self.raw.read(amt=size, decode_content=True)
else:
self.close()
return None
self._cached_content = safe_size(Data(read=read))
if hasattr(self._cached_content, "read"):
self._cached_content.seek(0)
return self._cached_content
@property
def all_lines(self):
return self.get_all_lines()
def get_all_lines(self, encoding="utf8", flexible=False):
try:
iterator = self.raw.stream(4096, decode_content=False)
if self.headers.get('content-encoding') == 'gzip':
return ibytes2ilines(icompressed2ibytes(iterator), encoding=encoding, flexible=flexible)
elif self.headers.get('content-type') == 'application/zip':
return ibytes2ilines(icompressed2ibytes(iterator), encoding=encoding, flexible=flexible)
elif self.url.endswith(".gz"):
return ibytes2ilines(icompressed2ibytes(iterator), encoding=encoding, flexible=flexible)
else:
return ibytes2ilines(iterator, encoding=encoding, flexible=flexible, closer=self.close)
except Exception as e:
Log.error("Can not read content", cause=e)
class Generator_usingStream(object):
"""
A BYTE GENERATOR USING A STREAM, AND BUFFERING IT FOR RE-PLAY
"""
def __init__(self, stream, length, _shared=None):
"""
:param stream: THE STREAM WE WILL GET THE BYTES FROM
:param length: THE MAX NUMBER OF BYTES WE ARE EXPECTING
:param _shared: FOR INTERNAL USE TO SHARE THE BUFFER
:return:
"""
self.position = 0
file_ = TemporaryFile()
if not _shared:
self.shared = Data(
length=length,
locker=Lock(),
stream=stream,
done_read=0,
file=file_,
buffer=mmap(file_.fileno(), length)
)
else:
self.shared = _shared
self.shared.ref_count += 1
def | (self):
return Generator_usingStream(None, self.shared.length, self.shared)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def next(self):
if self.position >= self.shared.length:
raise StopIteration
end = min(self.position + MIN_READ_SIZE, self.shared.length)
s = self.shared
with s.locker:
while end > s.done_read:
data = s.stream.read(MIN_READ_SIZE)
s.buffer.write(data)
s.done_read += MIN_READ_SIZE
if s.done_read >= s.length:
s.done_read = s.length
s.stream.close()
try:
return s.buffer[self.position:end]
finally:
self.position = end
def close(self):
with self.shared.locker:
if self.shared:
s, self.shared = self.shared, None
s.ref_count -= 1
if s.ref_count==0:
try:
s.stream.close()
except Exception:
pass
try:
s.buffer.close()
except Exception:
pass
try:
s.file.close()
except Exception:
pass
def __del__(self):
self.close()
| __iter__ | identifier_name |
layers.py | # coding=utf-8
# Copyright 2022 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layers for a progressive GAN model.
This module contains basic building blocks to build a progressive GAN model.
See https://arxiv.org/abs/1710.10196 for details about the model.
See https://github.com/tkarras/progressive_growing_of_gans for the original
theano implementation.
"""
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow_gan.examples import compat_utils
def pixel_norm(images, epsilon=1.0e-8):
"""Pixel normalization.
For each pixel a[i,j,k] of image in HWC format, normalize its value to
b[i,j,k] = a[i,j,k] / SQRT(SUM_k(a[i,j,k]^2) / C + eps).
Args:
images: A 4D `Tensor` of NHWC format.
epsilon: A small positive number to avoid division by zero.
Returns:
A 4D `Tensor` with pixel-wise normalized channels.
"""
return images * tf.math.rsqrt(
tf.reduce_mean(input_tensor=tf.square(images), axis=3, keepdims=True) +
epsilon)
def _get_validated_scale(scale):
"""Returns the scale guaranteed to be a positive integer."""
scale = int(scale)
if scale <= 0:
raise ValueError('`scale` must be a positive integer.')
return scale
def downscale(images, scale):
"""Box downscaling of images.
Args:
images: A 4D `Tensor` in NHWC format.
scale: A positive integer scale.
Returns:
A 4D `Tensor` of `images` down scaled by a factor `scale`.
Raises:
ValueError: If `scale` is not a positive integer.
"""
scale = _get_validated_scale(scale)
if scale == 1:
return images
return compat_utils.nn_avg_pool2d(
input=images,
ksize=[1, scale, scale, 1],
strides=[1, scale, scale, 1],
padding='VALID')
def upscale(images, scale):
"""Box upscaling (also called nearest neighbors) of images.
Args:
images: A 4D `Tensor` in NHWC format.
scale: A positive integer scale.
Returns:
A 4D `Tensor` of `images` up scaled by a factor `scale`.
Raises:
ValueError: If `scale` is not a positive integer.
"""
scale = _get_validated_scale(scale)
if scale == 1:
|
return compat_utils.batch_to_space(
input=tf.tile(images, [scale**2, 1, 1, 1]),
crops=[[0, 0], [0, 0]],
block_shape=scale)
def minibatch_mean_stddev(x):
"""Computes the standard deviation average.
This is used by the discriminator as a form of batch discrimination.
Args:
x: A `Tensor` for which to compute the standard deviation average. The first
dimension must be batch size.
Returns:
A scalar `Tensor` which is the mean variance of variable x.
"""
mean, var = tf.nn.moments(x=x, axes=[0])
del mean
return tf.reduce_mean(input_tensor=tf.sqrt(var))
def scalar_concat(tensor, scalar):
"""Concatenates a scalar to the last dimension of a tensor.
Args:
tensor: A `Tensor`.
scalar: a scalar `Tensor` to concatenate to tensor `tensor`.
Returns:
A `Tensor`. If `tensor` has shape [...,N], the result R has shape
[...,N+1] and R[...,N] = scalar.
Raises:
ValueError: If `tensor` is a scalar `Tensor`.
"""
ndims = tensor.shape.ndims
if ndims < 1:
raise ValueError('`tensor` must have number of dimensions >= 1.')
shape = tf.shape(input=tensor)
return tf.concat(
[tensor,
tf.ones([shape[i] for i in range(ndims - 1)] + [1]) * scalar],
axis=ndims - 1)
def he_initializer_scale(shape, slope=1.0):
"""The scale of He neural network initializer.
Args:
shape: A list of ints representing the dimensions of a tensor.
slope: A float representing the slope of the ReLu following the layer.
Returns:
A float of he initializer scale.
"""
fan_in = np.prod(shape[:-1])
return np.sqrt(2. / ((1. + slope**2) * fan_in))
def _custom_layer_impl(apply_kernel, kernel_shape, bias_shape, activation,
he_initializer_slope, use_weight_scaling):
"""Helper function to implement custom_xxx layer.
Args:
apply_kernel: A function that transforms kernel to output.
kernel_shape: An integer tuple or list of the kernel shape.
bias_shape: An integer tuple or list of the bias shape.
activation: An activation function to be applied. None means no activation.
he_initializer_slope: A float slope for the He initializer.
use_weight_scaling: Whether to apply weight scaling.
Returns:
A `Tensor` computed as apply_kernel(kernel) + bias where kernel is a
`Tensor` variable with shape `kernel_shape`, bias is a `Tensor` variable
with shape `bias_shape`.
"""
kernel_scale = he_initializer_scale(kernel_shape, he_initializer_slope)
init_scale, post_scale = kernel_scale, 1.0
if use_weight_scaling:
init_scale, post_scale = post_scale, init_scale
kernel_initializer = tf.random_normal_initializer(stddev=init_scale)
bias = tf.get_variable(
'bias', shape=bias_shape, initializer=tf.zeros_initializer())
output = post_scale * apply_kernel(kernel_shape, kernel_initializer) + bias
if activation is not None:
output = activation(output)
return output
def custom_conv2d(x,
filters,
kernel_size,
strides=(1, 1),
padding='SAME',
activation=None,
he_initializer_slope=1.0,
use_weight_scaling=True,
scope='custom_conv2d',
reuse=None):
"""Custom conv2d layer.
In comparison with tf.layers.conv2d this implementation use the He initializer
to initialize convolutional kernel and the weight scaling trick (if
`use_weight_scaling` is True) to equalize learning rates. See
https://arxiv.org/abs/1710.10196 for more details.
Args:
x: A `Tensor` of NHWC format.
filters: An int of output channels.
kernel_size: An integer or a int tuple of [kernel_height, kernel_width].
strides: A list of strides.
padding: One of "VALID" or "SAME".
activation: An activation function to be applied. None means no activation.
Defaults to None.
he_initializer_slope: A float slope for the He initializer. Defaults to 1.0.
use_weight_scaling: Whether to apply weight scaling. Defaults to True.
scope: A string or variable scope.
reuse: Whether to reuse the weights. Defaults to None.
Returns:
A `Tensor` of NHWC format where the last dimension has size `filters`.
"""
if not isinstance(kernel_size, (list, tuple)):
kernel_size = [kernel_size] * 2
kernel_size = list(kernel_size)
def _apply_kernel(kernel_shape, kernel_initializer):
return tf.layers.conv2d(
x,
filters=filters,
kernel_size=kernel_shape[0:2],
strides=strides,
padding=padding,
use_bias=False,
kernel_initializer=kernel_initializer)
with tf.variable_scope(scope, reuse=reuse):
return _custom_layer_impl(
_apply_kernel,
kernel_shape=kernel_size + [x.shape.as_list()[3], filters],
bias_shape=(filters,),
activation=activation,
he_initializer_slope=he_initializer_slope,
use_weight_scaling=use_weight_scaling)
def custom_dense(x,
units,
activation=None,
he_initializer_slope=1.0,
use_weight_scaling=True,
scope='custom_dense',
reuse=None):
"""Custom dense layer.
In comparison with tf.layers.dense This implementation use the He
initializer to initialize weights and the weight scaling trick
(if `use_weight_scaling` is True) to equalize learning rates. See
https://arxiv.org/abs/1710.10196 for more details.
Args:
x: A `Tensor`.
units: An int of the last dimension size of output.
activation: An activation function to be applied. None means no activation.
Defaults to None.
he_initializer_slope: A float slope for the He initializer. Defaults to 1.0.
use_weight_scaling: Whether to apply weight scaling. Defaults to True.
scope: A string or variable scope.
reuse: Whether to reuse the weights. Defaults to None.
Returns:
A `Tensor` where the last dimension has size `units`.
"""
x = tf.layers.flatten(x)
def _apply_kernel(kernel_shape, kernel_initializer):
return tf.layers.dense(
x,
kernel_shape[1],
use_bias=False,
kernel_initializer=kernel_initializer)
with tf.variable_scope(scope, reuse=reuse):
return _custom_layer_impl(
_apply_kernel,
kernel_shape=(x.shape.as_list()[-1], units),
bias_shape=(units,),
activation=activation,
he_initializer_slope=he_initializer_slope,
use_weight_scaling=use_weight_scaling)
| return images | conditional_block |
layers.py | # coding=utf-8
# Copyright 2022 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layers for a progressive GAN model.
This module contains basic building blocks to build a progressive GAN model.
See https://arxiv.org/abs/1710.10196 for details about the model.
See https://github.com/tkarras/progressive_growing_of_gans for the original
theano implementation.
"""
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow_gan.examples import compat_utils
def pixel_norm(images, epsilon=1.0e-8):
"""Pixel normalization.
For each pixel a[i,j,k] of image in HWC format, normalize its value to
b[i,j,k] = a[i,j,k] / SQRT(SUM_k(a[i,j,k]^2) / C + eps).
Args:
images: A 4D `Tensor` of NHWC format.
epsilon: A small positive number to avoid division by zero.
Returns:
A 4D `Tensor` with pixel-wise normalized channels.
"""
return images * tf.math.rsqrt(
tf.reduce_mean(input_tensor=tf.square(images), axis=3, keepdims=True) +
epsilon)
def _get_validated_scale(scale):
"""Returns the scale guaranteed to be a positive integer."""
scale = int(scale)
if scale <= 0:
raise ValueError('`scale` must be a positive integer.')
return scale
def downscale(images, scale):
"""Box downscaling of images.
Args:
images: A 4D `Tensor` in NHWC format.
scale: A positive integer scale.
Returns:
A 4D `Tensor` of `images` down scaled by a factor `scale`.
Raises:
ValueError: If `scale` is not a positive integer.
"""
scale = _get_validated_scale(scale)
if scale == 1:
return images
return compat_utils.nn_avg_pool2d(
input=images,
ksize=[1, scale, scale, 1],
strides=[1, scale, scale, 1],
padding='VALID')
def upscale(images, scale):
"""Box upscaling (also called nearest neighbors) of images.
Args:
images: A 4D `Tensor` in NHWC format.
scale: A positive integer scale.
Returns:
A 4D `Tensor` of `images` up scaled by a factor `scale`.
Raises:
ValueError: If `scale` is not a positive integer.
"""
scale = _get_validated_scale(scale)
if scale == 1:
return images
return compat_utils.batch_to_space(
input=tf.tile(images, [scale**2, 1, 1, 1]),
crops=[[0, 0], [0, 0]],
block_shape=scale)
def minibatch_mean_stddev(x):
"""Computes the standard deviation average.
This is used by the discriminator as a form of batch discrimination.
Args:
x: A `Tensor` for which to compute the standard deviation average. The first
dimension must be batch size.
Returns:
A scalar `Tensor` which is the mean variance of variable x.
"""
mean, var = tf.nn.moments(x=x, axes=[0])
del mean
return tf.reduce_mean(input_tensor=tf.sqrt(var))
def scalar_concat(tensor, scalar):
"""Concatenates a scalar to the last dimension of a tensor.
Args:
tensor: A `Tensor`.
scalar: a scalar `Tensor` to concatenate to tensor `tensor`.
Returns:
A `Tensor`. If `tensor` has shape [...,N], the result R has shape
[...,N+1] and R[...,N] = scalar.
Raises:
ValueError: If `tensor` is a scalar `Tensor`.
"""
ndims = tensor.shape.ndims
if ndims < 1:
raise ValueError('`tensor` must have number of dimensions >= 1.')
shape = tf.shape(input=tensor)
return tf.concat(
[tensor,
tf.ones([shape[i] for i in range(ndims - 1)] + [1]) * scalar],
axis=ndims - 1)
def he_initializer_scale(shape, slope=1.0):
"""The scale of He neural network initializer.
Args:
shape: A list of ints representing the dimensions of a tensor.
slope: A float representing the slope of the ReLu following the layer.
Returns:
A float of he initializer scale.
"""
fan_in = np.prod(shape[:-1])
return np.sqrt(2. / ((1. + slope**2) * fan_in))
def _custom_layer_impl(apply_kernel, kernel_shape, bias_shape, activation,
he_initializer_slope, use_weight_scaling):
"""Helper function to implement custom_xxx layer.
Args:
apply_kernel: A function that transforms kernel to output.
kernel_shape: An integer tuple or list of the kernel shape.
bias_shape: An integer tuple or list of the bias shape.
activation: An activation function to be applied. None means no activation.
he_initializer_slope: A float slope for the He initializer.
use_weight_scaling: Whether to apply weight scaling.
Returns:
A `Tensor` computed as apply_kernel(kernel) + bias where kernel is a
`Tensor` variable with shape `kernel_shape`, bias is a `Tensor` variable
with shape `bias_shape`.
"""
kernel_scale = he_initializer_scale(kernel_shape, he_initializer_slope)
init_scale, post_scale = kernel_scale, 1.0
if use_weight_scaling:
init_scale, post_scale = post_scale, init_scale
kernel_initializer = tf.random_normal_initializer(stddev=init_scale)
bias = tf.get_variable(
'bias', shape=bias_shape, initializer=tf.zeros_initializer())
output = post_scale * apply_kernel(kernel_shape, kernel_initializer) + bias
if activation is not None:
output = activation(output)
return output
def custom_conv2d(x,
filters,
kernel_size,
strides=(1, 1),
padding='SAME',
activation=None,
he_initializer_slope=1.0,
use_weight_scaling=True,
scope='custom_conv2d',
reuse=None):
|
def custom_dense(x,
units,
activation=None,
he_initializer_slope=1.0,
use_weight_scaling=True,
scope='custom_dense',
reuse=None):
"""Custom dense layer.
In comparison with tf.layers.dense This implementation use the He
initializer to initialize weights and the weight scaling trick
(if `use_weight_scaling` is True) to equalize learning rates. See
https://arxiv.org/abs/1710.10196 for more details.
Args:
x: A `Tensor`.
units: An int of the last dimension size of output.
activation: An activation function to be applied. None means no activation.
Defaults to None.
he_initializer_slope: A float slope for the He initializer. Defaults to 1.0.
use_weight_scaling: Whether to apply weight scaling. Defaults to True.
scope: A string or variable scope.
reuse: Whether to reuse the weights. Defaults to None.
Returns:
A `Tensor` where the last dimension has size `units`.
"""
x = tf.layers.flatten(x)
def _apply_kernel(kernel_shape, kernel_initializer):
return tf.layers.dense(
x,
kernel_shape[1],
use_bias=False,
kernel_initializer=kernel_initializer)
with tf.variable_scope(scope, reuse=reuse):
return _custom_layer_impl(
_apply_kernel,
kernel_shape=(x.shape.as_list()[-1], units),
bias_shape=(units,),
activation=activation,
he_initializer_slope=he_initializer_slope,
use_weight_scaling=use_weight_scaling)
| """Custom conv2d layer.
In comparison with tf.layers.conv2d this implementation use the He initializer
to initialize convolutional kernel and the weight scaling trick (if
`use_weight_scaling` is True) to equalize learning rates. See
https://arxiv.org/abs/1710.10196 for more details.
Args:
x: A `Tensor` of NHWC format.
filters: An int of output channels.
kernel_size: An integer or a int tuple of [kernel_height, kernel_width].
strides: A list of strides.
padding: One of "VALID" or "SAME".
activation: An activation function to be applied. None means no activation.
Defaults to None.
he_initializer_slope: A float slope for the He initializer. Defaults to 1.0.
use_weight_scaling: Whether to apply weight scaling. Defaults to True.
scope: A string or variable scope.
reuse: Whether to reuse the weights. Defaults to None.
Returns:
A `Tensor` of NHWC format where the last dimension has size `filters`.
"""
if not isinstance(kernel_size, (list, tuple)):
kernel_size = [kernel_size] * 2
kernel_size = list(kernel_size)
def _apply_kernel(kernel_shape, kernel_initializer):
return tf.layers.conv2d(
x,
filters=filters,
kernel_size=kernel_shape[0:2],
strides=strides,
padding=padding,
use_bias=False,
kernel_initializer=kernel_initializer)
with tf.variable_scope(scope, reuse=reuse):
return _custom_layer_impl(
_apply_kernel,
kernel_shape=kernel_size + [x.shape.as_list()[3], filters],
bias_shape=(filters,),
activation=activation,
he_initializer_slope=he_initializer_slope,
use_weight_scaling=use_weight_scaling) | identifier_body |
layers.py | # coding=utf-8
# Copyright 2022 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layers for a progressive GAN model.
This module contains basic building blocks to build a progressive GAN model.
See https://arxiv.org/abs/1710.10196 for details about the model.
See https://github.com/tkarras/progressive_growing_of_gans for the original
theano implementation.
"""
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow_gan.examples import compat_utils
def pixel_norm(images, epsilon=1.0e-8):
"""Pixel normalization.
For each pixel a[i,j,k] of image in HWC format, normalize its value to
b[i,j,k] = a[i,j,k] / SQRT(SUM_k(a[i,j,k]^2) / C + eps).
Args:
images: A 4D `Tensor` of NHWC format.
epsilon: A small positive number to avoid division by zero.
Returns:
A 4D `Tensor` with pixel-wise normalized channels.
"""
return images * tf.math.rsqrt(
tf.reduce_mean(input_tensor=tf.square(images), axis=3, keepdims=True) +
epsilon)
def _get_validated_scale(scale):
"""Returns the scale guaranteed to be a positive integer."""
scale = int(scale)
if scale <= 0:
raise ValueError('`scale` must be a positive integer.')
return scale
def downscale(images, scale):
"""Box downscaling of images.
Args:
images: A 4D `Tensor` in NHWC format.
scale: A positive integer scale.
Returns:
A 4D `Tensor` of `images` down scaled by a factor `scale`.
Raises:
ValueError: If `scale` is not a positive integer.
"""
scale = _get_validated_scale(scale)
if scale == 1:
return images
return compat_utils.nn_avg_pool2d(
input=images,
ksize=[1, scale, scale, 1],
strides=[1, scale, scale, 1],
padding='VALID')
def upscale(images, scale):
"""Box upscaling (also called nearest neighbors) of images.
Args:
images: A 4D `Tensor` in NHWC format.
scale: A positive integer scale.
Returns:
A 4D `Tensor` of `images` up scaled by a factor `scale`.
Raises:
ValueError: If `scale` is not a positive integer.
"""
scale = _get_validated_scale(scale)
if scale == 1:
return images
return compat_utils.batch_to_space(
input=tf.tile(images, [scale**2, 1, 1, 1]),
crops=[[0, 0], [0, 0]],
block_shape=scale)
def minibatch_mean_stddev(x):
"""Computes the standard deviation average.
This is used by the discriminator as a form of batch discrimination.
Args:
x: A `Tensor` for which to compute the standard deviation average. The first
dimension must be batch size.
Returns:
A scalar `Tensor` which is the mean variance of variable x.
"""
mean, var = tf.nn.moments(x=x, axes=[0])
del mean
return tf.reduce_mean(input_tensor=tf.sqrt(var))
def scalar_concat(tensor, scalar):
"""Concatenates a scalar to the last dimension of a tensor.
Args:
tensor: A `Tensor`.
scalar: a scalar `Tensor` to concatenate to tensor `tensor`.
Returns:
A `Tensor`. If `tensor` has shape [...,N], the result R has shape
[...,N+1] and R[...,N] = scalar.
Raises:
ValueError: If `tensor` is a scalar `Tensor`.
"""
ndims = tensor.shape.ndims
if ndims < 1:
raise ValueError('`tensor` must have number of dimensions >= 1.')
shape = tf.shape(input=tensor)
return tf.concat(
[tensor,
tf.ones([shape[i] for i in range(ndims - 1)] + [1]) * scalar],
axis=ndims - 1)
def he_initializer_scale(shape, slope=1.0):
"""The scale of He neural network initializer. | Returns:
A float of he initializer scale.
"""
fan_in = np.prod(shape[:-1])
return np.sqrt(2. / ((1. + slope**2) * fan_in))
def _custom_layer_impl(apply_kernel, kernel_shape, bias_shape, activation,
he_initializer_slope, use_weight_scaling):
"""Helper function to implement custom_xxx layer.
Args:
apply_kernel: A function that transforms kernel to output.
kernel_shape: An integer tuple or list of the kernel shape.
bias_shape: An integer tuple or list of the bias shape.
activation: An activation function to be applied. None means no activation.
he_initializer_slope: A float slope for the He initializer.
use_weight_scaling: Whether to apply weight scaling.
Returns:
A `Tensor` computed as apply_kernel(kernel) + bias where kernel is a
`Tensor` variable with shape `kernel_shape`, bias is a `Tensor` variable
with shape `bias_shape`.
"""
kernel_scale = he_initializer_scale(kernel_shape, he_initializer_slope)
init_scale, post_scale = kernel_scale, 1.0
if use_weight_scaling:
init_scale, post_scale = post_scale, init_scale
kernel_initializer = tf.random_normal_initializer(stddev=init_scale)
bias = tf.get_variable(
'bias', shape=bias_shape, initializer=tf.zeros_initializer())
output = post_scale * apply_kernel(kernel_shape, kernel_initializer) + bias
if activation is not None:
output = activation(output)
return output
def custom_conv2d(x,
filters,
kernel_size,
strides=(1, 1),
padding='SAME',
activation=None,
he_initializer_slope=1.0,
use_weight_scaling=True,
scope='custom_conv2d',
reuse=None):
"""Custom conv2d layer.
In comparison with tf.layers.conv2d this implementation use the He initializer
to initialize convolutional kernel and the weight scaling trick (if
`use_weight_scaling` is True) to equalize learning rates. See
https://arxiv.org/abs/1710.10196 for more details.
Args:
x: A `Tensor` of NHWC format.
filters: An int of output channels.
kernel_size: An integer or a int tuple of [kernel_height, kernel_width].
strides: A list of strides.
padding: One of "VALID" or "SAME".
activation: An activation function to be applied. None means no activation.
Defaults to None.
he_initializer_slope: A float slope for the He initializer. Defaults to 1.0.
use_weight_scaling: Whether to apply weight scaling. Defaults to True.
scope: A string or variable scope.
reuse: Whether to reuse the weights. Defaults to None.
Returns:
A `Tensor` of NHWC format where the last dimension has size `filters`.
"""
if not isinstance(kernel_size, (list, tuple)):
kernel_size = [kernel_size] * 2
kernel_size = list(kernel_size)
def _apply_kernel(kernel_shape, kernel_initializer):
return tf.layers.conv2d(
x,
filters=filters,
kernel_size=kernel_shape[0:2],
strides=strides,
padding=padding,
use_bias=False,
kernel_initializer=kernel_initializer)
with tf.variable_scope(scope, reuse=reuse):
return _custom_layer_impl(
_apply_kernel,
kernel_shape=kernel_size + [x.shape.as_list()[3], filters],
bias_shape=(filters,),
activation=activation,
he_initializer_slope=he_initializer_slope,
use_weight_scaling=use_weight_scaling)
def custom_dense(x,
units,
activation=None,
he_initializer_slope=1.0,
use_weight_scaling=True,
scope='custom_dense',
reuse=None):
"""Custom dense layer.
In comparison with tf.layers.dense This implementation use the He
initializer to initialize weights and the weight scaling trick
(if `use_weight_scaling` is True) to equalize learning rates. See
https://arxiv.org/abs/1710.10196 for more details.
Args:
x: A `Tensor`.
units: An int of the last dimension size of output.
activation: An activation function to be applied. None means no activation.
Defaults to None.
he_initializer_slope: A float slope for the He initializer. Defaults to 1.0.
use_weight_scaling: Whether to apply weight scaling. Defaults to True.
scope: A string or variable scope.
reuse: Whether to reuse the weights. Defaults to None.
Returns:
A `Tensor` where the last dimension has size `units`.
"""
x = tf.layers.flatten(x)
def _apply_kernel(kernel_shape, kernel_initializer):
return tf.layers.dense(
x,
kernel_shape[1],
use_bias=False,
kernel_initializer=kernel_initializer)
with tf.variable_scope(scope, reuse=reuse):
return _custom_layer_impl(
_apply_kernel,
kernel_shape=(x.shape.as_list()[-1], units),
bias_shape=(units,),
activation=activation,
he_initializer_slope=he_initializer_slope,
use_weight_scaling=use_weight_scaling) |
Args:
shape: A list of ints representing the dimensions of a tensor.
slope: A float representing the slope of the ReLu following the layer.
| random_line_split |
layers.py | # coding=utf-8
# Copyright 2022 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layers for a progressive GAN model.
This module contains basic building blocks to build a progressive GAN model.
See https://arxiv.org/abs/1710.10196 for details about the model.
See https://github.com/tkarras/progressive_growing_of_gans for the original
theano implementation.
"""
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow_gan.examples import compat_utils
def pixel_norm(images, epsilon=1.0e-8):
"""Pixel normalization.
For each pixel a[i,j,k] of image in HWC format, normalize its value to
b[i,j,k] = a[i,j,k] / SQRT(SUM_k(a[i,j,k]^2) / C + eps).
Args:
images: A 4D `Tensor` of NHWC format.
epsilon: A small positive number to avoid division by zero.
Returns:
A 4D `Tensor` with pixel-wise normalized channels.
"""
return images * tf.math.rsqrt(
tf.reduce_mean(input_tensor=tf.square(images), axis=3, keepdims=True) +
epsilon)
def _get_validated_scale(scale):
"""Returns the scale guaranteed to be a positive integer."""
scale = int(scale)
if scale <= 0:
raise ValueError('`scale` must be a positive integer.')
return scale
def downscale(images, scale):
"""Box downscaling of images.
Args:
images: A 4D `Tensor` in NHWC format.
scale: A positive integer scale.
Returns:
A 4D `Tensor` of `images` down scaled by a factor `scale`.
Raises:
ValueError: If `scale` is not a positive integer.
"""
scale = _get_validated_scale(scale)
if scale == 1:
return images
return compat_utils.nn_avg_pool2d(
input=images,
ksize=[1, scale, scale, 1],
strides=[1, scale, scale, 1],
padding='VALID')
def upscale(images, scale):
"""Box upscaling (also called nearest neighbors) of images.
Args:
images: A 4D `Tensor` in NHWC format.
scale: A positive integer scale.
Returns:
A 4D `Tensor` of `images` up scaled by a factor `scale`.
Raises:
ValueError: If `scale` is not a positive integer.
"""
scale = _get_validated_scale(scale)
if scale == 1:
return images
return compat_utils.batch_to_space(
input=tf.tile(images, [scale**2, 1, 1, 1]),
crops=[[0, 0], [0, 0]],
block_shape=scale)
def minibatch_mean_stddev(x):
"""Computes the standard deviation average.
This is used by the discriminator as a form of batch discrimination.
Args:
x: A `Tensor` for which to compute the standard deviation average. The first
dimension must be batch size.
Returns:
A scalar `Tensor` which is the mean variance of variable x.
"""
mean, var = tf.nn.moments(x=x, axes=[0])
del mean
return tf.reduce_mean(input_tensor=tf.sqrt(var))
def scalar_concat(tensor, scalar):
"""Concatenates a scalar to the last dimension of a tensor.
Args:
tensor: A `Tensor`.
scalar: a scalar `Tensor` to concatenate to tensor `tensor`.
Returns:
A `Tensor`. If `tensor` has shape [...,N], the result R has shape
[...,N+1] and R[...,N] = scalar.
Raises:
ValueError: If `tensor` is a scalar `Tensor`.
"""
ndims = tensor.shape.ndims
if ndims < 1:
raise ValueError('`tensor` must have number of dimensions >= 1.')
shape = tf.shape(input=tensor)
return tf.concat(
[tensor,
tf.ones([shape[i] for i in range(ndims - 1)] + [1]) * scalar],
axis=ndims - 1)
def he_initializer_scale(shape, slope=1.0):
"""The scale of He neural network initializer.
Args:
shape: A list of ints representing the dimensions of a tensor.
slope: A float representing the slope of the ReLu following the layer.
Returns:
A float of he initializer scale.
"""
fan_in = np.prod(shape[:-1])
return np.sqrt(2. / ((1. + slope**2) * fan_in))
def _custom_layer_impl(apply_kernel, kernel_shape, bias_shape, activation,
he_initializer_slope, use_weight_scaling):
"""Helper function to implement custom_xxx layer.
Args:
apply_kernel: A function that transforms kernel to output.
kernel_shape: An integer tuple or list of the kernel shape.
bias_shape: An integer tuple or list of the bias shape.
activation: An activation function to be applied. None means no activation.
he_initializer_slope: A float slope for the He initializer.
use_weight_scaling: Whether to apply weight scaling.
Returns:
A `Tensor` computed as apply_kernel(kernel) + bias where kernel is a
`Tensor` variable with shape `kernel_shape`, bias is a `Tensor` variable
with shape `bias_shape`.
"""
kernel_scale = he_initializer_scale(kernel_shape, he_initializer_slope)
init_scale, post_scale = kernel_scale, 1.0
if use_weight_scaling:
init_scale, post_scale = post_scale, init_scale
kernel_initializer = tf.random_normal_initializer(stddev=init_scale)
bias = tf.get_variable(
'bias', shape=bias_shape, initializer=tf.zeros_initializer())
output = post_scale * apply_kernel(kernel_shape, kernel_initializer) + bias
if activation is not None:
output = activation(output)
return output
def custom_conv2d(x,
filters,
kernel_size,
strides=(1, 1),
padding='SAME',
activation=None,
he_initializer_slope=1.0,
use_weight_scaling=True,
scope='custom_conv2d',
reuse=None):
"""Custom conv2d layer.
In comparison with tf.layers.conv2d this implementation use the He initializer
to initialize convolutional kernel and the weight scaling trick (if
`use_weight_scaling` is True) to equalize learning rates. See
https://arxiv.org/abs/1710.10196 for more details.
Args:
x: A `Tensor` of NHWC format.
filters: An int of output channels.
kernel_size: An integer or a int tuple of [kernel_height, kernel_width].
strides: A list of strides.
padding: One of "VALID" or "SAME".
activation: An activation function to be applied. None means no activation.
Defaults to None.
he_initializer_slope: A float slope for the He initializer. Defaults to 1.0.
use_weight_scaling: Whether to apply weight scaling. Defaults to True.
scope: A string or variable scope.
reuse: Whether to reuse the weights. Defaults to None.
Returns:
A `Tensor` of NHWC format where the last dimension has size `filters`.
"""
if not isinstance(kernel_size, (list, tuple)):
kernel_size = [kernel_size] * 2
kernel_size = list(kernel_size)
def | (kernel_shape, kernel_initializer):
return tf.layers.conv2d(
x,
filters=filters,
kernel_size=kernel_shape[0:2],
strides=strides,
padding=padding,
use_bias=False,
kernel_initializer=kernel_initializer)
with tf.variable_scope(scope, reuse=reuse):
return _custom_layer_impl(
_apply_kernel,
kernel_shape=kernel_size + [x.shape.as_list()[3], filters],
bias_shape=(filters,),
activation=activation,
he_initializer_slope=he_initializer_slope,
use_weight_scaling=use_weight_scaling)
def custom_dense(x,
units,
activation=None,
he_initializer_slope=1.0,
use_weight_scaling=True,
scope='custom_dense',
reuse=None):
"""Custom dense layer.
In comparison with tf.layers.dense This implementation use the He
initializer to initialize weights and the weight scaling trick
(if `use_weight_scaling` is True) to equalize learning rates. See
https://arxiv.org/abs/1710.10196 for more details.
Args:
x: A `Tensor`.
units: An int of the last dimension size of output.
activation: An activation function to be applied. None means no activation.
Defaults to None.
he_initializer_slope: A float slope for the He initializer. Defaults to 1.0.
use_weight_scaling: Whether to apply weight scaling. Defaults to True.
scope: A string or variable scope.
reuse: Whether to reuse the weights. Defaults to None.
Returns:
A `Tensor` where the last dimension has size `units`.
"""
x = tf.layers.flatten(x)
def _apply_kernel(kernel_shape, kernel_initializer):
return tf.layers.dense(
x,
kernel_shape[1],
use_bias=False,
kernel_initializer=kernel_initializer)
with tf.variable_scope(scope, reuse=reuse):
return _custom_layer_impl(
_apply_kernel,
kernel_shape=(x.shape.as_list()[-1], units),
bias_shape=(units,),
activation=activation,
he_initializer_slope=he_initializer_slope,
use_weight_scaling=use_weight_scaling)
| _apply_kernel | identifier_name |
vs-11.0-scc-files.py | #!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/MSVS/vs-11.0-scc-files.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Test that we can generate Visual Studio 11.0 project (.vcxproj) and
solution (.sln) files that contain SCC information and look correct.
"""
import os
import TestSConsMSVS
test = TestSConsMSVS.TestSConsMSVS()
# Make the test infrastructure think we have this version of MSVS installed.
test._msvs_versions = ['11.0']
expected_slnfile = TestSConsMSVS.expected_slnfile_11_0
expected_vcprojfile = TestSConsMSVS.expected_vcprojfile_11_0
SConscript_contents = """\
env=Environment(platform='win32', tools=['msvs'], MSVS_VERSION='11.0',
CPPDEFINES=['DEF1', 'DEF2',('DEF3','1234')],
CPPPATH=['inc1', 'inc2'],
MSVS_SCC_CONNECTION_ROOT='.',
MSVS_SCC_PROVIDER='MSSCCI:Perforce SCM',
MSVS_SCC_PROJECT_NAME='Perforce Project')
testsrc = ['test1.cpp', 'test2.cpp']
testincs = ['sdk_dir\sdk.h']
testlocalincs = ['test.h']
testresources = ['test.rc']
testmisc = ['readme.txt']
env.MSVSProject(target = 'Test.vcxproj',
srcs = testsrc,
incs = testincs,
localincs = testlocalincs,
resources = testresources,
misc = testmisc,
buildtarget = 'Test.exe',
variant = 'Release')
"""
expected_sln_sccinfo = """\
\tGlobalSection(SourceCodeControl) = preSolution
\t\tSccNumberOfProjects = 2
\t\tSccProjectName0 = Perforce\u0020Project
\t\tSccLocalPath0 = .
\t\tSccProvider0 = MSSCCI:Perforce\u0020SCM
\t\tCanCheckoutShared = true
\t\tSccProjectUniqueName1 = Test.vcxproj
\t\tSccLocalPath1 = .
\t\tCanCheckoutShared = true | \t\t<SccProjectName>Perforce Project</SccProjectName>
\t\t<SccLocalPath>.</SccLocalPath>
\t\t<SccProvider>MSSCCI:Perforce SCM</SccProvider>
"""
test.write('SConstruct', SConscript_contents)
test.run(arguments="Test.vcxproj")
test.must_exist(test.workpath('Test.vcxproj'))
vcproj = test.read('Test.vcxproj', 'r')
expect = test.msvs_substitute(expected_vcprojfile, '11.0', None, 'SConstruct',
vcproj_sccinfo=expected_vcproj_sccinfo)
# don't compare the pickled data
assert vcproj[:len(expect)] == expect, test.diff_substr(expect, vcproj)
test.must_exist(test.workpath('Test.sln'))
sln = test.read('Test.sln', 'r')
expect = test.msvs_substitute(expected_slnfile, '11.0', None, 'SConstruct',
sln_sccinfo=expected_sln_sccinfo)
# don't compare the pickled data
assert sln[:len(expect)] == expect, test.diff_substr(expect, sln)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | \t\tSccProjectFilePathRelativizedFromConnection1 = .\\\\
\tEndGlobalSection
"""
expected_vcproj_sccinfo = """\ | random_line_split |
main.py | """ """
from __future__ import unicode_literals, division, print_function, absolute_import
import argparse
import codecs
import sys
from sqlalchemy.engine import create_engine
from sqlalchemy.schema import MetaData
from sqlacodegen.codegen import CodeGenerator
import sqlacodegen
def | ():
parser = argparse.ArgumentParser(description='Generates SQLAlchemy model code from an existing database.')
parser.add_argument('url', nargs='?', help='SQLAlchemy url to the database')
parser.add_argument('--version', action='store_true', help="print the version number and exit")
parser.add_argument('--schema', help='load tables from an alternate schema')
parser.add_argument('--tables', help='tables to process (comma-separated, default: all)')
parser.add_argument('--noviews', action='store_true', help="ignore views")
parser.add_argument('--noindexes', action='store_true', help='ignore indexes')
parser.add_argument('--noconstraints', action='store_true', help='ignore constraints')
parser.add_argument('--nojoined', action='store_true', help="don't autodetect joined table inheritance")
parser.add_argument('--noinflect', action='store_true', help="don't try to convert tables names to singular form")
parser.add_argument('--noclasses', action='store_true', help="don't generate classes, only tables")
parser.add_argument('--alwaysclasses', action='store_true', help="always generate classes")
parser.add_argument('--nosequences', action='store_true', help="don't auto-generate postgresql sequences")
parser.add_argument('--outfile', help='file to write output to (default: stdout)')
args = parser.parse_args()
if args.version:
print(sqlacodegen.version)
return
if not args.url:
print('You must supply a url\n', file=sys.stderr)
parser.print_help()
return
engine = create_engine(args.url)
metadata = MetaData(engine)
tables = args.tables.split(',') if args.tables else None
metadata.reflect(engine, args.schema, not args.noviews, tables)
outfile = codecs.open(args.outfile, 'w', encoding='utf-8') if args.outfile else sys.stdout
generator = CodeGenerator(metadata, args.noindexes, args.noconstraints, args.nojoined, args.noinflect,
args.noclasses, args.alwaysclasses, args.nosequences)
generator.render(outfile)
| main | identifier_name |
main.py | """ """
from __future__ import unicode_literals, division, print_function, absolute_import
import argparse
import codecs
import sys
from sqlalchemy.engine import create_engine
from sqlalchemy.schema import MetaData
from sqlacodegen.codegen import CodeGenerator
import sqlacodegen
def main():
parser = argparse.ArgumentParser(description='Generates SQLAlchemy model code from an existing database.')
parser.add_argument('url', nargs='?', help='SQLAlchemy url to the database')
parser.add_argument('--version', action='store_true', help="print the version number and exit")
parser.add_argument('--schema', help='load tables from an alternate schema')
parser.add_argument('--tables', help='tables to process (comma-separated, default: all)')
parser.add_argument('--noviews', action='store_true', help="ignore views")
parser.add_argument('--noindexes', action='store_true', help='ignore indexes')
parser.add_argument('--noconstraints', action='store_true', help='ignore constraints')
parser.add_argument('--nojoined', action='store_true', help="don't autodetect joined table inheritance")
parser.add_argument('--noinflect', action='store_true', help="don't try to convert tables names to singular form")
parser.add_argument('--noclasses', action='store_true', help="don't generate classes, only tables")
parser.add_argument('--alwaysclasses', action='store_true', help="always generate classes")
parser.add_argument('--nosequences', action='store_true', help="don't auto-generate postgresql sequences")
parser.add_argument('--outfile', help='file to write output to (default: stdout)')
args = parser.parse_args()
if args.version:
print(sqlacodegen.version)
return
if not args.url:
|
engine = create_engine(args.url)
metadata = MetaData(engine)
tables = args.tables.split(',') if args.tables else None
metadata.reflect(engine, args.schema, not args.noviews, tables)
outfile = codecs.open(args.outfile, 'w', encoding='utf-8') if args.outfile else sys.stdout
generator = CodeGenerator(metadata, args.noindexes, args.noconstraints, args.nojoined, args.noinflect,
args.noclasses, args.alwaysclasses, args.nosequences)
generator.render(outfile)
| print('You must supply a url\n', file=sys.stderr)
parser.print_help()
return | conditional_block |
main.py | """ """
from __future__ import unicode_literals, division, print_function, absolute_import
import argparse
import codecs
import sys
from sqlalchemy.engine import create_engine
from sqlalchemy.schema import MetaData
from sqlacodegen.codegen import CodeGenerator
import sqlacodegen
def main():
parser = argparse.ArgumentParser(description='Generates SQLAlchemy model code from an existing database.')
parser.add_argument('url', nargs='?', help='SQLAlchemy url to the database')
parser.add_argument('--version', action='store_true', help="print the version number and exit") | parser.add_argument('--noindexes', action='store_true', help='ignore indexes')
parser.add_argument('--noconstraints', action='store_true', help='ignore constraints')
parser.add_argument('--nojoined', action='store_true', help="don't autodetect joined table inheritance")
parser.add_argument('--noinflect', action='store_true', help="don't try to convert tables names to singular form")
parser.add_argument('--noclasses', action='store_true', help="don't generate classes, only tables")
parser.add_argument('--alwaysclasses', action='store_true', help="always generate classes")
parser.add_argument('--nosequences', action='store_true', help="don't auto-generate postgresql sequences")
parser.add_argument('--outfile', help='file to write output to (default: stdout)')
args = parser.parse_args()
if args.version:
print(sqlacodegen.version)
return
if not args.url:
print('You must supply a url\n', file=sys.stderr)
parser.print_help()
return
engine = create_engine(args.url)
metadata = MetaData(engine)
tables = args.tables.split(',') if args.tables else None
metadata.reflect(engine, args.schema, not args.noviews, tables)
outfile = codecs.open(args.outfile, 'w', encoding='utf-8') if args.outfile else sys.stdout
generator = CodeGenerator(metadata, args.noindexes, args.noconstraints, args.nojoined, args.noinflect,
args.noclasses, args.alwaysclasses, args.nosequences)
generator.render(outfile) | parser.add_argument('--schema', help='load tables from an alternate schema')
parser.add_argument('--tables', help='tables to process (comma-separated, default: all)')
parser.add_argument('--noviews', action='store_true', help="ignore views") | random_line_split |
main.py | """ """
from __future__ import unicode_literals, division, print_function, absolute_import
import argparse
import codecs
import sys
from sqlalchemy.engine import create_engine
from sqlalchemy.schema import MetaData
from sqlacodegen.codegen import CodeGenerator
import sqlacodegen
def main():
| parser = argparse.ArgumentParser(description='Generates SQLAlchemy model code from an existing database.')
parser.add_argument('url', nargs='?', help='SQLAlchemy url to the database')
parser.add_argument('--version', action='store_true', help="print the version number and exit")
parser.add_argument('--schema', help='load tables from an alternate schema')
parser.add_argument('--tables', help='tables to process (comma-separated, default: all)')
parser.add_argument('--noviews', action='store_true', help="ignore views")
parser.add_argument('--noindexes', action='store_true', help='ignore indexes')
parser.add_argument('--noconstraints', action='store_true', help='ignore constraints')
parser.add_argument('--nojoined', action='store_true', help="don't autodetect joined table inheritance")
parser.add_argument('--noinflect', action='store_true', help="don't try to convert tables names to singular form")
parser.add_argument('--noclasses', action='store_true', help="don't generate classes, only tables")
parser.add_argument('--alwaysclasses', action='store_true', help="always generate classes")
parser.add_argument('--nosequences', action='store_true', help="don't auto-generate postgresql sequences")
parser.add_argument('--outfile', help='file to write output to (default: stdout)')
args = parser.parse_args()
if args.version:
print(sqlacodegen.version)
return
if not args.url:
print('You must supply a url\n', file=sys.stderr)
parser.print_help()
return
engine = create_engine(args.url)
metadata = MetaData(engine)
tables = args.tables.split(',') if args.tables else None
metadata.reflect(engine, args.schema, not args.noviews, tables)
outfile = codecs.open(args.outfile, 'w', encoding='utf-8') if args.outfile else sys.stdout
generator = CodeGenerator(metadata, args.noindexes, args.noconstraints, args.nojoined, args.noinflect,
args.noclasses, args.alwaysclasses, args.nosequences)
generator.render(outfile) | identifier_body |
|
xhr_backend.ts | import {ConnectionBackend, Connection} from '../interfaces';
import {ReadyStates, RequestMethods, ResponseTypes} from '../enums';
import {Request} from '../static_request';
import {Response} from '../static_response';
import {ResponseOptions, BaseResponseOptions} from '../base_response_options';
import {Injectable} from 'angular2/src/core/di';
import {BrowserXhr} from './browser_xhr';
import {EventEmitter, ObservableWrapper} from 'angular2/src/core/facade/async';
import {isPresent} from 'angular2/src/core/facade/lang';
/**
* Creates connections using `XMLHttpRequest`. Given a fully-qualified
* request, an `XHRConnection` will immediately create an `XMLHttpRequest` object and send the
* request.
*
* This class would typically not be created or interacted with directly inside applications, though
* the {@link MockConnection} may be interacted with in tests.
*/
export class XHRConnection implements Connection {
request: Request;
/**
* Response {@link EventEmitter} which emits a single {@link Response} value on load event of
* `XMLHttpRequest`.
*/
response: EventEmitter; // TODO: Make generic of <Response>;
readyState: ReadyStates;
private _xhr; // TODO: make type XMLHttpRequest, pending resolution of
// https://github.com/angular/ts2dart/issues/230
constructor(req: Request, browserXHR: BrowserXhr, baseResponseOptions?: ResponseOptions) {
this.request = req;
this.response = new EventEmitter();
this._xhr = browserXHR.build();
// TODO(jeffbcross): implement error listening/propagation
this._xhr.open(RequestMethods[req.method].toUpperCase(), req.url);
this._xhr.addEventListener('load', (_) => {
// responseText is the old-school way of retrieving response (supported by IE8 & 9)
// response/responseType properties were introduced in XHR Level2 spec (supported by IE10)
let response = isPresent(this._xhr.response) ? this._xhr.response : this._xhr.responseText;
// normalize IE9 bug (http://bugs.jquery.com/ticket/1450)
let status = this._xhr.status === 1223 ? 204 : this._xhr.status;
// fix status code when it is 0 (0 status is undocumented).
// Occurs when accessing file resources or on Android 4.1 stock browser
// while retrieving files from application cache.
if (status === 0) {
status = response ? 200 : 0;
}
var responseOptions = new ResponseOptions({body: response, status: status});
if (isPresent(baseResponseOptions)) {
responseOptions = baseResponseOptions.merge(responseOptions);
}
ObservableWrapper.callNext(this.response, new Response(responseOptions));
// TODO(gdi2290): defer complete if array buffer until done
ObservableWrapper.callReturn(this.response);
});
this._xhr.addEventListener('error', (err) => {
var responseOptions = new ResponseOptions({body: err, type: ResponseTypes.Error});
if (isPresent(baseResponseOptions)) {
responseOptions = baseResponseOptions.merge(responseOptions);
}
ObservableWrapper.callThrow(this.response, new Response(responseOptions));
});
// TODO(jeffbcross): make this more dynamic based on body type
if (isPresent(req.headers)) {
req.headers.forEach((value, name) => { this._xhr.setRequestHeader(name, value); });
}
this._xhr.send(this.request.text());
}
/**
* Calls abort on the underlying XMLHttpRequest.
*/
dispose(): void { this._xhr.abort(); }
}
/**
* Creates {@link XHRConnection} instances.
*
* This class would typically not be used by end users, but could be
* overridden if a different backend implementation should be used,
* such as in a node backend.
*
* #Example
*
* ```
* import {Http, MyNodeBackend, HTTP_BINDINGS, BaseRequestOptions} from 'angular2/http';
* @Component({
* viewBindings: [
* HTTP_BINDINGS,
* bind(Http).toFactory((backend, options) => {
* return new Http(backend, options);
* }, [MyNodeBackend, BaseRequestOptions])]
* })
* class MyComponent {
* constructor(http:Http) {
* http('people.json').toRx().subscribe(res => this.people = res.json());
* }
* }
* ```
*
**/
@Injectable()
export class XHRBackend implements ConnectionBackend {
| (private _browserXHR: BrowserXhr, private _baseResponseOptions: ResponseOptions) {}
createConnection(request: Request): XHRConnection {
return new XHRConnection(request, this._browserXHR, this._baseResponseOptions);
}
}
| constructor | identifier_name |
xhr_backend.ts | import {ConnectionBackend, Connection} from '../interfaces';
import {ReadyStates, RequestMethods, ResponseTypes} from '../enums';
import {Request} from '../static_request';
import {Response} from '../static_response';
import {ResponseOptions, BaseResponseOptions} from '../base_response_options';
import {Injectable} from 'angular2/src/core/di';
import {BrowserXhr} from './browser_xhr';
import {EventEmitter, ObservableWrapper} from 'angular2/src/core/facade/async';
import {isPresent} from 'angular2/src/core/facade/lang';
/**
* Creates connections using `XMLHttpRequest`. Given a fully-qualified
* request, an `XHRConnection` will immediately create an `XMLHttpRequest` object and send the
* request.
*
* This class would typically not be created or interacted with directly inside applications, though
* the {@link MockConnection} may be interacted with in tests.
*/
export class XHRConnection implements Connection {
request: Request;
/**
* Response {@link EventEmitter} which emits a single {@link Response} value on load event of
* `XMLHttpRequest`.
*/
response: EventEmitter; // TODO: Make generic of <Response>;
readyState: ReadyStates;
private _xhr; // TODO: make type XMLHttpRequest, pending resolution of
// https://github.com/angular/ts2dart/issues/230
constructor(req: Request, browserXHR: BrowserXhr, baseResponseOptions?: ResponseOptions) {
this.request = req;
this.response = new EventEmitter();
this._xhr = browserXHR.build();
// TODO(jeffbcross): implement error listening/propagation
this._xhr.open(RequestMethods[req.method].toUpperCase(), req.url);
this._xhr.addEventListener('load', (_) => {
// responseText is the old-school way of retrieving response (supported by IE8 & 9)
// response/responseType properties were introduced in XHR Level2 spec (supported by IE10)
let response = isPresent(this._xhr.response) ? this._xhr.response : this._xhr.responseText;
// normalize IE9 bug (http://bugs.jquery.com/ticket/1450)
let status = this._xhr.status === 1223 ? 204 : this._xhr.status;
// fix status code when it is 0 (0 status is undocumented).
// Occurs when accessing file resources or on Android 4.1 stock browser
// while retrieving files from application cache.
if (status === 0) {
status = response ? 200 : 0;
}
var responseOptions = new ResponseOptions({body: response, status: status});
if (isPresent(baseResponseOptions)) |
ObservableWrapper.callNext(this.response, new Response(responseOptions));
// TODO(gdi2290): defer complete if array buffer until done
ObservableWrapper.callReturn(this.response);
});
this._xhr.addEventListener('error', (err) => {
var responseOptions = new ResponseOptions({body: err, type: ResponseTypes.Error});
if (isPresent(baseResponseOptions)) {
responseOptions = baseResponseOptions.merge(responseOptions);
}
ObservableWrapper.callThrow(this.response, new Response(responseOptions));
});
// TODO(jeffbcross): make this more dynamic based on body type
if (isPresent(req.headers)) {
req.headers.forEach((value, name) => { this._xhr.setRequestHeader(name, value); });
}
this._xhr.send(this.request.text());
}
/**
* Calls abort on the underlying XMLHttpRequest.
*/
dispose(): void { this._xhr.abort(); }
}
/**
* Creates {@link XHRConnection} instances.
*
* This class would typically not be used by end users, but could be
* overridden if a different backend implementation should be used,
* such as in a node backend.
*
* #Example
*
* ```
* import {Http, MyNodeBackend, HTTP_BINDINGS, BaseRequestOptions} from 'angular2/http';
* @Component({
* viewBindings: [
* HTTP_BINDINGS,
* bind(Http).toFactory((backend, options) => {
* return new Http(backend, options);
* }, [MyNodeBackend, BaseRequestOptions])]
* })
* class MyComponent {
* constructor(http:Http) {
* http('people.json').toRx().subscribe(res => this.people = res.json());
* }
* }
* ```
*
**/
@Injectable()
export class XHRBackend implements ConnectionBackend {
constructor(private _browserXHR: BrowserXhr, private _baseResponseOptions: ResponseOptions) {}
createConnection(request: Request): XHRConnection {
return new XHRConnection(request, this._browserXHR, this._baseResponseOptions);
}
}
| {
responseOptions = baseResponseOptions.merge(responseOptions);
} | conditional_block |
xhr_backend.ts | import {ConnectionBackend, Connection} from '../interfaces';
import {ReadyStates, RequestMethods, ResponseTypes} from '../enums';
import {Request} from '../static_request';
import {Response} from '../static_response';
import {ResponseOptions, BaseResponseOptions} from '../base_response_options';
import {Injectable} from 'angular2/src/core/di';
import {BrowserXhr} from './browser_xhr';
import {EventEmitter, ObservableWrapper} from 'angular2/src/core/facade/async';
import {isPresent} from 'angular2/src/core/facade/lang';
/**
* Creates connections using `XMLHttpRequest`. Given a fully-qualified
* request, an `XHRConnection` will immediately create an `XMLHttpRequest` object and send the
* request.
*
* This class would typically not be created or interacted with directly inside applications, though
* the {@link MockConnection} may be interacted with in tests.
*/
export class XHRConnection implements Connection {
request: Request;
/**
* Response {@link EventEmitter} which emits a single {@link Response} value on load event of
* `XMLHttpRequest`.
*/
response: EventEmitter; // TODO: Make generic of <Response>;
readyState: ReadyStates;
private _xhr; // TODO: make type XMLHttpRequest, pending resolution of
// https://github.com/angular/ts2dart/issues/230
constructor(req: Request, browserXHR: BrowserXhr, baseResponseOptions?: ResponseOptions) {
this.request = req;
this.response = new EventEmitter();
this._xhr = browserXHR.build();
// TODO(jeffbcross): implement error listening/propagation
this._xhr.open(RequestMethods[req.method].toUpperCase(), req.url);
this._xhr.addEventListener('load', (_) => {
// responseText is the old-school way of retrieving response (supported by IE8 & 9)
// response/responseType properties were introduced in XHR Level2 spec (supported by IE10)
let response = isPresent(this._xhr.response) ? this._xhr.response : this._xhr.responseText;
// normalize IE9 bug (http://bugs.jquery.com/ticket/1450)
let status = this._xhr.status === 1223 ? 204 : this._xhr.status;
// fix status code when it is 0 (0 status is undocumented).
// Occurs when accessing file resources or on Android 4.1 stock browser
// while retrieving files from application cache.
if (status === 0) {
status = response ? 200 : 0;
}
var responseOptions = new ResponseOptions({body: response, status: status});
if (isPresent(baseResponseOptions)) { | }
ObservableWrapper.callNext(this.response, new Response(responseOptions));
// TODO(gdi2290): defer complete if array buffer until done
ObservableWrapper.callReturn(this.response);
});
this._xhr.addEventListener('error', (err) => {
var responseOptions = new ResponseOptions({body: err, type: ResponseTypes.Error});
if (isPresent(baseResponseOptions)) {
responseOptions = baseResponseOptions.merge(responseOptions);
}
ObservableWrapper.callThrow(this.response, new Response(responseOptions));
});
// TODO(jeffbcross): make this more dynamic based on body type
if (isPresent(req.headers)) {
req.headers.forEach((value, name) => { this._xhr.setRequestHeader(name, value); });
}
this._xhr.send(this.request.text());
}
/**
* Calls abort on the underlying XMLHttpRequest.
*/
dispose(): void { this._xhr.abort(); }
}
/**
* Creates {@link XHRConnection} instances.
*
* This class would typically not be used by end users, but could be
* overridden if a different backend implementation should be used,
* such as in a node backend.
*
* #Example
*
* ```
* import {Http, MyNodeBackend, HTTP_BINDINGS, BaseRequestOptions} from 'angular2/http';
* @Component({
* viewBindings: [
* HTTP_BINDINGS,
* bind(Http).toFactory((backend, options) => {
* return new Http(backend, options);
* }, [MyNodeBackend, BaseRequestOptions])]
* })
* class MyComponent {
* constructor(http:Http) {
* http('people.json').toRx().subscribe(res => this.people = res.json());
* }
* }
* ```
*
**/
@Injectable()
export class XHRBackend implements ConnectionBackend {
constructor(private _browserXHR: BrowserXhr, private _baseResponseOptions: ResponseOptions) {}
createConnection(request: Request): XHRConnection {
return new XHRConnection(request, this._browserXHR, this._baseResponseOptions);
}
} | responseOptions = baseResponseOptions.merge(responseOptions); | random_line_split |
channel_list.rs | use ui::ncurses::*;
use ui::{RIGHT_PANEL_WIDTH, CMD_ENTRY_HEIGHT, STATS_PANEL_HEIGHT, Position, Size};
use ui::window::BorderWindow;
pub struct ChannelList {
window: BorderWindow,
channels: Vec<String>,
}
impl ChannelList {
pub fn new(size: Size) -> ChannelList {
let window = BorderWindow::new(
Position::new(size.width - RIGHT_PANEL_WIDTH, 0),
Size::new(RIGHT_PANEL_WIDTH, size.height - STATS_PANEL_HEIGHT - CMD_ENTRY_HEIGHT),
Size::new(1, 1));
ChannelList {
window: window,
channels: Vec::new(),
}
}
fn display_channel(&self, index: i32, name: &str) {
mvwaddstr(self.window.inner.id,
index,
1,
&format!("{} - {}", index + 1, name));
wrefresh(self.window.inner.id);
}
pub fn add_channel(&mut self, name: &str) {
self.channels.push(name.to_string());
self.display_channel(self.channels.len() as i32 - 1, name);
}
pub fn remove_channel(&mut self, name: &str) -> bool {
let index = self.channels.iter().position(|ref s| s.as_str() == name);
match index {
Some(index) => {
self.channels.remove(index);
// TODO: Separate this redraw code into its own function once this method is actually used
wclear(self.window.inner.id);
for (index, channel) in self.channels.iter().by_ref().enumerate() {
self.display_channel(index as i32, &channel);
}
true | },
None => false,
}
}
} | random_line_split |
|
channel_list.rs | use ui::ncurses::*;
use ui::{RIGHT_PANEL_WIDTH, CMD_ENTRY_HEIGHT, STATS_PANEL_HEIGHT, Position, Size};
use ui::window::BorderWindow;
pub struct ChannelList {
window: BorderWindow,
channels: Vec<String>,
}
impl ChannelList {
pub fn new(size: Size) -> ChannelList {
let window = BorderWindow::new(
Position::new(size.width - RIGHT_PANEL_WIDTH, 0),
Size::new(RIGHT_PANEL_WIDTH, size.height - STATS_PANEL_HEIGHT - CMD_ENTRY_HEIGHT),
Size::new(1, 1));
ChannelList {
window: window,
channels: Vec::new(),
}
}
fn display_channel(&self, index: i32, name: &str) |
pub fn add_channel(&mut self, name: &str) {
self.channels.push(name.to_string());
self.display_channel(self.channels.len() as i32 - 1, name);
}
pub fn remove_channel(&mut self, name: &str) -> bool {
let index = self.channels.iter().position(|ref s| s.as_str() == name);
match index {
Some(index) => {
self.channels.remove(index);
// TODO: Separate this redraw code into its own function once this method is actually used
wclear(self.window.inner.id);
for (index, channel) in self.channels.iter().by_ref().enumerate() {
self.display_channel(index as i32, &channel);
}
true
},
None => false,
}
}
} | {
mvwaddstr(self.window.inner.id,
index,
1,
&format!("{} - {}", index + 1, name));
wrefresh(self.window.inner.id);
} | identifier_body |
channel_list.rs | use ui::ncurses::*;
use ui::{RIGHT_PANEL_WIDTH, CMD_ENTRY_HEIGHT, STATS_PANEL_HEIGHT, Position, Size};
use ui::window::BorderWindow;
pub struct ChannelList {
window: BorderWindow,
channels: Vec<String>,
}
impl ChannelList {
pub fn new(size: Size) -> ChannelList {
let window = BorderWindow::new(
Position::new(size.width - RIGHT_PANEL_WIDTH, 0),
Size::new(RIGHT_PANEL_WIDTH, size.height - STATS_PANEL_HEIGHT - CMD_ENTRY_HEIGHT),
Size::new(1, 1));
ChannelList {
window: window,
channels: Vec::new(),
}
}
fn display_channel(&self, index: i32, name: &str) {
mvwaddstr(self.window.inner.id,
index,
1,
&format!("{} - {}", index + 1, name));
wrefresh(self.window.inner.id);
}
pub fn add_channel(&mut self, name: &str) {
self.channels.push(name.to_string());
self.display_channel(self.channels.len() as i32 - 1, name);
}
pub fn | (&mut self, name: &str) -> bool {
let index = self.channels.iter().position(|ref s| s.as_str() == name);
match index {
Some(index) => {
self.channels.remove(index);
// TODO: Separate this redraw code into its own function once this method is actually used
wclear(self.window.inner.id);
for (index, channel) in self.channels.iter().by_ref().enumerate() {
self.display_channel(index as i32, &channel);
}
true
},
None => false,
}
}
} | remove_channel | identifier_name |
FZH04.py | import numpy as n, matplotlib.pyplot as p, scipy.special
import cosmolopy.perturbation as pb
import cosmolopy.density as cd
from scipy.integrate import quad,tplquad
import itertools
from scipy.interpolate import interp1d
from scipy.interpolate import RectBivariateSpline as RBS
import optparse, sys
from sigmas import sig0
o = optparse.OptionParser()
o.add_option('-d','--del0', dest='del0', default=5.)
o.add_option('-m','--mul', dest='mul', default=1.)
o.add_option('-z','--red', dest='red', default=12.)
opts,args = o.parse_args(sys.argv[1:])
print opts, args
Om,sig8,ns,h,Ob = 0.315, 0.829, 0.96, 0.673, 0.0487
Planck13 = {'baryonic_effects':True,'omega_k_0':0,'omega_M_0':0.315, 'omega_b_0':0.0487, 'n':0.96, 'N_nu':0, 'omega_lambda_0':0.685,'omega_n_0':0., 'sigma_8':0.829,'h':0.673}
cosmo = Planck13
def m2R(m):
rhobar = cd.cosmo_densities(**cosmo)[1] #msun/Mpc
RL = (3*m/4/n.pi/rhobar)**(1./3)
return RL
def m2V(m):
rhobar = cd.cosmo_densities(**cosmo)[1] #msun/Mpc
return m/rhobar
def R2m(RL):
rhobar = cd.cosmo_densities(**cosmo)[1] #msun/Mpc
m = 4*n.pi/3*rhobar*RL**3
return m
def mmin(z,Tvir=1.E4):
return pb.virial_mass(Tvir,z,**cosmo)
dmS = n.load('m2S.npz')
MLtemp,SLtemp = dmS['arr_0'],dmS['arr_1']
fs2m = interp1d(SLtemp,MLtemp,kind='cubic')
def S2M(S):
return fs2m(S)
def Deltac(z):
fgrowth = pb.fgrowth(z, cosmo['omega_M_0']) # = D(z)/D(0)
return 1.686/fgrowth
#return 1.686*fgrowth
######################## SIZE DISTRIBUTION #############################
####################### FZH04 ##############################
def fFZH(S,zeta,B0,B1):
res = B0/n.sqrt(2*n.pi*S**3)*n.exp(-B0**2/2/S-B0*B1-B1**2*S/2)
return res
def BFZH(S0,deltac,smin,K):
return deltac-n.sqrt(2*(smin-S0))*K
def BFZHlin(S0,deltac,smin,K):
b0 = deltac-K*n.sqrt(2*smin)
b1 = K/n.sqrt(2*smin)
return b0+b1*S0
def | (S0,deltac,smin,K,d=0.001):
Bp,Bo,Bm = BFZH(S0+d,deltac,smin,K), BFZH(S0,deltac,smin,K), BFZH(S0-d,deltac,smin,K)
return S0/Bo*(Bp-Bm)/2/d
def dlnBFlindlnS0(S0,deltac,smin,K,d=0.001):
Bp,Bo,Bm = BFZHlin(S0+d,deltac,smin,K), BFZHlin(S0,deltac,smin,K), BFZHlin(S0-d,deltac,smin,K)
return S0/Bo*(Bp-Bm)/2/d
##### m_min
dDoZ = n.load('theta.npz')
thetal,DoZl = dDoZ['arr_0'],dDoZ['arr_1']
ftheta = interp1d(DoZl,thetal,kind='cubic')
def theta(z,del0):
return ftheta(del0/(1+z))
def RphysoR0(del0,z):
th = theta(z,del0)
return 3./10/del0*(1-n.cos(th))
def RcovEul(del0,z):
return RphysoR0(del0,z)*(1+z)
def dlinSdlnR(lnR,d=0.001):
res = (n.log(sig0(n.exp(lnR+d)))-n.log(sig0(n.exp(lnR-d))))/d/2
return n.abs(res)
################################## MAIN ######################################
for z in [12., 16.]:
PLOT = True
zeta = 40.
K = scipy.special.erfinv(1-1./zeta)
Tvir = 1.E4
#z = 12.
deltac = Deltac(z)
mm = mmin(z)
M0min = zeta*mm
RLmin,R0min = m2R(mm), m2R(M0min)
print 'R',RLmin
smin = sig0(RLmin)
Rmin = R0min*RcovEul(deltac,z) #S0=smin, so del0=deltac; convertion from lagragian to comoving eulerian
####### FZH04 #######
bFZH0 = deltac-K*n.sqrt(2*smin)
bFZH1 = K/n.sqrt(2*smin)
#bFZH = deltac-n.sqrt(2*(smin-S0))*K
#bFZHlin = bFZH0+bFZH1*S0
def dlnRdlnR0(lnR0,S0,del0):
S0 = sig0(n.exp(lnR0))
del0 = BFZH(S0,deltac,smin,K)
th = theta(z,del0)
thfactor = 1-3./2*th*(th-n.sin(th))/(1-n.cos(th))**2
res = 1-dlinSdlnR(lnR0)*dlnBFdlnS0(S0,deltac,smin,K)*thfactor
return res
def V0dndlnR0(lnR0):
S0 = sig0(n.exp(lnR0))
return S0*fFZH(S0,zeta,bFZH0,bFZH1)*dlinSdlnR(lnR0)
def VdndlnR0(lnR0):
S0 = sig0(n.exp(lnR0))
del0 = BFZHlin(S0,deltac,smin,K)
#lnR0 = n.log(n.exp(lnR)/RcovEul(del0,z))
VoV0 = (RcovEul(del0,z))**3
#return VoV0/dlnRdlnR0(lnR0,S0,del0)*S0*fFZH(S0,zeta,bFZH0,bFZH1)*dlinSdlnR(lnR0)
return VoV0*S0*fFZH(S0,zeta,bFZH0,bFZH1)*dlinSdlnR(lnR0)
def VdndlnR(lnR0):
S0 = sig0(n.exp(lnR0))
del0 = BFZH(S0,deltac,smin,K)
VoV0 = (RcovEul(del0,z))**3
return VoV0/dlnRdlnR0(lnR0,S0,del0)*S0*fFZH(S0,zeta,bFZH0,bFZH1)*dlinSdlnR(lnR0)
if True:
print 'computing z=',z
#Q = quad(lambda lnR: VdndlnR(lnR),n.log(Rmin),3.5) #integrated over eulerian coordinates
Q = quad(lambda lnR0: VdndlnR0(lnR0),n.log(R0min),3.5) #integrated over eulerian coordinates
print 'Q=',Q
Q = Q[0]
#######
lnR0 = n.arange(n.log(R0min),3,0.03)
S0list = []
for lnr0 in lnR0: S0list.append(sig0(n.exp(lnr0)))
S0list = n.array(S0list)
#lnR = n.arange(n.log(Rmin),3,0.1)
del0list = BFZH(S0list,deltac,smin,K)
lnR = n.log(n.exp(lnR0)*RcovEul(del0list,z))
normsize = []
for lnr0 in lnR0:
res = VdndlnR(lnr0)/Q
print n.exp(lnr0),res
normsize.append(res)
p.figure(1)
p.semilogx(n.exp(lnR),normsize,label=str(z))
p.legend()
if True:
S0max = sig0(m2R(M0min))
S0 = n.arange(0,S0max,0.2)
bFZH = deltac-n.sqrt(2*(smin-S0))*K
bFZHlin = bFZH0+bFZH1*S0
p.figure(2)
p.plot(S0,bFZH,'b', label=str(z))
p.plot(S0,bFZHlin,'b.-')
p.ylim([0,20])
p.xlim([0,25])
p.legend()
if False: #for benchmark
for i in range(1000):
S0max = sig0(m2R(M0min))
S0 = n.arange(0,S0max,0.2)
bFZH = deltac-n.sqrt(2*(smin-S0))*K
bFZHlin = bFZH0+bFZH1*S0
p.show()
################
# Z = float(opts.red)
# M0 = zeta*mmin(Z)*float(opts.mul)
# del0 = float(opts.del0)
###########################
# dlist = n.linspace(8,10,10)
# for del0 in dlist:
# res = fcoll_trapz_log(del0,M0,Z)
# print m2S(M0), res[0]
# if False:
# p.figure()
# p.plot(res[1],res[2])
# p.show()
#tplquad(All,mm,M0,lambda x: 0, lambda x: 5., lambda x,y: gam(m2R(x))*y,lambda x,y: 10.,args=(del0,M0,z))
| dlnBFdlnS0 | identifier_name |
FZH04.py | import numpy as n, matplotlib.pyplot as p, scipy.special
import cosmolopy.perturbation as pb
import cosmolopy.density as cd
from scipy.integrate import quad,tplquad
import itertools
from scipy.interpolate import interp1d
from scipy.interpolate import RectBivariateSpline as RBS
import optparse, sys
from sigmas import sig0
o = optparse.OptionParser()
o.add_option('-d','--del0', dest='del0', default=5.)
o.add_option('-m','--mul', dest='mul', default=1.)
o.add_option('-z','--red', dest='red', default=12.)
opts,args = o.parse_args(sys.argv[1:])
print opts, args
Om,sig8,ns,h,Ob = 0.315, 0.829, 0.96, 0.673, 0.0487
Planck13 = {'baryonic_effects':True,'omega_k_0':0,'omega_M_0':0.315, 'omega_b_0':0.0487, 'n':0.96, 'N_nu':0, 'omega_lambda_0':0.685,'omega_n_0':0., 'sigma_8':0.829,'h':0.673}
cosmo = Planck13
def m2R(m):
rhobar = cd.cosmo_densities(**cosmo)[1] #msun/Mpc
RL = (3*m/4/n.pi/rhobar)**(1./3)
return RL
def m2V(m):
rhobar = cd.cosmo_densities(**cosmo)[1] #msun/Mpc
return m/rhobar
def R2m(RL):
rhobar = cd.cosmo_densities(**cosmo)[1] #msun/Mpc
m = 4*n.pi/3*rhobar*RL**3
return m
def mmin(z,Tvir=1.E4):
return pb.virial_mass(Tvir,z,**cosmo)
dmS = n.load('m2S.npz')
MLtemp,SLtemp = dmS['arr_0'],dmS['arr_1']
fs2m = interp1d(SLtemp,MLtemp,kind='cubic')
def S2M(S):
return fs2m(S)
def Deltac(z):
fgrowth = pb.fgrowth(z, cosmo['omega_M_0']) # = D(z)/D(0)
return 1.686/fgrowth
#return 1.686*fgrowth
######################## SIZE DISTRIBUTION #############################
####################### FZH04 ##############################
def fFZH(S,zeta,B0,B1):
res = B0/n.sqrt(2*n.pi*S**3)*n.exp(-B0**2/2/S-B0*B1-B1**2*S/2)
return res
def BFZH(S0,deltac,smin,K):
return deltac-n.sqrt(2*(smin-S0))*K
def BFZHlin(S0,deltac,smin,K):
b0 = deltac-K*n.sqrt(2*smin)
b1 = K/n.sqrt(2*smin)
return b0+b1*S0
def dlnBFdlnS0(S0,deltac,smin,K,d=0.001):
Bp,Bo,Bm = BFZH(S0+d,deltac,smin,K), BFZH(S0,deltac,smin,K), BFZH(S0-d,deltac,smin,K)
return S0/Bo*(Bp-Bm)/2/d
def dlnBFlindlnS0(S0,deltac,smin,K,d=0.001):
Bp,Bo,Bm = BFZHlin(S0+d,deltac,smin,K), BFZHlin(S0,deltac,smin,K), BFZHlin(S0-d,deltac,smin,K)
return S0/Bo*(Bp-Bm)/2/d
##### m_min
dDoZ = n.load('theta.npz')
thetal,DoZl = dDoZ['arr_0'],dDoZ['arr_1']
ftheta = interp1d(DoZl,thetal,kind='cubic')
def theta(z,del0):
return ftheta(del0/(1+z))
def RphysoR0(del0,z):
th = theta(z,del0)
return 3./10/del0*(1-n.cos(th))
def RcovEul(del0,z):
return RphysoR0(del0,z)*(1+z)
def dlinSdlnR(lnR,d=0.001):
res = (n.log(sig0(n.exp(lnR+d)))-n.log(sig0(n.exp(lnR-d))))/d/2
return n.abs(res)
################################## MAIN ######################################
for z in [12., 16.]:
PLOT = True
zeta = 40.
K = scipy.special.erfinv(1-1./zeta)
Tvir = 1.E4
#z = 12.
deltac = Deltac(z)
mm = mmin(z)
M0min = zeta*mm
RLmin,R0min = m2R(mm), m2R(M0min)
print 'R',RLmin
smin = sig0(RLmin)
Rmin = R0min*RcovEul(deltac,z) #S0=smin, so del0=deltac; convertion from lagragian to comoving eulerian
####### FZH04 #######
bFZH0 = deltac-K*n.sqrt(2*smin)
bFZH1 = K/n.sqrt(2*smin)
#bFZH = deltac-n.sqrt(2*(smin-S0))*K
#bFZHlin = bFZH0+bFZH1*S0
def dlnRdlnR0(lnR0,S0,del0):
S0 = sig0(n.exp(lnR0))
del0 = BFZH(S0,deltac,smin,K)
th = theta(z,del0)
thfactor = 1-3./2*th*(th-n.sin(th))/(1-n.cos(th))**2
res = 1-dlinSdlnR(lnR0)*dlnBFdlnS0(S0,deltac,smin,K)*thfactor
return res
def V0dndlnR0(lnR0):
S0 = sig0(n.exp(lnR0))
return S0*fFZH(S0,zeta,bFZH0,bFZH1)*dlinSdlnR(lnR0)
def VdndlnR0(lnR0):
S0 = sig0(n.exp(lnR0))
del0 = BFZHlin(S0,deltac,smin,K)
#lnR0 = n.log(n.exp(lnR)/RcovEul(del0,z))
VoV0 = (RcovEul(del0,z))**3
#return VoV0/dlnRdlnR0(lnR0,S0,del0)*S0*fFZH(S0,zeta,bFZH0,bFZH1)*dlinSdlnR(lnR0)
return VoV0*S0*fFZH(S0,zeta,bFZH0,bFZH1)*dlinSdlnR(lnR0)
def VdndlnR(lnR0):
S0 = sig0(n.exp(lnR0))
del0 = BFZH(S0,deltac,smin,K)
VoV0 = (RcovEul(del0,z))**3
return VoV0/dlnRdlnR0(lnR0,S0,del0)*S0*fFZH(S0,zeta,bFZH0,bFZH1)*dlinSdlnR(lnR0)
if True:
print 'computing z=',z
#Q = quad(lambda lnR: VdndlnR(lnR),n.log(Rmin),3.5) #integrated over eulerian coordinates
Q = quad(lambda lnR0: VdndlnR0(lnR0),n.log(R0min),3.5) #integrated over eulerian coordinates
print 'Q=',Q
Q = Q[0]
#######
lnR0 = n.arange(n.log(R0min),3,0.03)
S0list = []
for lnr0 in lnR0: S0list.append(sig0(n.exp(lnr0)))
S0list = n.array(S0list)
#lnR = n.arange(n.log(Rmin),3,0.1)
del0list = BFZH(S0list,deltac,smin,K)
lnR = n.log(n.exp(lnR0)*RcovEul(del0list,z))
normsize = []
for lnr0 in lnR0:
res = VdndlnR(lnr0)/Q
print n.exp(lnr0),res
normsize.append(res)
p.figure(1)
p.semilogx(n.exp(lnR),normsize,label=str(z))
p.legend()
if True:
S0max = sig0(m2R(M0min))
S0 = n.arange(0,S0max,0.2)
bFZH = deltac-n.sqrt(2*(smin-S0))*K
bFZHlin = bFZH0+bFZH1*S0
p.figure(2)
p.plot(S0,bFZH,'b', label=str(z))
p.plot(S0,bFZHlin,'b.-')
p.ylim([0,20])
p.xlim([0,25])
p.legend()
if False: #for benchmark
for i in range(1000):
|
S0 = n.arange(0,S0max,0.2)
bFZH = deltac-n.sqrt(2*(smin-S0))*K
bFZHlin = bFZH0+bFZH1*S0
p.show()
################
# Z = float(opts.red)
# M0 = zeta*mmin(Z)*float(opts.mul)
# del0 = float(opts.del0)
###########################
# dlist = n.linspace(8,10,10)
# for del0 in dlist:
# res = fcoll_trapz_log(del0,M0,Z)
# print m2S(M0), res[0]
# if False:
# p.figure()
# p.plot(res[1],res[2])
# p.show()
#tplquad(All,mm,M0,lambda x: 0, lambda x: 5., lambda x,y: gam(m2R(x))*y,lambda x,y: 10.,args=(del0,M0,z))
| S0max = sig0(m2R(M0min)) | conditional_block |
FZH04.py | import numpy as n, matplotlib.pyplot as p, scipy.special
import cosmolopy.perturbation as pb
import cosmolopy.density as cd
from scipy.integrate import quad,tplquad
import itertools
from scipy.interpolate import interp1d
from scipy.interpolate import RectBivariateSpline as RBS
import optparse, sys
from sigmas import sig0
o = optparse.OptionParser()
o.add_option('-d','--del0', dest='del0', default=5.)
o.add_option('-m','--mul', dest='mul', default=1.)
o.add_option('-z','--red', dest='red', default=12.)
opts,args = o.parse_args(sys.argv[1:])
print opts, args
Om,sig8,ns,h,Ob = 0.315, 0.829, 0.96, 0.673, 0.0487
Planck13 = {'baryonic_effects':True,'omega_k_0':0,'omega_M_0':0.315, 'omega_b_0':0.0487, 'n':0.96, 'N_nu':0, 'omega_lambda_0':0.685,'omega_n_0':0., 'sigma_8':0.829,'h':0.673}
cosmo = Planck13
def m2R(m):
rhobar = cd.cosmo_densities(**cosmo)[1] #msun/Mpc
RL = (3*m/4/n.pi/rhobar)**(1./3)
return RL
def m2V(m):
rhobar = cd.cosmo_densities(**cosmo)[1] #msun/Mpc
return m/rhobar
def R2m(RL):
rhobar = cd.cosmo_densities(**cosmo)[1] #msun/Mpc
m = 4*n.pi/3*rhobar*RL**3
return m
def mmin(z,Tvir=1.E4):
return pb.virial_mass(Tvir,z,**cosmo)
dmS = n.load('m2S.npz')
MLtemp,SLtemp = dmS['arr_0'],dmS['arr_1']
fs2m = interp1d(SLtemp,MLtemp,kind='cubic')
def S2M(S):
return fs2m(S)
def Deltac(z):
fgrowth = pb.fgrowth(z, cosmo['omega_M_0']) # = D(z)/D(0)
return 1.686/fgrowth
#return 1.686*fgrowth
######################## SIZE DISTRIBUTION #############################
####################### FZH04 ##############################
def fFZH(S,zeta,B0,B1):
res = B0/n.sqrt(2*n.pi*S**3)*n.exp(-B0**2/2/S-B0*B1-B1**2*S/2)
return res
def BFZH(S0,deltac,smin,K):
return deltac-n.sqrt(2*(smin-S0))*K
def BFZHlin(S0,deltac,smin,K):
b0 = deltac-K*n.sqrt(2*smin)
b1 = K/n.sqrt(2*smin)
return b0+b1*S0
def dlnBFdlnS0(S0,deltac,smin,K,d=0.001):
Bp,Bo,Bm = BFZH(S0+d,deltac,smin,K), BFZH(S0,deltac,smin,K), BFZH(S0-d,deltac,smin,K)
return S0/Bo*(Bp-Bm)/2/d
def dlnBFlindlnS0(S0,deltac,smin,K,d=0.001):
Bp,Bo,Bm = BFZHlin(S0+d,deltac,smin,K), BFZHlin(S0,deltac,smin,K), BFZHlin(S0-d,deltac,smin,K)
return S0/Bo*(Bp-Bm)/2/d
##### m_min
dDoZ = n.load('theta.npz')
thetal,DoZl = dDoZ['arr_0'],dDoZ['arr_1']
ftheta = interp1d(DoZl,thetal,kind='cubic')
def theta(z,del0):
return ftheta(del0/(1+z))
def RphysoR0(del0,z):
th = theta(z,del0)
return 3./10/del0*(1-n.cos(th))
def RcovEul(del0,z):
|
def dlinSdlnR(lnR,d=0.001):
res = (n.log(sig0(n.exp(lnR+d)))-n.log(sig0(n.exp(lnR-d))))/d/2
return n.abs(res)
################################## MAIN ######################################
for z in [12., 16.]:
PLOT = True
zeta = 40.
K = scipy.special.erfinv(1-1./zeta)
Tvir = 1.E4
#z = 12.
deltac = Deltac(z)
mm = mmin(z)
M0min = zeta*mm
RLmin,R0min = m2R(mm), m2R(M0min)
print 'R',RLmin
smin = sig0(RLmin)
Rmin = R0min*RcovEul(deltac,z) #S0=smin, so del0=deltac; convertion from lagragian to comoving eulerian
####### FZH04 #######
bFZH0 = deltac-K*n.sqrt(2*smin)
bFZH1 = K/n.sqrt(2*smin)
#bFZH = deltac-n.sqrt(2*(smin-S0))*K
#bFZHlin = bFZH0+bFZH1*S0
def dlnRdlnR0(lnR0,S0,del0):
S0 = sig0(n.exp(lnR0))
del0 = BFZH(S0,deltac,smin,K)
th = theta(z,del0)
thfactor = 1-3./2*th*(th-n.sin(th))/(1-n.cos(th))**2
res = 1-dlinSdlnR(lnR0)*dlnBFdlnS0(S0,deltac,smin,K)*thfactor
return res
def V0dndlnR0(lnR0):
S0 = sig0(n.exp(lnR0))
return S0*fFZH(S0,zeta,bFZH0,bFZH1)*dlinSdlnR(lnR0)
def VdndlnR0(lnR0):
S0 = sig0(n.exp(lnR0))
del0 = BFZHlin(S0,deltac,smin,K)
#lnR0 = n.log(n.exp(lnR)/RcovEul(del0,z))
VoV0 = (RcovEul(del0,z))**3
#return VoV0/dlnRdlnR0(lnR0,S0,del0)*S0*fFZH(S0,zeta,bFZH0,bFZH1)*dlinSdlnR(lnR0)
return VoV0*S0*fFZH(S0,zeta,bFZH0,bFZH1)*dlinSdlnR(lnR0)
def VdndlnR(lnR0):
S0 = sig0(n.exp(lnR0))
del0 = BFZH(S0,deltac,smin,K)
VoV0 = (RcovEul(del0,z))**3
return VoV0/dlnRdlnR0(lnR0,S0,del0)*S0*fFZH(S0,zeta,bFZH0,bFZH1)*dlinSdlnR(lnR0)
if True:
print 'computing z=',z
#Q = quad(lambda lnR: VdndlnR(lnR),n.log(Rmin),3.5) #integrated over eulerian coordinates
Q = quad(lambda lnR0: VdndlnR0(lnR0),n.log(R0min),3.5) #integrated over eulerian coordinates
print 'Q=',Q
Q = Q[0]
#######
lnR0 = n.arange(n.log(R0min),3,0.03)
S0list = []
for lnr0 in lnR0: S0list.append(sig0(n.exp(lnr0)))
S0list = n.array(S0list)
#lnR = n.arange(n.log(Rmin),3,0.1)
del0list = BFZH(S0list,deltac,smin,K)
lnR = n.log(n.exp(lnR0)*RcovEul(del0list,z))
normsize = []
for lnr0 in lnR0:
res = VdndlnR(lnr0)/Q
print n.exp(lnr0),res
normsize.append(res)
p.figure(1)
p.semilogx(n.exp(lnR),normsize,label=str(z))
p.legend()
if True:
S0max = sig0(m2R(M0min))
S0 = n.arange(0,S0max,0.2)
bFZH = deltac-n.sqrt(2*(smin-S0))*K
bFZHlin = bFZH0+bFZH1*S0
p.figure(2)
p.plot(S0,bFZH,'b', label=str(z))
p.plot(S0,bFZHlin,'b.-')
p.ylim([0,20])
p.xlim([0,25])
p.legend()
if False: #for benchmark
for i in range(1000):
S0max = sig0(m2R(M0min))
S0 = n.arange(0,S0max,0.2)
bFZH = deltac-n.sqrt(2*(smin-S0))*K
bFZHlin = bFZH0+bFZH1*S0
p.show()
################
# Z = float(opts.red)
# M0 = zeta*mmin(Z)*float(opts.mul)
# del0 = float(opts.del0)
###########################
# dlist = n.linspace(8,10,10)
# for del0 in dlist:
# res = fcoll_trapz_log(del0,M0,Z)
# print m2S(M0), res[0]
# if False:
# p.figure()
# p.plot(res[1],res[2])
# p.show()
#tplquad(All,mm,M0,lambda x: 0, lambda x: 5., lambda x,y: gam(m2R(x))*y,lambda x,y: 10.,args=(del0,M0,z))
| return RphysoR0(del0,z)*(1+z) | identifier_body |
FZH04.py | import numpy as n, matplotlib.pyplot as p, scipy.special
import cosmolopy.perturbation as pb
import cosmolopy.density as cd
from scipy.integrate import quad,tplquad
import itertools
from scipy.interpolate import interp1d
from scipy.interpolate import RectBivariateSpline as RBS
import optparse, sys
from sigmas import sig0
o = optparse.OptionParser()
o.add_option('-d','--del0', dest='del0', default=5.)
o.add_option('-m','--mul', dest='mul', default=1.)
o.add_option('-z','--red', dest='red', default=12.)
opts,args = o.parse_args(sys.argv[1:])
print opts, args
Om,sig8,ns,h,Ob = 0.315, 0.829, 0.96, 0.673, 0.0487
Planck13 = {'baryonic_effects':True,'omega_k_0':0,'omega_M_0':0.315, 'omega_b_0':0.0487, 'n':0.96, 'N_nu':0, 'omega_lambda_0':0.685,'omega_n_0':0., 'sigma_8':0.829,'h':0.673}
cosmo = Planck13
def m2R(m):
rhobar = cd.cosmo_densities(**cosmo)[1] #msun/Mpc
RL = (3*m/4/n.pi/rhobar)**(1./3)
return RL
def m2V(m):
rhobar = cd.cosmo_densities(**cosmo)[1] #msun/Mpc
return m/rhobar
def R2m(RL):
rhobar = cd.cosmo_densities(**cosmo)[1] #msun/Mpc
m = 4*n.pi/3*rhobar*RL**3
return m
def mmin(z,Tvir=1.E4):
return pb.virial_mass(Tvir,z,**cosmo)
dmS = n.load('m2S.npz')
MLtemp,SLtemp = dmS['arr_0'],dmS['arr_1']
fs2m = interp1d(SLtemp,MLtemp,kind='cubic')
def S2M(S):
return fs2m(S)
def Deltac(z):
fgrowth = pb.fgrowth(z, cosmo['omega_M_0']) # = D(z)/D(0)
return 1.686/fgrowth
#return 1.686*fgrowth
######################## SIZE DISTRIBUTION #############################
####################### FZH04 ##############################
def fFZH(S,zeta,B0,B1):
res = B0/n.sqrt(2*n.pi*S**3)*n.exp(-B0**2/2/S-B0*B1-B1**2*S/2)
return res
def BFZH(S0,deltac,smin,K):
return deltac-n.sqrt(2*(smin-S0))*K
def BFZHlin(S0,deltac,smin,K):
b0 = deltac-K*n.sqrt(2*smin)
b1 = K/n.sqrt(2*smin)
return b0+b1*S0
def dlnBFdlnS0(S0,deltac,smin,K,d=0.001):
Bp,Bo,Bm = BFZH(S0+d,deltac,smin,K), BFZH(S0,deltac,smin,K), BFZH(S0-d,deltac,smin,K)
return S0/Bo*(Bp-Bm)/2/d
def dlnBFlindlnS0(S0,deltac,smin,K,d=0.001):
Bp,Bo,Bm = BFZHlin(S0+d,deltac,smin,K), BFZHlin(S0,deltac,smin,K), BFZHlin(S0-d,deltac,smin,K)
return S0/Bo*(Bp-Bm)/2/d
##### m_min
dDoZ = n.load('theta.npz')
thetal,DoZl = dDoZ['arr_0'],dDoZ['arr_1']
ftheta = interp1d(DoZl,thetal,kind='cubic')
def theta(z,del0):
return ftheta(del0/(1+z))
def RphysoR0(del0,z):
th = theta(z,del0)
return 3./10/del0*(1-n.cos(th))
def RcovEul(del0,z):
return RphysoR0(del0,z)*(1+z)
def dlinSdlnR(lnR,d=0.001):
res = (n.log(sig0(n.exp(lnR+d)))-n.log(sig0(n.exp(lnR-d))))/d/2
return n.abs(res)
################################## MAIN ######################################
for z in [12., 16.]:
PLOT = True
zeta = 40.
K = scipy.special.erfinv(1-1./zeta)
Tvir = 1.E4
#z = 12.
deltac = Deltac(z)
mm = mmin(z)
M0min = zeta*mm
RLmin,R0min = m2R(mm), m2R(M0min)
print 'R',RLmin
smin = sig0(RLmin)
Rmin = R0min*RcovEul(deltac,z) #S0=smin, so del0=deltac; convertion from lagragian to comoving eulerian
####### FZH04 #######
bFZH0 = deltac-K*n.sqrt(2*smin)
bFZH1 = K/n.sqrt(2*smin)
#bFZH = deltac-n.sqrt(2*(smin-S0))*K
#bFZHlin = bFZH0+bFZH1*S0
def dlnRdlnR0(lnR0,S0,del0):
S0 = sig0(n.exp(lnR0))
del0 = BFZH(S0,deltac,smin,K)
th = theta(z,del0)
thfactor = 1-3./2*th*(th-n.sin(th))/(1-n.cos(th))**2
res = 1-dlinSdlnR(lnR0)*dlnBFdlnS0(S0,deltac,smin,K)*thfactor
return res
def V0dndlnR0(lnR0):
S0 = sig0(n.exp(lnR0))
return S0*fFZH(S0,zeta,bFZH0,bFZH1)*dlinSdlnR(lnR0)
def VdndlnR0(lnR0):
S0 = sig0(n.exp(lnR0))
del0 = BFZHlin(S0,deltac,smin,K)
#lnR0 = n.log(n.exp(lnR)/RcovEul(del0,z))
VoV0 = (RcovEul(del0,z))**3
#return VoV0/dlnRdlnR0(lnR0,S0,del0)*S0*fFZH(S0,zeta,bFZH0,bFZH1)*dlinSdlnR(lnR0) | def VdndlnR(lnR0):
S0 = sig0(n.exp(lnR0))
del0 = BFZH(S0,deltac,smin,K)
VoV0 = (RcovEul(del0,z))**3
return VoV0/dlnRdlnR0(lnR0,S0,del0)*S0*fFZH(S0,zeta,bFZH0,bFZH1)*dlinSdlnR(lnR0)
if True:
print 'computing z=',z
#Q = quad(lambda lnR: VdndlnR(lnR),n.log(Rmin),3.5) #integrated over eulerian coordinates
Q = quad(lambda lnR0: VdndlnR0(lnR0),n.log(R0min),3.5) #integrated over eulerian coordinates
print 'Q=',Q
Q = Q[0]
#######
lnR0 = n.arange(n.log(R0min),3,0.03)
S0list = []
for lnr0 in lnR0: S0list.append(sig0(n.exp(lnr0)))
S0list = n.array(S0list)
#lnR = n.arange(n.log(Rmin),3,0.1)
del0list = BFZH(S0list,deltac,smin,K)
lnR = n.log(n.exp(lnR0)*RcovEul(del0list,z))
normsize = []
for lnr0 in lnR0:
res = VdndlnR(lnr0)/Q
print n.exp(lnr0),res
normsize.append(res)
p.figure(1)
p.semilogx(n.exp(lnR),normsize,label=str(z))
p.legend()
if True:
S0max = sig0(m2R(M0min))
S0 = n.arange(0,S0max,0.2)
bFZH = deltac-n.sqrt(2*(smin-S0))*K
bFZHlin = bFZH0+bFZH1*S0
p.figure(2)
p.plot(S0,bFZH,'b', label=str(z))
p.plot(S0,bFZHlin,'b.-')
p.ylim([0,20])
p.xlim([0,25])
p.legend()
if False: #for benchmark
for i in range(1000):
S0max = sig0(m2R(M0min))
S0 = n.arange(0,S0max,0.2)
bFZH = deltac-n.sqrt(2*(smin-S0))*K
bFZHlin = bFZH0+bFZH1*S0
p.show()
################
# Z = float(opts.red)
# M0 = zeta*mmin(Z)*float(opts.mul)
# del0 = float(opts.del0)
###########################
# dlist = n.linspace(8,10,10)
# for del0 in dlist:
# res = fcoll_trapz_log(del0,M0,Z)
# print m2S(M0), res[0]
# if False:
# p.figure()
# p.plot(res[1],res[2])
# p.show()
#tplquad(All,mm,M0,lambda x: 0, lambda x: 5., lambda x,y: gam(m2R(x))*y,lambda x,y: 10.,args=(del0,M0,z)) | return VoV0*S0*fFZH(S0,zeta,bFZH0,bFZH1)*dlinSdlnR(lnR0) | random_line_split |
extract_QEq_params.py | #!/usr/bin/env python
"""
Extract atomic parameters for QEq potential.
Usage:
extract_bvs_params.py [options] DATA_FILE NAME [NAME...]
Options:
-h, --help Show this message and exit.
"""
from __future__ import print_function
from docopt import docopt
__author__ = "RYO KOBAYASHI"
__version__ = "180112"
out_Coulomb= 'in.params.Coulomb'
def | (fname):
params = {}
with open(fname,'r') as f:
lines = f.readlines()
for line in lines:
if line[0] == '#':
continue
data = line.split()
idx = int(data[0])
name = data[1]
ie1 = float(data[2])
ie2 = float(data[3])
ea = float(data[4])
rad = float(data[5])
en = float(data[6])
params[name] = [idx,name,ie1,ie2,ea,rad,en]
return params
def anum_to_range(atomic_number):
"""
Calculate and return the lower and upper limits of the charge of given atomic number.
"""
nstates = (0,2,8,18,32,50)
if atomic_number > 86:
raise ValueError('Atomic number greater than 86 is not available.')
elif atomic_number <= sum_array(nstates,1):
n = 1
elif atomic_number <= sum_array(nstates,2):
n = 2
elif atomic_number <= sum_array(nstates,3):
n = 3
elif atomic_number <= sum_array(nstates,4):
n = 4
elif atomic_number <= sum_array(nstates,5):
n = 5
else:
raise ValueError('Atomic number is something wrong: ',atomic_number)
freedom = (0,2,6,10,14,18,22)
nval = atomic_number - sum_array(nstates,n-1)
if nval < sum_array(freedom,1):
l = 1
elif nval < sum_array(freedom,2):
l = 2
elif nval < sum_array(freedom,3):
l = 3
elif nval < sum_array(freedom,4):
l = 4
else:
l = 5
if not l <= n:
raise ValueError('not l<=n')
print('anum,n,l,nval=',atomic_number,n,l,nval)
nseat = sum_array(nstates,n) -sum_array(nstates,n-1)
nseatopen = nseat - nval
for il in range(l+1,n+1):
nseatopen -= freedom[il]
print('nseat,nseatopen=',nseat,nseatopen)
qlow = -float(nseatopen)
qup = float(min(nval, freedom[l]+freedom[l-1]))
return qlow,qup
def sum_array(array,n):
if len(array) < n+1:
raise ValueError('len(array) < n')
s = 0
for i in range(n+1):
s += array[i]
return s
def write_Coulomb_params(fname,params,specorder):
with open(fname,'w') as f:
#...declare it is 'variable_charge' Coulomb
f.write(' variable_charge \n')
n = 0
e0 = 0.0
for k in specorder:
n += 1
p = params[k]
anum = p[0]
name = p[1]
ie = p[2]
ea = -p[4]
xi = (ie+ea)/2
J = (ie-ea)
qlow,qup = anum_to_range(anum)
f.write('{0:4d} {1:5s}'.format(n,name)
+' {0:9.3f} {1:9.3f} {2:9.4f}'.format(xi,J,e0)
+' {0:5.1f} {1:5.1f}\n'.format(qlow,qup))
if __name__ == "__main__":
args = docopt(__doc__)
fname = args['DATA_FILE']
specorder = [ name for name in args['NAME'] ]
params = read_data_file(fname)
write_Coulomb_params(out_Coulomb,params,specorder)
| read_data_file | identifier_name |
extract_QEq_params.py | #!/usr/bin/env python
"""
Extract atomic parameters for QEq potential.
Usage:
extract_bvs_params.py [options] DATA_FILE NAME [NAME...]
Options:
-h, --help Show this message and exit.
"""
from __future__ import print_function
from docopt import docopt
__author__ = "RYO KOBAYASHI"
__version__ = "180112"
out_Coulomb= 'in.params.Coulomb'
def read_data_file(fname):
params = {}
with open(fname,'r') as f:
lines = f.readlines()
for line in lines:
if line[0] == '#':
continue
data = line.split()
idx = int(data[0])
name = data[1]
ie1 = float(data[2])
ie2 = float(data[3])
ea = float(data[4])
rad = float(data[5])
en = float(data[6])
params[name] = [idx,name,ie1,ie2,ea,rad,en]
return params
def anum_to_range(atomic_number):
"""
Calculate and return the lower and upper limits of the charge of given atomic number.
"""
nstates = (0,2,8,18,32,50)
if atomic_number > 86:
raise ValueError('Atomic number greater than 86 is not available.')
elif atomic_number <= sum_array(nstates,1):
n = 1
elif atomic_number <= sum_array(nstates,2):
n = 2
elif atomic_number <= sum_array(nstates,3):
n = 3
elif atomic_number <= sum_array(nstates,4):
n = 4
elif atomic_number <= sum_array(nstates,5):
n = 5
else:
raise ValueError('Atomic number is something wrong: ',atomic_number)
freedom = (0,2,6,10,14,18,22)
nval = atomic_number - sum_array(nstates,n-1)
if nval < sum_array(freedom,1):
l = 1
elif nval < sum_array(freedom,2):
l = 2
elif nval < sum_array(freedom,3):
l = 3
elif nval < sum_array(freedom,4):
|
else:
l = 5
if not l <= n:
raise ValueError('not l<=n')
print('anum,n,l,nval=',atomic_number,n,l,nval)
nseat = sum_array(nstates,n) -sum_array(nstates,n-1)
nseatopen = nseat - nval
for il in range(l+1,n+1):
nseatopen -= freedom[il]
print('nseat,nseatopen=',nseat,nseatopen)
qlow = -float(nseatopen)
qup = float(min(nval, freedom[l]+freedom[l-1]))
return qlow,qup
def sum_array(array,n):
if len(array) < n+1:
raise ValueError('len(array) < n')
s = 0
for i in range(n+1):
s += array[i]
return s
def write_Coulomb_params(fname,params,specorder):
with open(fname,'w') as f:
#...declare it is 'variable_charge' Coulomb
f.write(' variable_charge \n')
n = 0
e0 = 0.0
for k in specorder:
n += 1
p = params[k]
anum = p[0]
name = p[1]
ie = p[2]
ea = -p[4]
xi = (ie+ea)/2
J = (ie-ea)
qlow,qup = anum_to_range(anum)
f.write('{0:4d} {1:5s}'.format(n,name)
+' {0:9.3f} {1:9.3f} {2:9.4f}'.format(xi,J,e0)
+' {0:5.1f} {1:5.1f}\n'.format(qlow,qup))
if __name__ == "__main__":
args = docopt(__doc__)
fname = args['DATA_FILE']
specorder = [ name for name in args['NAME'] ]
params = read_data_file(fname)
write_Coulomb_params(out_Coulomb,params,specorder)
| l = 4 | conditional_block |
extract_QEq_params.py | #!/usr/bin/env python
"""
Extract atomic parameters for QEq potential.
Usage:
extract_bvs_params.py [options] DATA_FILE NAME [NAME...]
Options:
-h, --help Show this message and exit.
"""
from __future__ import print_function
from docopt import docopt
__author__ = "RYO KOBAYASHI"
__version__ = "180112"
out_Coulomb= 'in.params.Coulomb'
def read_data_file(fname):
params = {}
with open(fname,'r') as f:
lines = f.readlines()
for line in lines:
if line[0] == '#':
continue
data = line.split()
idx = int(data[0])
name = data[1]
ie1 = float(data[2])
ie2 = float(data[3])
ea = float(data[4])
rad = float(data[5])
en = float(data[6])
params[name] = [idx,name,ie1,ie2,ea,rad,en]
return params
def anum_to_range(atomic_number):
"""
Calculate and return the lower and upper limits of the charge of given atomic number.
"""
nstates = (0,2,8,18,32,50)
if atomic_number > 86:
raise ValueError('Atomic number greater than 86 is not available.')
elif atomic_number <= sum_array(nstates,1):
n = 1
elif atomic_number <= sum_array(nstates,2):
n = 2
elif atomic_number <= sum_array(nstates,3):
n = 3
elif atomic_number <= sum_array(nstates,4):
n = 4
elif atomic_number <= sum_array(nstates,5):
n = 5
else:
raise ValueError('Atomic number is something wrong: ',atomic_number)
freedom = (0,2,6,10,14,18,22)
nval = atomic_number - sum_array(nstates,n-1)
if nval < sum_array(freedom,1):
l = 1
elif nval < sum_array(freedom,2):
l = 2
elif nval < sum_array(freedom,3):
l = 3
elif nval < sum_array(freedom,4):
l = 4
else:
l = 5
if not l <= n:
raise ValueError('not l<=n')
print('anum,n,l,nval=',atomic_number,n,l,nval)
nseat = sum_array(nstates,n) -sum_array(nstates,n-1)
nseatopen = nseat - nval
for il in range(l+1,n+1):
nseatopen -= freedom[il]
print('nseat,nseatopen=',nseat,nseatopen)
qlow = -float(nseatopen)
qup = float(min(nval, freedom[l]+freedom[l-1]))
return qlow,qup
def sum_array(array,n):
if len(array) < n+1:
raise ValueError('len(array) < n')
s = 0
for i in range(n+1):
s += array[i]
return s
def write_Coulomb_params(fname,params,specorder):
|
if __name__ == "__main__":
args = docopt(__doc__)
fname = args['DATA_FILE']
specorder = [ name for name in args['NAME'] ]
params = read_data_file(fname)
write_Coulomb_params(out_Coulomb,params,specorder)
| with open(fname,'w') as f:
#...declare it is 'variable_charge' Coulomb
f.write(' variable_charge \n')
n = 0
e0 = 0.0
for k in specorder:
n += 1
p = params[k]
anum = p[0]
name = p[1]
ie = p[2]
ea = -p[4]
xi = (ie+ea)/2
J = (ie-ea)
qlow,qup = anum_to_range(anum)
f.write('{0:4d} {1:5s}'.format(n,name)
+' {0:9.3f} {1:9.3f} {2:9.4f}'.format(xi,J,e0)
+' {0:5.1f} {1:5.1f}\n'.format(qlow,qup)) | identifier_body |
extract_QEq_params.py | #!/usr/bin/env python
"""
Extract atomic parameters for QEq potential.
Usage:
extract_bvs_params.py [options] DATA_FILE NAME [NAME...]
Options:
-h, --help Show this message and exit.
"""
from __future__ import print_function
from docopt import docopt
__author__ = "RYO KOBAYASHI"
__version__ = "180112"
| params = {}
with open(fname,'r') as f:
lines = f.readlines()
for line in lines:
if line[0] == '#':
continue
data = line.split()
idx = int(data[0])
name = data[1]
ie1 = float(data[2])
ie2 = float(data[3])
ea = float(data[4])
rad = float(data[5])
en = float(data[6])
params[name] = [idx,name,ie1,ie2,ea,rad,en]
return params
def anum_to_range(atomic_number):
"""
Calculate and return the lower and upper limits of the charge of given atomic number.
"""
nstates = (0,2,8,18,32,50)
if atomic_number > 86:
raise ValueError('Atomic number greater than 86 is not available.')
elif atomic_number <= sum_array(nstates,1):
n = 1
elif atomic_number <= sum_array(nstates,2):
n = 2
elif atomic_number <= sum_array(nstates,3):
n = 3
elif atomic_number <= sum_array(nstates,4):
n = 4
elif atomic_number <= sum_array(nstates,5):
n = 5
else:
raise ValueError('Atomic number is something wrong: ',atomic_number)
freedom = (0,2,6,10,14,18,22)
nval = atomic_number - sum_array(nstates,n-1)
if nval < sum_array(freedom,1):
l = 1
elif nval < sum_array(freedom,2):
l = 2
elif nval < sum_array(freedom,3):
l = 3
elif nval < sum_array(freedom,4):
l = 4
else:
l = 5
if not l <= n:
raise ValueError('not l<=n')
print('anum,n,l,nval=',atomic_number,n,l,nval)
nseat = sum_array(nstates,n) -sum_array(nstates,n-1)
nseatopen = nseat - nval
for il in range(l+1,n+1):
nseatopen -= freedom[il]
print('nseat,nseatopen=',nseat,nseatopen)
qlow = -float(nseatopen)
qup = float(min(nval, freedom[l]+freedom[l-1]))
return qlow,qup
def sum_array(array,n):
if len(array) < n+1:
raise ValueError('len(array) < n')
s = 0
for i in range(n+1):
s += array[i]
return s
def write_Coulomb_params(fname,params,specorder):
with open(fname,'w') as f:
#...declare it is 'variable_charge' Coulomb
f.write(' variable_charge \n')
n = 0
e0 = 0.0
for k in specorder:
n += 1
p = params[k]
anum = p[0]
name = p[1]
ie = p[2]
ea = -p[4]
xi = (ie+ea)/2
J = (ie-ea)
qlow,qup = anum_to_range(anum)
f.write('{0:4d} {1:5s}'.format(n,name)
+' {0:9.3f} {1:9.3f} {2:9.4f}'.format(xi,J,e0)
+' {0:5.1f} {1:5.1f}\n'.format(qlow,qup))
if __name__ == "__main__":
args = docopt(__doc__)
fname = args['DATA_FILE']
specorder = [ name for name in args['NAME'] ]
params = read_data_file(fname)
write_Coulomb_params(out_Coulomb,params,specorder) | out_Coulomb= 'in.params.Coulomb'
def read_data_file(fname): | random_line_split |
plural.py | # PLURAL - last updated for NodeBox 1rc7
# Author: Tom De Smedt <[email protected]>
# See LICENSE.txt for details.
# Based on "An Algorithmic Approach to English Pluralization" by Damian Conway:
# http://www.csse.monash.edu.au/~damian/papers/HTML/Plurals.html
# Prepositions are used to solve things like
# "mother-in-law" or "man at arms"
plural_prepositions = ["about", "above", "across", "after", "among", "around", "at", "athwart", "before", "behind", "below", "beneath", "beside", "besides", "between", "betwixt", "beyond", "but", "by", "during", "except", "for", "from", "in", "into", "near", "of", "off", "on", "onto", "out", "over", "since", "till", "to", "under", "until", "unto", "upon", "with"]
# Inflection rules that are either general,
# or apply to a certain category of words,
# or apply to a certain category of words only in classical mode,
# or apply only in classical mode.
# Each rule consists of:
# suffix, inflection, category and classic flag.
plural_rules = [
# 0/ Indefinite articles and demonstratives.
[
["^a$|^an$", "some", None, False],
["^this$", "these", None, False],
["^that$", "those", None, False],
["^any$", "all", None, False]
],
# 1/ Possessive adjectives.
# Overlaps with 1/ for "his" and "its".
# Overlaps with 2/ for "her".
[
["^my$", "our", None, False],
["^your$|^thy$", "your", None, False],
["^her$|^his$|^its$|^their$", "their", None, False]
],
# 2/
# Possessive pronouns.
[
["^mine$", "ours", None, False],
["^yours$|^thine$", "yours", None, False],
["^hers$|^his$|^its$|^theirs$", "theirs", None, False]
],
# 3/
# Personal pronouns.
[
["^I$", "we", None, False],
["^me$", "us", None, False],
["^myself$", "ourselves", None, False],
["^you$", "you", None, False],
["^thou$|^thee$", "ye", None, False],
["^yourself$|^thyself$", "yourself", None, False],
["^she$|^he$|^it$|^they$", "they", None, False],
["^her$|^him$|^it$|^them$", "them", None, False],
["^herself$|^himself$|^itself$|^themself$", "themselves", None, False],
["^oneself$", "oneselves", None, False]
],
# 4/
# Words that do not inflect.
[
["$", "", "uninflected", False],
["$", "", "uncountable", False],
["s$", "s", "s-singular", False],
["fish$", "fish", None, False],
["([- ])bass$", "\\1bass", None, False],
["ois$", "ois", None, False],
["sheep$", "sheep", None, False],
["deer$", "deer", None, False],
["pox$", "pox", None, False],
["([A-Z].*)ese$", "\\1ese", None, False],
["itis$", "itis", None, False],
["(fruct|gluc|galact|lact|ket|malt|rib|sacchar|cellul)ose$", "\\1ose", None, False]
],
# 5/
# Irregular plurals.
# (mongoose, oxen).
[
["atlas$", "atlantes", None, True],
["atlas$", "atlases", None, False],
["beef$", "beeves", None, True],
["brother$", "brethren", None, True],
["child$", "children", None, False],
["corpus$", "corpora", None, True],
["corpus$", "corpuses", None, False],
["^cow$", "kine", None, True],
["ephemeris$", "ephemerides", None, False],
["ganglion$", "ganglia", None, True],
["genie$", "genii", None, True],
["genus$", "genera", None, False],
["graffito$", "graffiti", None, False],
["loaf$", "loaves", None, False],
["money$", "monies", None, True],
["mongoose$", "mongooses", None, False],
["mythos$", "mythoi", None, False],
["octopus$", "octopodes", None, True],
["opus$", "opera", None, True],
["opus$", "opuses", None, False],
["^ox$", "oxen", None, False],
["penis$", "penes", None, True],
["penis$", "penises", None, False],
["soliloquy$", "soliloquies", None, False],
["testis$", "testes", None, False],
["trilby$", "trilbys", None, False],
["turf$", "turves", None, True],
["numen$", "numena", None, False],
["occiput$", "occipita", None, True],
],
# 6/
# Irregular inflections for common suffixes
# (synopses, mice, men).
[
["man$", "men", None, False],
["person$", "people", None, False],
["([lm])ouse$", "\\1ice", None, False],
["tooth$", "teeth", None, False],
["goose$", "geese", None, False],
["foot$", "feet", None, False],
["zoon$", "zoa", None, False],
["([csx])is$", "\\1es", None, False]
],
# 7/
# Fully assimilated classical inflections
# (vertebrae, codices).
[
["ex$", "ices", "ex-ices", False],
["ex$", "ices", "ex-ices-classical", True],
["um$", "a", "um-a", False],
["um$", "a", "um-a-classical", True],
["on$", "a", "on-a", False],
["a$", "ae", "a-ae", False],
["a$", "ae", "a-ae-classical", True]
],
# 8/
# Classical variants of modern inflections
# (stigmata, soprani).
[
["trix$", "trices", None, True],
["eau$", "eaux", None, True],
["ieu$", "ieu", None, True],
["([iay])nx$", "\\1nges", None, True],
["en$", "ina", "en-ina-classical", True],
["a$", "ata", "a-ata-classical", True],
["is$", "ides", "is-ides-classical", True],
["us$", "i", "us-i-classical", True],
["us$", "us", "us-us-classical", True],
["o$", "i", "o-i-classical", True],
["$", "i", "-i-classical", True],
["$", "im", "-im-classical", True]
],
# 9/
# -ch, -sh and -ss take -es in the plural
# (churches, classes).
[
["([cs])h$", "\\1hes", None, False],
["ss$", "sses", None, False],
["x$", "xes", None, False]
],
# 10/
# Certain words ending in -f or -fe take -ves in the plural
# (lives, wolves).
[
["([aeo]l)f$", "\\1ves", None, False],
["([^d]ea)f$", "\\1ves", None, False],
["arf$", "arves", None, False],
["([nlw]i)fe$", "\\1ves", None, False],
],
# 11/
# -y takes -ys if preceded by a vowel,
# or when a proper noun,
# but -ies if preceded by a consonant
# (storeys, Marys, stories).
[
["([aeiou])y$", "\\1ys", None, False],
["([A-Z].*)y$", "\\1ys", None, False],
["y$", "ies", None, False]
],
# 12/
# Some words ending in -o take -os,
# the rest take -oes.
# Words in which the -o is preceded by a vowel always take -os
# (lassos, potatoes, bamboos).
[
["o$", "os", "o-os", False],
["([aeiou])o$", "\\1os", None, False],
["o$", "oes", None, False]
],
# 13/
# Miltary stuff (Major Generals).
[
["l$", "ls", "general-generals", False]
],
# 14/
# Otherwise, assume that the plural just adds -s
# (cats, programmes).
[
["$", "s", None, False]
],
]
# Suffix categories
plural_categories = {
"uninflected" : ["bison", "bream", "breeches", "britches", "carp", "chassis", "clippers", "cod", "contretemps", "corps", "debris", "diabetes", "djinn", "eland", "elk", "flounder", "gallows", "graffiti", "headquarters", "herpes", "high-jinks", "homework", "innings", "jackanapes", "mackerel", "measles", "mews", "mumps", "news", "pincers", "pliers", "proceedings", "rabies", "salmon", "scissors", "series", "shears", "species", "swine", "trout", "tuna", "whiting", "wildebeest"],
"uncountable" : ["advice", "bread", "butter", "cheese", "electricity", "equipment", "fruit", "furniture", "garbage", "gravel", "happiness", "information", "ketchup", "knowledge", "love", "luggage", "mathematics", "mayonnaise", "meat", "mustard", "news", "progress", "research", "rice", "sand", "software", "understanding", "water"],
"s-singular" : ["acropolis", "aegis", "alias", "asbestos", "bathos", "bias", "caddis", "cannabis", "canvas", "chaos", "cosmos", "dais", "digitalis", "epidermis", "ethos", "gas", "glottis", "glottis", "ibis", "lens", "mantis", "marquis", "metropolis", "pathos", "pelvis", "polis", "rhinoceros", "sassafras", "trellis"],
"ex-ices" : ["codex", "murex", "silex"],
"ex-ices-classical" : ["apex", "cortex", "index", "latex", "pontifex", "simplex", "vertex", "vortex"],
"um-a" : ["agendum", "bacterium", "candelabrum", "datum", "desideratum", "erratum", "extremum", "ovum", "stratum"],
"um-a-classical" : ["aquarium", "compendium", "consortium", "cranium", "curriculum", "dictum", "emporium", "enconium", "gymnasium", "honorarium", "interregnum", "lustrum", "maximum", "medium", "memorandum", "millenium", "minimum", "momentum", "optimum", "phylum", "quantum", "rostrum", "spectrum", "speculum", "stadium", "trapezium", "ultimatum", "vacuum", "velum"],
"on-a" : ["aphelion", "asyndeton", "criterion", "hyperbaton", "noumenon", "organon", "perihelion", "phenomenon", "prolegomenon"],
"a-ae" : ["alga", "alumna", "vertebra"],
"a-ae-classical" : ["abscissa", "amoeba", "antenna", "aurora", "formula", "hydra", "hyperbola", "lacuna", "medusa", "nebula", "nova", "parabola"],
"en-ina-classical" : ["foramen", "lumen", "stamen"],
"a-ata-classical" : ["anathema", "bema", "carcinoma", "charisma", "diploma", "dogma", "drama", "edema", "enema", "enigma", "gumma", "lemma", "lymphoma", "magma", "melisma", "miasma", "oedema", "sarcoma", "schema", "soma", "stigma", "stoma", "trauma"],
"is-ides-classical" : ["clitoris", "iris"],
"us-i-classical" : ["focus", "fungus", "genius", "incubus", "nimbus", "nucleolus", "radius", "stylus", "succubus", "torus", "umbilicus", "uterus"],
"us-us-classical" : ["apparatus", "cantus", "coitus", "hiatus", "impetus", "nexus", "plexus", "prospectus", "sinus", "status"],
"o-i-classical" : ["alto", "basso", "canto", "contralto", "crescendo", "solo", "soprano", "tempo"],
"-i-classical" : ["afreet", "afrit", "efreet"],
"-im-classical" : ["cherub", "goy", "seraph"],
"o-os" : ["albino", "archipelago", "armadillo", "commando", "ditto", "dynamo", "embryo", "fiasco", "generalissimo", "ghetto", "guano", "inferno", "jumbo", "lingo", "lumbago", "magneto", "manifesto", "medico", "octavo", "photo", "pro", "quarto", "rhino", "stylo"],
"general-generals" : ["Adjutant", "Brigadier", "Lieutenant", "Major", "Quartermaster",
"adjutant", "brigadier", "lieutenant", "major", "quartermaster"],
}
NOUN = "noun"
ADJECTIVE = "adjective"
def plural(word, pos=NOUN, classical=True, custom={}):
""" Returns the plural of a given word.
For example: child -> children.
Handles nouns and adjectives, using classical inflection by default
(e.g. where "matrix" pluralizes to "matrices" instead of "matrixes".
The custom dictionary is for user-defined replacements.
"""
if word in custom.keys():
return custom[word]
# Recursion of genitives
# remove the apostrophe and any trailing -s,
# form the plural of the resultant noun, and then append an apostrophe.
# (dog's -> dogs')
if (len(word) > 0 and word[-1] == ",") or \
(len(word) > 1 and word[-2:] == "'s"):
owner = word.rstrip("'s")
owners = plural(owner, classical, custom)
if owners[-1] == "s":
return owners + "'"
else:
return owners + "'s"
# Recursion of compound words
# (Postmasters General, mothers-in-law, Roman deities).
words = word.replace("-", " ").split(" ")
if len(words) > 1:
if words[1] == "general" or words[1] == "General" and \
words[0] not in categories["general-generals"]:
return word.replace(words[0], plural(words[0], classical, custom))
elif words[1] in plural_prepositions:
return word.replace(words[0], plural(words[0], classical, custom))
else:
return word.replace(words[-1], plural(words[-1], classical, custom))
# Only a very few number of adjectives inflect.
n = range(len(plural_rules))
if pos == ADJECTIVE:
n = [0, 1]
import re
for i in n:
ruleset = plural_rules[i]
for rule in ruleset:
suffix, inflection, category, classic = rule
# A general rule,
# or a classic rule in classical mode.
if category == None:
if not classic or (classic and classical):
if re.search(suffix, word) is not None:
return re.sub(suffix, inflection, word)
# A rule relating to a specific category of words
if category != None:
if word in plural_categories[category] and (not classic or (classic and classical)):
if re.search(suffix, word) is not None:
return re.sub(suffix, inflection, word)
return word
#print plural("part-of-speech")
#print plural("child")
#print plural("dog's")
#print plural("wolf")
#print plural("bear")
#print plural("kitchen knife")
#print plural("octopus", classical=True)
#print plural("matrix", classical=True)
#print plural("matrix", classical=False)
#print plural("my", pos=ADJECTIVE)
def | (word, classical=True, custom={}):
return plural(word, NOUN, classical, custom)
def adjective_plural(word, classical=True, custom={}):
return plural(word, ADJECTIVE, classical, custom) | noun_plural | identifier_name |
plural.py | # PLURAL - last updated for NodeBox 1rc7
# Author: Tom De Smedt <[email protected]>
# See LICENSE.txt for details.
# Based on "An Algorithmic Approach to English Pluralization" by Damian Conway:
# http://www.csse.monash.edu.au/~damian/papers/HTML/Plurals.html
# Prepositions are used to solve things like
# "mother-in-law" or "man at arms"
plural_prepositions = ["about", "above", "across", "after", "among", "around", "at", "athwart", "before", "behind", "below", "beneath", "beside", "besides", "between", "betwixt", "beyond", "but", "by", "during", "except", "for", "from", "in", "into", "near", "of", "off", "on", "onto", "out", "over", "since", "till", "to", "under", "until", "unto", "upon", "with"]
# Inflection rules that are either general,
# or apply to a certain category of words,
# or apply to a certain category of words only in classical mode,
# or apply only in classical mode.
# Each rule consists of:
# suffix, inflection, category and classic flag.
plural_rules = [
# 0/ Indefinite articles and demonstratives.
[
["^a$|^an$", "some", None, False],
["^this$", "these", None, False],
["^that$", "those", None, False],
["^any$", "all", None, False]
],
# 1/ Possessive adjectives.
# Overlaps with 1/ for "his" and "its".
# Overlaps with 2/ for "her".
[
["^my$", "our", None, False],
["^your$|^thy$", "your", None, False],
["^her$|^his$|^its$|^their$", "their", None, False]
],
# 2/
# Possessive pronouns.
[
["^mine$", "ours", None, False],
["^yours$|^thine$", "yours", None, False],
["^hers$|^his$|^its$|^theirs$", "theirs", None, False]
],
# 3/
# Personal pronouns.
[
["^I$", "we", None, False],
["^me$", "us", None, False],
["^myself$", "ourselves", None, False],
["^you$", "you", None, False],
["^thou$|^thee$", "ye", None, False],
["^yourself$|^thyself$", "yourself", None, False],
["^she$|^he$|^it$|^they$", "they", None, False],
["^her$|^him$|^it$|^them$", "them", None, False],
["^herself$|^himself$|^itself$|^themself$", "themselves", None, False],
["^oneself$", "oneselves", None, False]
],
# 4/
# Words that do not inflect.
[
["$", "", "uninflected", False],
["$", "", "uncountable", False],
["s$", "s", "s-singular", False],
["fish$", "fish", None, False],
["([- ])bass$", "\\1bass", None, False],
["ois$", "ois", None, False],
["sheep$", "sheep", None, False],
["deer$", "deer", None, False],
["pox$", "pox", None, False],
["([A-Z].*)ese$", "\\1ese", None, False],
["itis$", "itis", None, False],
["(fruct|gluc|galact|lact|ket|malt|rib|sacchar|cellul)ose$", "\\1ose", None, False]
],
# 5/
# Irregular plurals.
# (mongoose, oxen).
[
["atlas$", "atlantes", None, True],
["atlas$", "atlases", None, False],
["beef$", "beeves", None, True],
["brother$", "brethren", None, True],
["child$", "children", None, False],
["corpus$", "corpora", None, True],
["corpus$", "corpuses", None, False],
["^cow$", "kine", None, True],
["ephemeris$", "ephemerides", None, False],
["ganglion$", "ganglia", None, True],
["genie$", "genii", None, True],
["genus$", "genera", None, False],
["graffito$", "graffiti", None, False],
["loaf$", "loaves", None, False],
["money$", "monies", None, True],
["mongoose$", "mongooses", None, False],
["mythos$", "mythoi", None, False],
["octopus$", "octopodes", None, True],
["opus$", "opera", None, True],
["opus$", "opuses", None, False],
["^ox$", "oxen", None, False],
["penis$", "penes", None, True],
["penis$", "penises", None, False],
["soliloquy$", "soliloquies", None, False],
["testis$", "testes", None, False],
["trilby$", "trilbys", None, False],
["turf$", "turves", None, True],
["numen$", "numena", None, False],
["occiput$", "occipita", None, True],
],
# 6/
# Irregular inflections for common suffixes
# (synopses, mice, men).
[
["man$", "men", None, False],
["person$", "people", None, False],
["([lm])ouse$", "\\1ice", None, False],
["tooth$", "teeth", None, False],
["goose$", "geese", None, False],
["foot$", "feet", None, False],
["zoon$", "zoa", None, False],
["([csx])is$", "\\1es", None, False]
],
# 7/
# Fully assimilated classical inflections
# (vertebrae, codices).
[
["ex$", "ices", "ex-ices", False],
["ex$", "ices", "ex-ices-classical", True],
["um$", "a", "um-a", False],
["um$", "a", "um-a-classical", True],
["on$", "a", "on-a", False],
["a$", "ae", "a-ae", False],
["a$", "ae", "a-ae-classical", True]
],
# 8/
# Classical variants of modern inflections
# (stigmata, soprani).
[
["trix$", "trices", None, True],
["eau$", "eaux", None, True],
["ieu$", "ieu", None, True],
["([iay])nx$", "\\1nges", None, True],
["en$", "ina", "en-ina-classical", True],
["a$", "ata", "a-ata-classical", True],
["is$", "ides", "is-ides-classical", True],
["us$", "i", "us-i-classical", True],
["us$", "us", "us-us-classical", True],
["o$", "i", "o-i-classical", True],
["$", "i", "-i-classical", True],
["$", "im", "-im-classical", True]
],
# 9/
# -ch, -sh and -ss take -es in the plural
# (churches, classes).
[
["([cs])h$", "\\1hes", None, False],
["ss$", "sses", None, False],
["x$", "xes", None, False]
],
# 10/
# Certain words ending in -f or -fe take -ves in the plural
# (lives, wolves).
[
["([aeo]l)f$", "\\1ves", None, False],
["([^d]ea)f$", "\\1ves", None, False],
["arf$", "arves", None, False],
["([nlw]i)fe$", "\\1ves", None, False],
],
# 11/
# -y takes -ys if preceded by a vowel,
# or when a proper noun,
# but -ies if preceded by a consonant
# (storeys, Marys, stories).
[
["([aeiou])y$", "\\1ys", None, False],
["([A-Z].*)y$", "\\1ys", None, False],
["y$", "ies", None, False]
],
# 12/
# Some words ending in -o take -os,
# the rest take -oes.
# Words in which the -o is preceded by a vowel always take -os
# (lassos, potatoes, bamboos).
[
["o$", "os", "o-os", False],
["([aeiou])o$", "\\1os", None, False],
["o$", "oes", None, False]
],
# 13/
# Miltary stuff (Major Generals).
[
["l$", "ls", "general-generals", False]
],
# 14/
# Otherwise, assume that the plural just adds -s
# (cats, programmes).
[
["$", "s", None, False]
],
]
# Suffix categories
plural_categories = {
"uninflected" : ["bison", "bream", "breeches", "britches", "carp", "chassis", "clippers", "cod", "contretemps", "corps", "debris", "diabetes", "djinn", "eland", "elk", "flounder", "gallows", "graffiti", "headquarters", "herpes", "high-jinks", "homework", "innings", "jackanapes", "mackerel", "measles", "mews", "mumps", "news", "pincers", "pliers", "proceedings", "rabies", "salmon", "scissors", "series", "shears", "species", "swine", "trout", "tuna", "whiting", "wildebeest"],
"uncountable" : ["advice", "bread", "butter", "cheese", "electricity", "equipment", "fruit", "furniture", "garbage", "gravel", "happiness", "information", "ketchup", "knowledge", "love", "luggage", "mathematics", "mayonnaise", "meat", "mustard", "news", "progress", "research", "rice", "sand", "software", "understanding", "water"],
"s-singular" : ["acropolis", "aegis", "alias", "asbestos", "bathos", "bias", "caddis", "cannabis", "canvas", "chaos", "cosmos", "dais", "digitalis", "epidermis", "ethos", "gas", "glottis", "glottis", "ibis", "lens", "mantis", "marquis", "metropolis", "pathos", "pelvis", "polis", "rhinoceros", "sassafras", "trellis"],
"ex-ices" : ["codex", "murex", "silex"],
"ex-ices-classical" : ["apex", "cortex", "index", "latex", "pontifex", "simplex", "vertex", "vortex"],
"um-a" : ["agendum", "bacterium", "candelabrum", "datum", "desideratum", "erratum", "extremum", "ovum", "stratum"],
"um-a-classical" : ["aquarium", "compendium", "consortium", "cranium", "curriculum", "dictum", "emporium", "enconium", "gymnasium", "honorarium", "interregnum", "lustrum", "maximum", "medium", "memorandum", "millenium", "minimum", "momentum", "optimum", "phylum", "quantum", "rostrum", "spectrum", "speculum", "stadium", "trapezium", "ultimatum", "vacuum", "velum"],
"on-a" : ["aphelion", "asyndeton", "criterion", "hyperbaton", "noumenon", "organon", "perihelion", "phenomenon", "prolegomenon"],
"a-ae" : ["alga", "alumna", "vertebra"],
"a-ae-classical" : ["abscissa", "amoeba", "antenna", "aurora", "formula", "hydra", "hyperbola", "lacuna", "medusa", "nebula", "nova", "parabola"],
"en-ina-classical" : ["foramen", "lumen", "stamen"], | "-i-classical" : ["afreet", "afrit", "efreet"],
"-im-classical" : ["cherub", "goy", "seraph"],
"o-os" : ["albino", "archipelago", "armadillo", "commando", "ditto", "dynamo", "embryo", "fiasco", "generalissimo", "ghetto", "guano", "inferno", "jumbo", "lingo", "lumbago", "magneto", "manifesto", "medico", "octavo", "photo", "pro", "quarto", "rhino", "stylo"],
"general-generals" : ["Adjutant", "Brigadier", "Lieutenant", "Major", "Quartermaster",
"adjutant", "brigadier", "lieutenant", "major", "quartermaster"],
}
NOUN = "noun"
ADJECTIVE = "adjective"
def plural(word, pos=NOUN, classical=True, custom={}):
""" Returns the plural of a given word.
For example: child -> children.
Handles nouns and adjectives, using classical inflection by default
(e.g. where "matrix" pluralizes to "matrices" instead of "matrixes".
The custom dictionary is for user-defined replacements.
"""
if word in custom.keys():
return custom[word]
# Recursion of genitives
# remove the apostrophe and any trailing -s,
# form the plural of the resultant noun, and then append an apostrophe.
# (dog's -> dogs')
if (len(word) > 0 and word[-1] == ",") or \
(len(word) > 1 and word[-2:] == "'s"):
owner = word.rstrip("'s")
owners = plural(owner, classical, custom)
if owners[-1] == "s":
return owners + "'"
else:
return owners + "'s"
# Recursion of compound words
# (Postmasters General, mothers-in-law, Roman deities).
words = word.replace("-", " ").split(" ")
if len(words) > 1:
if words[1] == "general" or words[1] == "General" and \
words[0] not in categories["general-generals"]:
return word.replace(words[0], plural(words[0], classical, custom))
elif words[1] in plural_prepositions:
return word.replace(words[0], plural(words[0], classical, custom))
else:
return word.replace(words[-1], plural(words[-1], classical, custom))
# Only a very few number of adjectives inflect.
n = range(len(plural_rules))
if pos == ADJECTIVE:
n = [0, 1]
import re
for i in n:
ruleset = plural_rules[i]
for rule in ruleset:
suffix, inflection, category, classic = rule
# A general rule,
# or a classic rule in classical mode.
if category == None:
if not classic or (classic and classical):
if re.search(suffix, word) is not None:
return re.sub(suffix, inflection, word)
# A rule relating to a specific category of words
if category != None:
if word in plural_categories[category] and (not classic or (classic and classical)):
if re.search(suffix, word) is not None:
return re.sub(suffix, inflection, word)
return word
#print plural("part-of-speech")
#print plural("child")
#print plural("dog's")
#print plural("wolf")
#print plural("bear")
#print plural("kitchen knife")
#print plural("octopus", classical=True)
#print plural("matrix", classical=True)
#print plural("matrix", classical=False)
#print plural("my", pos=ADJECTIVE)
def noun_plural(word, classical=True, custom={}):
return plural(word, NOUN, classical, custom)
def adjective_plural(word, classical=True, custom={}):
return plural(word, ADJECTIVE, classical, custom) | "a-ata-classical" : ["anathema", "bema", "carcinoma", "charisma", "diploma", "dogma", "drama", "edema", "enema", "enigma", "gumma", "lemma", "lymphoma", "magma", "melisma", "miasma", "oedema", "sarcoma", "schema", "soma", "stigma", "stoma", "trauma"],
"is-ides-classical" : ["clitoris", "iris"],
"us-i-classical" : ["focus", "fungus", "genius", "incubus", "nimbus", "nucleolus", "radius", "stylus", "succubus", "torus", "umbilicus", "uterus"],
"us-us-classical" : ["apparatus", "cantus", "coitus", "hiatus", "impetus", "nexus", "plexus", "prospectus", "sinus", "status"],
"o-i-classical" : ["alto", "basso", "canto", "contralto", "crescendo", "solo", "soprano", "tempo"], | random_line_split |
plural.py | # PLURAL - last updated for NodeBox 1rc7
# Author: Tom De Smedt <[email protected]>
# See LICENSE.txt for details.
# Based on "An Algorithmic Approach to English Pluralization" by Damian Conway:
# http://www.csse.monash.edu.au/~damian/papers/HTML/Plurals.html
# Prepositions are used to solve things like
# "mother-in-law" or "man at arms"
plural_prepositions = ["about", "above", "across", "after", "among", "around", "at", "athwart", "before", "behind", "below", "beneath", "beside", "besides", "between", "betwixt", "beyond", "but", "by", "during", "except", "for", "from", "in", "into", "near", "of", "off", "on", "onto", "out", "over", "since", "till", "to", "under", "until", "unto", "upon", "with"]
# Inflection rules that are either general,
# or apply to a certain category of words,
# or apply to a certain category of words only in classical mode,
# or apply only in classical mode.
# Each rule consists of:
# suffix, inflection, category and classic flag.
plural_rules = [
# 0/ Indefinite articles and demonstratives.
[
["^a$|^an$", "some", None, False],
["^this$", "these", None, False],
["^that$", "those", None, False],
["^any$", "all", None, False]
],
# 1/ Possessive adjectives.
# Overlaps with 1/ for "his" and "its".
# Overlaps with 2/ for "her".
[
["^my$", "our", None, False],
["^your$|^thy$", "your", None, False],
["^her$|^his$|^its$|^their$", "their", None, False]
],
# 2/
# Possessive pronouns.
[
["^mine$", "ours", None, False],
["^yours$|^thine$", "yours", None, False],
["^hers$|^his$|^its$|^theirs$", "theirs", None, False]
],
# 3/
# Personal pronouns.
[
["^I$", "we", None, False],
["^me$", "us", None, False],
["^myself$", "ourselves", None, False],
["^you$", "you", None, False],
["^thou$|^thee$", "ye", None, False],
["^yourself$|^thyself$", "yourself", None, False],
["^she$|^he$|^it$|^they$", "they", None, False],
["^her$|^him$|^it$|^them$", "them", None, False],
["^herself$|^himself$|^itself$|^themself$", "themselves", None, False],
["^oneself$", "oneselves", None, False]
],
# 4/
# Words that do not inflect.
[
["$", "", "uninflected", False],
["$", "", "uncountable", False],
["s$", "s", "s-singular", False],
["fish$", "fish", None, False],
["([- ])bass$", "\\1bass", None, False],
["ois$", "ois", None, False],
["sheep$", "sheep", None, False],
["deer$", "deer", None, False],
["pox$", "pox", None, False],
["([A-Z].*)ese$", "\\1ese", None, False],
["itis$", "itis", None, False],
["(fruct|gluc|galact|lact|ket|malt|rib|sacchar|cellul)ose$", "\\1ose", None, False]
],
# 5/
# Irregular plurals.
# (mongoose, oxen).
[
["atlas$", "atlantes", None, True],
["atlas$", "atlases", None, False],
["beef$", "beeves", None, True],
["brother$", "brethren", None, True],
["child$", "children", None, False],
["corpus$", "corpora", None, True],
["corpus$", "corpuses", None, False],
["^cow$", "kine", None, True],
["ephemeris$", "ephemerides", None, False],
["ganglion$", "ganglia", None, True],
["genie$", "genii", None, True],
["genus$", "genera", None, False],
["graffito$", "graffiti", None, False],
["loaf$", "loaves", None, False],
["money$", "monies", None, True],
["mongoose$", "mongooses", None, False],
["mythos$", "mythoi", None, False],
["octopus$", "octopodes", None, True],
["opus$", "opera", None, True],
["opus$", "opuses", None, False],
["^ox$", "oxen", None, False],
["penis$", "penes", None, True],
["penis$", "penises", None, False],
["soliloquy$", "soliloquies", None, False],
["testis$", "testes", None, False],
["trilby$", "trilbys", None, False],
["turf$", "turves", None, True],
["numen$", "numena", None, False],
["occiput$", "occipita", None, True],
],
# 6/
# Irregular inflections for common suffixes
# (synopses, mice, men).
[
["man$", "men", None, False],
["person$", "people", None, False],
["([lm])ouse$", "\\1ice", None, False],
["tooth$", "teeth", None, False],
["goose$", "geese", None, False],
["foot$", "feet", None, False],
["zoon$", "zoa", None, False],
["([csx])is$", "\\1es", None, False]
],
# 7/
# Fully assimilated classical inflections
# (vertebrae, codices).
[
["ex$", "ices", "ex-ices", False],
["ex$", "ices", "ex-ices-classical", True],
["um$", "a", "um-a", False],
["um$", "a", "um-a-classical", True],
["on$", "a", "on-a", False],
["a$", "ae", "a-ae", False],
["a$", "ae", "a-ae-classical", True]
],
# 8/
# Classical variants of modern inflections
# (stigmata, soprani).
[
["trix$", "trices", None, True],
["eau$", "eaux", None, True],
["ieu$", "ieu", None, True],
["([iay])nx$", "\\1nges", None, True],
["en$", "ina", "en-ina-classical", True],
["a$", "ata", "a-ata-classical", True],
["is$", "ides", "is-ides-classical", True],
["us$", "i", "us-i-classical", True],
["us$", "us", "us-us-classical", True],
["o$", "i", "o-i-classical", True],
["$", "i", "-i-classical", True],
["$", "im", "-im-classical", True]
],
# 9/
# -ch, -sh and -ss take -es in the plural
# (churches, classes).
[
["([cs])h$", "\\1hes", None, False],
["ss$", "sses", None, False],
["x$", "xes", None, False]
],
# 10/
# Certain words ending in -f or -fe take -ves in the plural
# (lives, wolves).
[
["([aeo]l)f$", "\\1ves", None, False],
["([^d]ea)f$", "\\1ves", None, False],
["arf$", "arves", None, False],
["([nlw]i)fe$", "\\1ves", None, False],
],
# 11/
# -y takes -ys if preceded by a vowel,
# or when a proper noun,
# but -ies if preceded by a consonant
# (storeys, Marys, stories).
[
["([aeiou])y$", "\\1ys", None, False],
["([A-Z].*)y$", "\\1ys", None, False],
["y$", "ies", None, False]
],
# 12/
# Some words ending in -o take -os,
# the rest take -oes.
# Words in which the -o is preceded by a vowel always take -os
# (lassos, potatoes, bamboos).
[
["o$", "os", "o-os", False],
["([aeiou])o$", "\\1os", None, False],
["o$", "oes", None, False]
],
# 13/
# Miltary stuff (Major Generals).
[
["l$", "ls", "general-generals", False]
],
# 14/
# Otherwise, assume that the plural just adds -s
# (cats, programmes).
[
["$", "s", None, False]
],
]
# Suffix categories
plural_categories = {
"uninflected" : ["bison", "bream", "breeches", "britches", "carp", "chassis", "clippers", "cod", "contretemps", "corps", "debris", "diabetes", "djinn", "eland", "elk", "flounder", "gallows", "graffiti", "headquarters", "herpes", "high-jinks", "homework", "innings", "jackanapes", "mackerel", "measles", "mews", "mumps", "news", "pincers", "pliers", "proceedings", "rabies", "salmon", "scissors", "series", "shears", "species", "swine", "trout", "tuna", "whiting", "wildebeest"],
"uncountable" : ["advice", "bread", "butter", "cheese", "electricity", "equipment", "fruit", "furniture", "garbage", "gravel", "happiness", "information", "ketchup", "knowledge", "love", "luggage", "mathematics", "mayonnaise", "meat", "mustard", "news", "progress", "research", "rice", "sand", "software", "understanding", "water"],
"s-singular" : ["acropolis", "aegis", "alias", "asbestos", "bathos", "bias", "caddis", "cannabis", "canvas", "chaos", "cosmos", "dais", "digitalis", "epidermis", "ethos", "gas", "glottis", "glottis", "ibis", "lens", "mantis", "marquis", "metropolis", "pathos", "pelvis", "polis", "rhinoceros", "sassafras", "trellis"],
"ex-ices" : ["codex", "murex", "silex"],
"ex-ices-classical" : ["apex", "cortex", "index", "latex", "pontifex", "simplex", "vertex", "vortex"],
"um-a" : ["agendum", "bacterium", "candelabrum", "datum", "desideratum", "erratum", "extremum", "ovum", "stratum"],
"um-a-classical" : ["aquarium", "compendium", "consortium", "cranium", "curriculum", "dictum", "emporium", "enconium", "gymnasium", "honorarium", "interregnum", "lustrum", "maximum", "medium", "memorandum", "millenium", "minimum", "momentum", "optimum", "phylum", "quantum", "rostrum", "spectrum", "speculum", "stadium", "trapezium", "ultimatum", "vacuum", "velum"],
"on-a" : ["aphelion", "asyndeton", "criterion", "hyperbaton", "noumenon", "organon", "perihelion", "phenomenon", "prolegomenon"],
"a-ae" : ["alga", "alumna", "vertebra"],
"a-ae-classical" : ["abscissa", "amoeba", "antenna", "aurora", "formula", "hydra", "hyperbola", "lacuna", "medusa", "nebula", "nova", "parabola"],
"en-ina-classical" : ["foramen", "lumen", "stamen"],
"a-ata-classical" : ["anathema", "bema", "carcinoma", "charisma", "diploma", "dogma", "drama", "edema", "enema", "enigma", "gumma", "lemma", "lymphoma", "magma", "melisma", "miasma", "oedema", "sarcoma", "schema", "soma", "stigma", "stoma", "trauma"],
"is-ides-classical" : ["clitoris", "iris"],
"us-i-classical" : ["focus", "fungus", "genius", "incubus", "nimbus", "nucleolus", "radius", "stylus", "succubus", "torus", "umbilicus", "uterus"],
"us-us-classical" : ["apparatus", "cantus", "coitus", "hiatus", "impetus", "nexus", "plexus", "prospectus", "sinus", "status"],
"o-i-classical" : ["alto", "basso", "canto", "contralto", "crescendo", "solo", "soprano", "tempo"],
"-i-classical" : ["afreet", "afrit", "efreet"],
"-im-classical" : ["cherub", "goy", "seraph"],
"o-os" : ["albino", "archipelago", "armadillo", "commando", "ditto", "dynamo", "embryo", "fiasco", "generalissimo", "ghetto", "guano", "inferno", "jumbo", "lingo", "lumbago", "magneto", "manifesto", "medico", "octavo", "photo", "pro", "quarto", "rhino", "stylo"],
"general-generals" : ["Adjutant", "Brigadier", "Lieutenant", "Major", "Quartermaster",
"adjutant", "brigadier", "lieutenant", "major", "quartermaster"],
}
NOUN = "noun"
ADJECTIVE = "adjective"
def plural(word, pos=NOUN, classical=True, custom={}):
""" Returns the plural of a given word.
For example: child -> children.
Handles nouns and adjectives, using classical inflection by default
(e.g. where "matrix" pluralizes to "matrices" instead of "matrixes".
The custom dictionary is for user-defined replacements.
"""
if word in custom.keys():
return custom[word]
# Recursion of genitives
# remove the apostrophe and any trailing -s,
# form the plural of the resultant noun, and then append an apostrophe.
# (dog's -> dogs')
if (len(word) > 0 and word[-1] == ",") or \
(len(word) > 1 and word[-2:] == "'s"):
owner = word.rstrip("'s")
owners = plural(owner, classical, custom)
if owners[-1] == "s":
return owners + "'"
else:
return owners + "'s"
# Recursion of compound words
# (Postmasters General, mothers-in-law, Roman deities).
words = word.replace("-", " ").split(" ")
if len(words) > 1:
if words[1] == "general" or words[1] == "General" and \
words[0] not in categories["general-generals"]:
return word.replace(words[0], plural(words[0], classical, custom))
elif words[1] in plural_prepositions:
return word.replace(words[0], plural(words[0], classical, custom))
else:
return word.replace(words[-1], plural(words[-1], classical, custom))
# Only a very few number of adjectives inflect.
n = range(len(plural_rules))
if pos == ADJECTIVE:
n = [0, 1]
import re
for i in n:
ruleset = plural_rules[i]
for rule in ruleset:
suffix, inflection, category, classic = rule
# A general rule,
# or a classic rule in classical mode.
if category == None:
if not classic or (classic and classical):
if re.search(suffix, word) is not None:
return re.sub(suffix, inflection, word)
# A rule relating to a specific category of words
if category != None:
|
return word
#print plural("part-of-speech")
#print plural("child")
#print plural("dog's")
#print plural("wolf")
#print plural("bear")
#print plural("kitchen knife")
#print plural("octopus", classical=True)
#print plural("matrix", classical=True)
#print plural("matrix", classical=False)
#print plural("my", pos=ADJECTIVE)
def noun_plural(word, classical=True, custom={}):
return plural(word, NOUN, classical, custom)
def adjective_plural(word, classical=True, custom={}):
return plural(word, ADJECTIVE, classical, custom) | if word in plural_categories[category] and (not classic or (classic and classical)):
if re.search(suffix, word) is not None:
return re.sub(suffix, inflection, word) | conditional_block |
plural.py | # PLURAL - last updated for NodeBox 1rc7
# Author: Tom De Smedt <[email protected]>
# See LICENSE.txt for details.
# Based on "An Algorithmic Approach to English Pluralization" by Damian Conway:
# http://www.csse.monash.edu.au/~damian/papers/HTML/Plurals.html
# Prepositions are used to solve things like
# "mother-in-law" or "man at arms"
plural_prepositions = ["about", "above", "across", "after", "among", "around", "at", "athwart", "before", "behind", "below", "beneath", "beside", "besides", "between", "betwixt", "beyond", "but", "by", "during", "except", "for", "from", "in", "into", "near", "of", "off", "on", "onto", "out", "over", "since", "till", "to", "under", "until", "unto", "upon", "with"]
# Inflection rules that are either general,
# or apply to a certain category of words,
# or apply to a certain category of words only in classical mode,
# or apply only in classical mode.
# Each rule consists of:
# suffix, inflection, category and classic flag.
plural_rules = [
# 0/ Indefinite articles and demonstratives.
[
["^a$|^an$", "some", None, False],
["^this$", "these", None, False],
["^that$", "those", None, False],
["^any$", "all", None, False]
],
# 1/ Possessive adjectives.
# Overlaps with 1/ for "his" and "its".
# Overlaps with 2/ for "her".
[
["^my$", "our", None, False],
["^your$|^thy$", "your", None, False],
["^her$|^his$|^its$|^their$", "their", None, False]
],
# 2/
# Possessive pronouns.
[
["^mine$", "ours", None, False],
["^yours$|^thine$", "yours", None, False],
["^hers$|^his$|^its$|^theirs$", "theirs", None, False]
],
# 3/
# Personal pronouns.
[
["^I$", "we", None, False],
["^me$", "us", None, False],
["^myself$", "ourselves", None, False],
["^you$", "you", None, False],
["^thou$|^thee$", "ye", None, False],
["^yourself$|^thyself$", "yourself", None, False],
["^she$|^he$|^it$|^they$", "they", None, False],
["^her$|^him$|^it$|^them$", "them", None, False],
["^herself$|^himself$|^itself$|^themself$", "themselves", None, False],
["^oneself$", "oneselves", None, False]
],
# 4/
# Words that do not inflect.
[
["$", "", "uninflected", False],
["$", "", "uncountable", False],
["s$", "s", "s-singular", False],
["fish$", "fish", None, False],
["([- ])bass$", "\\1bass", None, False],
["ois$", "ois", None, False],
["sheep$", "sheep", None, False],
["deer$", "deer", None, False],
["pox$", "pox", None, False],
["([A-Z].*)ese$", "\\1ese", None, False],
["itis$", "itis", None, False],
["(fruct|gluc|galact|lact|ket|malt|rib|sacchar|cellul)ose$", "\\1ose", None, False]
],
# 5/
# Irregular plurals.
# (mongoose, oxen).
[
["atlas$", "atlantes", None, True],
["atlas$", "atlases", None, False],
["beef$", "beeves", None, True],
["brother$", "brethren", None, True],
["child$", "children", None, False],
["corpus$", "corpora", None, True],
["corpus$", "corpuses", None, False],
["^cow$", "kine", None, True],
["ephemeris$", "ephemerides", None, False],
["ganglion$", "ganglia", None, True],
["genie$", "genii", None, True],
["genus$", "genera", None, False],
["graffito$", "graffiti", None, False],
["loaf$", "loaves", None, False],
["money$", "monies", None, True],
["mongoose$", "mongooses", None, False],
["mythos$", "mythoi", None, False],
["octopus$", "octopodes", None, True],
["opus$", "opera", None, True],
["opus$", "opuses", None, False],
["^ox$", "oxen", None, False],
["penis$", "penes", None, True],
["penis$", "penises", None, False],
["soliloquy$", "soliloquies", None, False],
["testis$", "testes", None, False],
["trilby$", "trilbys", None, False],
["turf$", "turves", None, True],
["numen$", "numena", None, False],
["occiput$", "occipita", None, True],
],
# 6/
# Irregular inflections for common suffixes
# (synopses, mice, men).
[
["man$", "men", None, False],
["person$", "people", None, False],
["([lm])ouse$", "\\1ice", None, False],
["tooth$", "teeth", None, False],
["goose$", "geese", None, False],
["foot$", "feet", None, False],
["zoon$", "zoa", None, False],
["([csx])is$", "\\1es", None, False]
],
# 7/
# Fully assimilated classical inflections
# (vertebrae, codices).
[
["ex$", "ices", "ex-ices", False],
["ex$", "ices", "ex-ices-classical", True],
["um$", "a", "um-a", False],
["um$", "a", "um-a-classical", True],
["on$", "a", "on-a", False],
["a$", "ae", "a-ae", False],
["a$", "ae", "a-ae-classical", True]
],
# 8/
# Classical variants of modern inflections
# (stigmata, soprani).
[
["trix$", "trices", None, True],
["eau$", "eaux", None, True],
["ieu$", "ieu", None, True],
["([iay])nx$", "\\1nges", None, True],
["en$", "ina", "en-ina-classical", True],
["a$", "ata", "a-ata-classical", True],
["is$", "ides", "is-ides-classical", True],
["us$", "i", "us-i-classical", True],
["us$", "us", "us-us-classical", True],
["o$", "i", "o-i-classical", True],
["$", "i", "-i-classical", True],
["$", "im", "-im-classical", True]
],
# 9/
# -ch, -sh and -ss take -es in the plural
# (churches, classes).
[
["([cs])h$", "\\1hes", None, False],
["ss$", "sses", None, False],
["x$", "xes", None, False]
],
# 10/
# Certain words ending in -f or -fe take -ves in the plural
# (lives, wolves).
[
["([aeo]l)f$", "\\1ves", None, False],
["([^d]ea)f$", "\\1ves", None, False],
["arf$", "arves", None, False],
["([nlw]i)fe$", "\\1ves", None, False],
],
# 11/
# -y takes -ys if preceded by a vowel,
# or when a proper noun,
# but -ies if preceded by a consonant
# (storeys, Marys, stories).
[
["([aeiou])y$", "\\1ys", None, False],
["([A-Z].*)y$", "\\1ys", None, False],
["y$", "ies", None, False]
],
# 12/
# Some words ending in -o take -os,
# the rest take -oes.
# Words in which the -o is preceded by a vowel always take -os
# (lassos, potatoes, bamboos).
[
["o$", "os", "o-os", False],
["([aeiou])o$", "\\1os", None, False],
["o$", "oes", None, False]
],
# 13/
# Miltary stuff (Major Generals).
[
["l$", "ls", "general-generals", False]
],
# 14/
# Otherwise, assume that the plural just adds -s
# (cats, programmes).
[
["$", "s", None, False]
],
]
# Suffix categories
plural_categories = {
"uninflected" : ["bison", "bream", "breeches", "britches", "carp", "chassis", "clippers", "cod", "contretemps", "corps", "debris", "diabetes", "djinn", "eland", "elk", "flounder", "gallows", "graffiti", "headquarters", "herpes", "high-jinks", "homework", "innings", "jackanapes", "mackerel", "measles", "mews", "mumps", "news", "pincers", "pliers", "proceedings", "rabies", "salmon", "scissors", "series", "shears", "species", "swine", "trout", "tuna", "whiting", "wildebeest"],
"uncountable" : ["advice", "bread", "butter", "cheese", "electricity", "equipment", "fruit", "furniture", "garbage", "gravel", "happiness", "information", "ketchup", "knowledge", "love", "luggage", "mathematics", "mayonnaise", "meat", "mustard", "news", "progress", "research", "rice", "sand", "software", "understanding", "water"],
"s-singular" : ["acropolis", "aegis", "alias", "asbestos", "bathos", "bias", "caddis", "cannabis", "canvas", "chaos", "cosmos", "dais", "digitalis", "epidermis", "ethos", "gas", "glottis", "glottis", "ibis", "lens", "mantis", "marquis", "metropolis", "pathos", "pelvis", "polis", "rhinoceros", "sassafras", "trellis"],
"ex-ices" : ["codex", "murex", "silex"],
"ex-ices-classical" : ["apex", "cortex", "index", "latex", "pontifex", "simplex", "vertex", "vortex"],
"um-a" : ["agendum", "bacterium", "candelabrum", "datum", "desideratum", "erratum", "extremum", "ovum", "stratum"],
"um-a-classical" : ["aquarium", "compendium", "consortium", "cranium", "curriculum", "dictum", "emporium", "enconium", "gymnasium", "honorarium", "interregnum", "lustrum", "maximum", "medium", "memorandum", "millenium", "minimum", "momentum", "optimum", "phylum", "quantum", "rostrum", "spectrum", "speculum", "stadium", "trapezium", "ultimatum", "vacuum", "velum"],
"on-a" : ["aphelion", "asyndeton", "criterion", "hyperbaton", "noumenon", "organon", "perihelion", "phenomenon", "prolegomenon"],
"a-ae" : ["alga", "alumna", "vertebra"],
"a-ae-classical" : ["abscissa", "amoeba", "antenna", "aurora", "formula", "hydra", "hyperbola", "lacuna", "medusa", "nebula", "nova", "parabola"],
"en-ina-classical" : ["foramen", "lumen", "stamen"],
"a-ata-classical" : ["anathema", "bema", "carcinoma", "charisma", "diploma", "dogma", "drama", "edema", "enema", "enigma", "gumma", "lemma", "lymphoma", "magma", "melisma", "miasma", "oedema", "sarcoma", "schema", "soma", "stigma", "stoma", "trauma"],
"is-ides-classical" : ["clitoris", "iris"],
"us-i-classical" : ["focus", "fungus", "genius", "incubus", "nimbus", "nucleolus", "radius", "stylus", "succubus", "torus", "umbilicus", "uterus"],
"us-us-classical" : ["apparatus", "cantus", "coitus", "hiatus", "impetus", "nexus", "plexus", "prospectus", "sinus", "status"],
"o-i-classical" : ["alto", "basso", "canto", "contralto", "crescendo", "solo", "soprano", "tempo"],
"-i-classical" : ["afreet", "afrit", "efreet"],
"-im-classical" : ["cherub", "goy", "seraph"],
"o-os" : ["albino", "archipelago", "armadillo", "commando", "ditto", "dynamo", "embryo", "fiasco", "generalissimo", "ghetto", "guano", "inferno", "jumbo", "lingo", "lumbago", "magneto", "manifesto", "medico", "octavo", "photo", "pro", "quarto", "rhino", "stylo"],
"general-generals" : ["Adjutant", "Brigadier", "Lieutenant", "Major", "Quartermaster",
"adjutant", "brigadier", "lieutenant", "major", "quartermaster"],
}
NOUN = "noun"
ADJECTIVE = "adjective"
def plural(word, pos=NOUN, classical=True, custom={}):
""" Returns the plural of a given word.
For example: child -> children.
Handles nouns and adjectives, using classical inflection by default
(e.g. where "matrix" pluralizes to "matrices" instead of "matrixes".
The custom dictionary is for user-defined replacements.
"""
if word in custom.keys():
return custom[word]
# Recursion of genitives
# remove the apostrophe and any trailing -s,
# form the plural of the resultant noun, and then append an apostrophe.
# (dog's -> dogs')
if (len(word) > 0 and word[-1] == ",") or \
(len(word) > 1 and word[-2:] == "'s"):
owner = word.rstrip("'s")
owners = plural(owner, classical, custom)
if owners[-1] == "s":
return owners + "'"
else:
return owners + "'s"
# Recursion of compound words
# (Postmasters General, mothers-in-law, Roman deities).
words = word.replace("-", " ").split(" ")
if len(words) > 1:
if words[1] == "general" or words[1] == "General" and \
words[0] not in categories["general-generals"]:
return word.replace(words[0], plural(words[0], classical, custom))
elif words[1] in plural_prepositions:
return word.replace(words[0], plural(words[0], classical, custom))
else:
return word.replace(words[-1], plural(words[-1], classical, custom))
# Only a very few number of adjectives inflect.
n = range(len(plural_rules))
if pos == ADJECTIVE:
n = [0, 1]
import re
for i in n:
ruleset = plural_rules[i]
for rule in ruleset:
suffix, inflection, category, classic = rule
# A general rule,
# or a classic rule in classical mode.
if category == None:
if not classic or (classic and classical):
if re.search(suffix, word) is not None:
return re.sub(suffix, inflection, word)
# A rule relating to a specific category of words
if category != None:
if word in plural_categories[category] and (not classic or (classic and classical)):
if re.search(suffix, word) is not None:
return re.sub(suffix, inflection, word)
return word
#print plural("part-of-speech")
#print plural("child")
#print plural("dog's")
#print plural("wolf")
#print plural("bear")
#print plural("kitchen knife")
#print plural("octopus", classical=True)
#print plural("matrix", classical=True)
#print plural("matrix", classical=False)
#print plural("my", pos=ADJECTIVE)
def noun_plural(word, classical=True, custom={}):
return plural(word, NOUN, classical, custom)
def adjective_plural(word, classical=True, custom={}):
| return plural(word, ADJECTIVE, classical, custom) | identifier_body |
|
hat.ts | import type { ColorGroup } from '../static-types';
export const hat: ColorGroup = {
black: 'rgba(38, 46, 51, 1)',
blue01: 'rgba(101, 201, 255, 1)',
blue02: 'rgba(81, 153, 228, 1)',
blue03: 'rgba(37, 85, 124, 1)',
gray01: 'rgba(229, 229, 229, 1)',
gray02: 'rgba(146, 149, 152, 1)',
heather: 'rgba(60, 79, 92, 1)',
pastelBlue: 'rgba(177, 226, 255, 1)',
pastelGreen: 'rgba(167, 255, 196, 1)',
pastelOrange: 'rgba(255, 222, 181, 1)',
pastelRed: 'rgba(255, 175, 185, 1)',
pastelYellow: 'rgba(255, 255, 177, 1)',
pink: 'rgba(255, 72, 142, 1)',
red: 'rgba(255, 92, 92, 1)',
white: 'rgba(255, 255, 255, 1)', | }; | random_line_split |
|
runtests.py | #!/usr/bin/env python
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import warnings
from argparse import ArgumentParser
import django
from django.apps import apps
from django.conf import settings
from django.db import connection
from django.test import TestCase, TransactionTestCase
from django.test.utils import get_runner
from django.utils import six
from django.utils._os import upath
from django.utils.deprecation import (
RemovedInDjango20Warning, RemovedInDjango110Warning,
)
from django.utils.log import DEFAULT_LOGGING
warnings.simplefilter("error", RemovedInDjango110Warning)
warnings.simplefilter("error", RemovedInDjango20Warning)
RUNTESTS_DIR = os.path.abspath(os.path.dirname(upath(__file__)))
TEMPLATE_DIR = os.path.join(RUNTESTS_DIR, 'templates')
# Create a specific subdirectory for the duration of the test suite.
TMPDIR = tempfile.mkdtemp(prefix='django_')
# Set the TMPDIR environment variable in addition to tempfile.tempdir
# so that children processes inherit it.
tempfile.tempdir = os.environ['TMPDIR'] = TMPDIR
SUBDIRS_TO_SKIP = [
'data',
'import_error_package',
'test_discovery_sample',
'test_discovery_sample2',
'test_runner_deprecation_app',
]
ALWAYS_INSTALLED_APPS = [
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.admin.apps.SimpleAdminConfig',
'django.contrib.staticfiles',
]
ALWAYS_MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
]
# Need to add the associated contrib app to INSTALLED_APPS in some cases to
# avoid "RuntimeError: Model class X doesn't declare an explicit app_label
# and either isn't in an application in INSTALLED_APPS or else was imported
# before its application was loaded."
CONTRIB_TESTS_TO_APPS = {
'flatpages_tests': 'django.contrib.flatpages',
'redirects_tests': 'django.contrib.redirects',
}
def get_test_modules():
modules = []
discovery_paths = [
(None, RUNTESTS_DIR),
# GIS tests are in nested apps
('gis_tests', os.path.join(RUNTESTS_DIR, 'gis_tests')),
]
for modpath, dirpath in discovery_paths:
for f in os.listdir(dirpath):
if ('.' in f or
os.path.basename(f) in SUBDIRS_TO_SKIP or
os.path.isfile(f) or
not os.path.exists(os.path.join(dirpath, f, '__init__.py'))):
continue
modules.append((modpath, f))
return modules
def get_installed():
return [app_config.name for app_config in apps.get_app_configs()]
def setup(verbosity, test_labels):
if verbosity >= 1:
print("Testing against Django installed in '%s'" % os.path.dirname(django.__file__))
# Force declaring available_apps in TransactionTestCase for faster tests.
def no_available_apps(self):
raise Exception("Please define available_apps in TransactionTestCase "
"and its subclasses.")
TransactionTestCase.available_apps = property(no_available_apps)
TestCase.available_apps = None
state = {
'INSTALLED_APPS': settings.INSTALLED_APPS,
'ROOT_URLCONF': getattr(settings, "ROOT_URLCONF", ""),
# Remove the following line in Django 1.10.
'TEMPLATE_DIRS': settings.TEMPLATE_DIRS,
'TEMPLATES': settings.TEMPLATES,
'LANGUAGE_CODE': settings.LANGUAGE_CODE,
'STATIC_URL': settings.STATIC_URL,
'STATIC_ROOT': settings.STATIC_ROOT,
'MIDDLEWARE_CLASSES': settings.MIDDLEWARE_CLASSES,
}
# Redirect some settings for the duration of these tests.
settings.INSTALLED_APPS = ALWAYS_INSTALLED_APPS
settings.ROOT_URLCONF = 'urls'
settings.STATIC_URL = '/static/'
settings.STATIC_ROOT = os.path.join(TMPDIR, 'static')
# Remove the following line in Django 1.10.
settings.TEMPLATE_DIRS = [TEMPLATE_DIR]
settings.TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
}]
settings.LANGUAGE_CODE = 'en'
settings.SITE_ID = 1
settings.MIDDLEWARE_CLASSES = ALWAYS_MIDDLEWARE_CLASSES
settings.MIGRATION_MODULES = {
# these 'tests.migrations' modules don't actually exist, but this lets
# us skip creating migrations for the test models.
'auth': 'django.contrib.auth.tests.migrations',
'contenttypes': 'contenttypes_tests.migrations',
}
log_config = DEFAULT_LOGGING
# Filter out non-error logging so we don't have to capture it in lots of
# tests.
log_config['loggers']['django']['level'] = 'ERROR'
settings.LOGGING = log_config
if verbosity > 0:
# Ensure any warnings captured to logging are piped through a verbose
# logging handler. If any -W options were passed explicitly on command
# line, warnings are not captured, and this has no effect.
logger = logging.getLogger('py.warnings')
handler = logging.StreamHandler()
logger.addHandler(handler)
warnings.filterwarnings(
'ignore',
'django.contrib.webdesign will be removed in Django 1.10.',
RemovedInDjango110Warning
)
warnings.filterwarnings(
'ignore',
'The GeoManager class is deprecated.',
RemovedInDjango20Warning
)
# Load all the ALWAYS_INSTALLED_APPS.
django.setup()
# Load all the test model apps.
test_modules = get_test_modules()
# Reduce given test labels to just the app module path
test_labels_set = set()
for label in test_labels:
bits = label.split('.')[:1]
test_labels_set.add('.'.join(bits))
installed_app_names = set(get_installed())
for modpath, module_name in test_modules:
if modpath:
module_label = '.'.join([modpath, module_name])
else:
module_label = module_name
# if the module (or an ancestor) was named on the command line, or
# no modules were named (i.e., run all), import
# this module and add it to INSTALLED_APPS.
if not test_labels:
module_found_in_labels = True
else:
module_found_in_labels = any(
# exact match or ancestor match
module_label == label or module_label.startswith(label + '.')
for label in test_labels_set)
if module_name in CONTRIB_TESTS_TO_APPS and module_found_in_labels:
settings.INSTALLED_APPS.append(CONTRIB_TESTS_TO_APPS[module_name])
if module_found_in_labels and module_label not in installed_app_names:
if verbosity >= 2:
print("Importing application %s" % module_name)
settings.INSTALLED_APPS.append(module_label)
# Add contrib.gis to INSTALLED_APPS if needed (rather than requiring
# @override_settings(INSTALLED_APPS=...) on all test cases.
gis = 'django.contrib.gis'
if connection.features.gis_enabled and gis not in settings.INSTALLED_APPS:
if verbosity >= 2:
print("Importing application %s" % gis)
settings.INSTALLED_APPS.append(gis)
apps.set_installed_apps(settings.INSTALLED_APPS)
return state
def teardown(state):
try:
# Removing the temporary TMPDIR. Ensure we pass in unicode
# so that it will successfully remove temp trees containing
# non-ASCII filenames on Windows. (We're assuming the temp dir
# name itself does not contain non-ASCII characters.)
shutil.rmtree(six.text_type(TMPDIR))
except OSError:
print('Failed to remove temp directory: %s' % TMPDIR)
# Restore the old settings.
for key, value in state.items():
setattr(settings, key, value)
def django_tests(verbosity, interactive, failfast, keepdb, reverse, test_labels, debug_sql):
|
def bisect_tests(bisection_label, options, test_labels):
state = setup(options.verbosity, test_labels)
test_labels = test_labels or get_installed()
print('***** Bisecting test suite: %s' % ' '.join(test_labels))
# Make sure the bisection point isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [bisection_label, 'model_inheritance_same_model_name']:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = [
sys.executable, upath(__file__), '--settings=%s' % options.settings]
if options.failfast:
subprocess_args.append('--failfast')
if options.verbosity:
subprocess_args.append('--verbosity=%s' % options.verbosity)
if not options.interactive:
subprocess_args.append('--noinput')
iteration = 1
while len(test_labels) > 1:
midpoint = len(test_labels) // 2
test_labels_a = test_labels[:midpoint] + [bisection_label]
test_labels_b = test_labels[midpoint:] + [bisection_label]
print('***** Pass %da: Running the first half of the test suite' % iteration)
print('***** Test labels: %s' % ' '.join(test_labels_a))
failures_a = subprocess.call(subprocess_args + test_labels_a)
print('***** Pass %db: Running the second half of the test suite' % iteration)
print('***** Test labels: %s' % ' '.join(test_labels_b))
print('')
failures_b = subprocess.call(subprocess_args + test_labels_b)
if failures_a and not failures_b:
print("***** Problem found in first half. Bisecting again...")
iteration = iteration + 1
test_labels = test_labels_a[:-1]
elif failures_b and not failures_a:
print("***** Problem found in second half. Bisecting again...")
iteration = iteration + 1
test_labels = test_labels_b[:-1]
elif failures_a and failures_b:
print("***** Multiple sources of failure found")
break
else:
print("***** No source of failure found... try pair execution (--pair)")
break
if len(test_labels) == 1:
print("***** Source of error: %s" % test_labels[0])
teardown(state)
def paired_tests(paired_test, options, test_labels):
state = setup(options.verbosity, test_labels)
test_labels = test_labels or get_installed()
print('***** Trying paired execution')
# Make sure the constant member of the pair isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [paired_test, 'model_inheritance_same_model_name']:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = [
sys.executable, upath(__file__), '--settings=%s' % options.settings]
if options.failfast:
subprocess_args.append('--failfast')
if options.verbosity:
subprocess_args.append('--verbosity=%s' % options.verbosity)
if not options.interactive:
subprocess_args.append('--noinput')
for i, label in enumerate(test_labels):
print('***** %d of %d: Check test pairing with %s' % (
i + 1, len(test_labels), label))
failures = subprocess.call(subprocess_args + [label, paired_test])
if failures:
print('***** Found problem pair with %s' % label)
return
print('***** No problem pair found')
teardown(state)
if __name__ == "__main__":
parser = ArgumentParser(description="Run the Django test suite.")
parser.add_argument('modules', nargs='*', metavar='module',
help='Optional path(s) to test modules; e.g. "i18n" or '
'"i18n.tests.TranslationTests.test_lazy_objects".')
parser.add_argument(
'-v', '--verbosity', default=1, type=int, choices=[0, 1, 2, 3],
help='Verbosity level; 0=minimal output, 1=normal output, 2=all output')
parser.add_argument(
'--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.')
parser.add_argument(
'--failfast', action='store_true', dest='failfast', default=False,
help='Tells Django to stop running the test suite after first failed '
'test.')
parser.add_argument(
'-k', '--keepdb', action='store_true', dest='keepdb', default=False,
help='Tells Django to preserve the test database between runs.')
parser.add_argument(
'--settings',
help='Python path to settings module, e.g. "myproject.settings". If '
'this isn\'t provided, either the DJANGO_SETTINGS_MODULE '
'environment variable or "test_sqlite" will be used.')
parser.add_argument('--bisect',
help='Bisect the test suite to discover a test that causes a test '
'failure when combined with the named test.')
parser.add_argument('--pair',
help='Run the test suite in pairs with the named test to find problem '
'pairs.')
parser.add_argument('--reverse', action='store_true', default=False,
help='Sort test suites and test cases in opposite order to debug '
'test side effects not apparent with normal execution lineup.')
parser.add_argument('--liveserver',
help='Overrides the default address where the live server (used with '
'LiveServerTestCase) is expected to run from. The default value '
'is localhost:8081.')
parser.add_argument(
'--selenium', action='store_true', dest='selenium', default=False,
help='Run the Selenium tests as well (if Selenium is installed)')
parser.add_argument(
'--debug-sql', action='store_true', dest='debug_sql', default=False,
help='Turn on the SQL query logger within tests')
options = parser.parse_args()
# mock is a required dependency
try:
from django.test import mock # NOQA
except ImportError:
print(
"Please install test dependencies first: \n"
"$ pip install -r requirements/py%s.txt" % sys.version_info.major
)
sys.exit(1)
# Allow including a trailing slash on app_labels for tab completion convenience
options.modules = [os.path.normpath(labels) for labels in options.modules]
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
else:
if "DJANGO_SETTINGS_MODULE" not in os.environ:
os.environ['DJANGO_SETTINGS_MODULE'] = 'test_sqlite'
options.settings = os.environ['DJANGO_SETTINGS_MODULE']
if options.liveserver is not None:
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = options.liveserver
if options.selenium:
os.environ['DJANGO_SELENIUM_TESTS'] = '1'
if options.bisect:
bisect_tests(options.bisect, options, options.modules)
elif options.pair:
paired_tests(options.pair, options, options.modules)
else:
failures = django_tests(options.verbosity, options.interactive,
options.failfast, options.keepdb,
options.reverse, options.modules,
options.debug_sql)
if failures:
sys.exit(bool(failures))
| state = setup(verbosity, test_labels)
extra_tests = []
# Run the test suite, including the extra validation tests.
if not hasattr(settings, 'TEST_RUNNER'):
settings.TEST_RUNNER = 'django.test.runner.DiscoverRunner'
TestRunner = get_runner(settings)
test_runner = TestRunner(
verbosity=verbosity,
interactive=interactive,
failfast=failfast,
keepdb=keepdb,
reverse=reverse,
debug_sql=debug_sql,
)
failures = test_runner.run_tests(
test_labels or get_installed(),
extra_tests=extra_tests,
)
teardown(state)
return failures | identifier_body |
runtests.py | #!/usr/bin/env python
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import warnings
from argparse import ArgumentParser
import django
from django.apps import apps
from django.conf import settings
from django.db import connection
from django.test import TestCase, TransactionTestCase
from django.test.utils import get_runner
from django.utils import six
from django.utils._os import upath
from django.utils.deprecation import (
RemovedInDjango20Warning, RemovedInDjango110Warning,
)
from django.utils.log import DEFAULT_LOGGING
warnings.simplefilter("error", RemovedInDjango110Warning)
warnings.simplefilter("error", RemovedInDjango20Warning)
RUNTESTS_DIR = os.path.abspath(os.path.dirname(upath(__file__)))
TEMPLATE_DIR = os.path.join(RUNTESTS_DIR, 'templates')
# Create a specific subdirectory for the duration of the test suite.
TMPDIR = tempfile.mkdtemp(prefix='django_')
# Set the TMPDIR environment variable in addition to tempfile.tempdir
# so that children processes inherit it.
tempfile.tempdir = os.environ['TMPDIR'] = TMPDIR
SUBDIRS_TO_SKIP = [
'data',
'import_error_package',
'test_discovery_sample',
'test_discovery_sample2',
'test_runner_deprecation_app',
]
ALWAYS_INSTALLED_APPS = [
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.admin.apps.SimpleAdminConfig',
'django.contrib.staticfiles',
]
ALWAYS_MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
]
# Need to add the associated contrib app to INSTALLED_APPS in some cases to
# avoid "RuntimeError: Model class X doesn't declare an explicit app_label
# and either isn't in an application in INSTALLED_APPS or else was imported
# before its application was loaded."
CONTRIB_TESTS_TO_APPS = {
'flatpages_tests': 'django.contrib.flatpages',
'redirects_tests': 'django.contrib.redirects',
}
def get_test_modules():
modules = []
discovery_paths = [
(None, RUNTESTS_DIR),
# GIS tests are in nested apps
('gis_tests', os.path.join(RUNTESTS_DIR, 'gis_tests')),
]
for modpath, dirpath in discovery_paths:
for f in os.listdir(dirpath):
if ('.' in f or
os.path.basename(f) in SUBDIRS_TO_SKIP or
os.path.isfile(f) or
not os.path.exists(os.path.join(dirpath, f, '__init__.py'))):
continue
modules.append((modpath, f))
return modules
def get_installed():
return [app_config.name for app_config in apps.get_app_configs()]
def setup(verbosity, test_labels):
if verbosity >= 1:
print("Testing against Django installed in '%s'" % os.path.dirname(django.__file__))
# Force declaring available_apps in TransactionTestCase for faster tests.
def no_available_apps(self):
raise Exception("Please define available_apps in TransactionTestCase "
"and its subclasses.")
TransactionTestCase.available_apps = property(no_available_apps)
TestCase.available_apps = None
state = {
'INSTALLED_APPS': settings.INSTALLED_APPS,
'ROOT_URLCONF': getattr(settings, "ROOT_URLCONF", ""),
# Remove the following line in Django 1.10.
'TEMPLATE_DIRS': settings.TEMPLATE_DIRS,
'TEMPLATES': settings.TEMPLATES,
'LANGUAGE_CODE': settings.LANGUAGE_CODE,
'STATIC_URL': settings.STATIC_URL,
'STATIC_ROOT': settings.STATIC_ROOT,
'MIDDLEWARE_CLASSES': settings.MIDDLEWARE_CLASSES,
}
# Redirect some settings for the duration of these tests.
settings.INSTALLED_APPS = ALWAYS_INSTALLED_APPS
settings.ROOT_URLCONF = 'urls'
settings.STATIC_URL = '/static/'
settings.STATIC_ROOT = os.path.join(TMPDIR, 'static')
# Remove the following line in Django 1.10.
settings.TEMPLATE_DIRS = [TEMPLATE_DIR]
settings.TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
}]
settings.LANGUAGE_CODE = 'en'
settings.SITE_ID = 1
settings.MIDDLEWARE_CLASSES = ALWAYS_MIDDLEWARE_CLASSES
settings.MIGRATION_MODULES = {
# these 'tests.migrations' modules don't actually exist, but this lets
# us skip creating migrations for the test models.
'auth': 'django.contrib.auth.tests.migrations',
'contenttypes': 'contenttypes_tests.migrations',
}
log_config = DEFAULT_LOGGING
# Filter out non-error logging so we don't have to capture it in lots of
# tests.
log_config['loggers']['django']['level'] = 'ERROR'
settings.LOGGING = log_config
if verbosity > 0:
# Ensure any warnings captured to logging are piped through a verbose
# logging handler. If any -W options were passed explicitly on command
# line, warnings are not captured, and this has no effect.
logger = logging.getLogger('py.warnings')
handler = logging.StreamHandler()
logger.addHandler(handler)
warnings.filterwarnings(
'ignore',
'django.contrib.webdesign will be removed in Django 1.10.',
RemovedInDjango110Warning
)
warnings.filterwarnings(
'ignore',
'The GeoManager class is deprecated.',
RemovedInDjango20Warning
)
# Load all the ALWAYS_INSTALLED_APPS.
django.setup()
# Load all the test model apps.
test_modules = get_test_modules()
# Reduce given test labels to just the app module path
test_labels_set = set()
for label in test_labels:
bits = label.split('.')[:1]
test_labels_set.add('.'.join(bits))
installed_app_names = set(get_installed())
for modpath, module_name in test_modules:
if modpath:
module_label = '.'.join([modpath, module_name])
else:
module_label = module_name
# if the module (or an ancestor) was named on the command line, or
# no modules were named (i.e., run all), import
# this module and add it to INSTALLED_APPS.
if not test_labels:
module_found_in_labels = True
else:
module_found_in_labels = any(
# exact match or ancestor match
module_label == label or module_label.startswith(label + '.')
for label in test_labels_set)
if module_name in CONTRIB_TESTS_TO_APPS and module_found_in_labels:
settings.INSTALLED_APPS.append(CONTRIB_TESTS_TO_APPS[module_name])
if module_found_in_labels and module_label not in installed_app_names:
if verbosity >= 2:
print("Importing application %s" % module_name)
settings.INSTALLED_APPS.append(module_label)
# Add contrib.gis to INSTALLED_APPS if needed (rather than requiring
# @override_settings(INSTALLED_APPS=...) on all test cases.
gis = 'django.contrib.gis'
if connection.features.gis_enabled and gis not in settings.INSTALLED_APPS:
if verbosity >= 2:
print("Importing application %s" % gis)
settings.INSTALLED_APPS.append(gis)
apps.set_installed_apps(settings.INSTALLED_APPS)
return state
def teardown(state):
try:
# Removing the temporary TMPDIR. Ensure we pass in unicode
# so that it will successfully remove temp trees containing
# non-ASCII filenames on Windows. (We're assuming the temp dir
# name itself does not contain non-ASCII characters.)
shutil.rmtree(six.text_type(TMPDIR))
except OSError:
print('Failed to remove temp directory: %s' % TMPDIR)
# Restore the old settings.
for key, value in state.items():
setattr(settings, key, value)
def django_tests(verbosity, interactive, failfast, keepdb, reverse, test_labels, debug_sql):
state = setup(verbosity, test_labels)
extra_tests = []
# Run the test suite, including the extra validation tests.
if not hasattr(settings, 'TEST_RUNNER'):
settings.TEST_RUNNER = 'django.test.runner.DiscoverRunner'
TestRunner = get_runner(settings)
test_runner = TestRunner(
verbosity=verbosity,
interactive=interactive,
failfast=failfast,
keepdb=keepdb,
reverse=reverse,
debug_sql=debug_sql,
)
failures = test_runner.run_tests(
test_labels or get_installed(),
extra_tests=extra_tests,
)
teardown(state)
return failures
def bisect_tests(bisection_label, options, test_labels):
state = setup(options.verbosity, test_labels)
test_labels = test_labels or get_installed()
print('***** Bisecting test suite: %s' % ' '.join(test_labels))
# Make sure the bisection point isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [bisection_label, 'model_inheritance_same_model_name']:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = [
sys.executable, upath(__file__), '--settings=%s' % options.settings]
if options.failfast:
subprocess_args.append('--failfast')
if options.verbosity:
subprocess_args.append('--verbosity=%s' % options.verbosity)
if not options.interactive:
subprocess_args.append('--noinput')
iteration = 1
while len(test_labels) > 1:
midpoint = len(test_labels) // 2
test_labels_a = test_labels[:midpoint] + [bisection_label]
test_labels_b = test_labels[midpoint:] + [bisection_label]
print('***** Pass %da: Running the first half of the test suite' % iteration)
print('***** Test labels: %s' % ' '.join(test_labels_a))
failures_a = subprocess.call(subprocess_args + test_labels_a)
print('***** Pass %db: Running the second half of the test suite' % iteration)
print('***** Test labels: %s' % ' '.join(test_labels_b))
print('')
failures_b = subprocess.call(subprocess_args + test_labels_b)
if failures_a and not failures_b:
print("***** Problem found in first half. Bisecting again...")
iteration = iteration + 1
test_labels = test_labels_a[:-1]
elif failures_b and not failures_a:
print("***** Problem found in second half. Bisecting again...")
iteration = iteration + 1
test_labels = test_labels_b[:-1]
elif failures_a and failures_b:
print("***** Multiple sources of failure found")
break
else:
print("***** No source of failure found... try pair execution (--pair)")
break
if len(test_labels) == 1:
print("***** Source of error: %s" % test_labels[0])
teardown(state)
def paired_tests(paired_test, options, test_labels):
state = setup(options.verbosity, test_labels)
test_labels = test_labels or get_installed()
print('***** Trying paired execution')
# Make sure the constant member of the pair isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [paired_test, 'model_inheritance_same_model_name']:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = [
sys.executable, upath(__file__), '--settings=%s' % options.settings]
if options.failfast:
subprocess_args.append('--failfast')
if options.verbosity:
subprocess_args.append('--verbosity=%s' % options.verbosity)
if not options.interactive:
subprocess_args.append('--noinput')
for i, label in enumerate(test_labels):
print('***** %d of %d: Check test pairing with %s' % (
i + 1, len(test_labels), label))
failures = subprocess.call(subprocess_args + [label, paired_test]) | teardown(state)
if __name__ == "__main__":
parser = ArgumentParser(description="Run the Django test suite.")
parser.add_argument('modules', nargs='*', metavar='module',
help='Optional path(s) to test modules; e.g. "i18n" or '
'"i18n.tests.TranslationTests.test_lazy_objects".')
parser.add_argument(
'-v', '--verbosity', default=1, type=int, choices=[0, 1, 2, 3],
help='Verbosity level; 0=minimal output, 1=normal output, 2=all output')
parser.add_argument(
'--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.')
parser.add_argument(
'--failfast', action='store_true', dest='failfast', default=False,
help='Tells Django to stop running the test suite after first failed '
'test.')
parser.add_argument(
'-k', '--keepdb', action='store_true', dest='keepdb', default=False,
help='Tells Django to preserve the test database between runs.')
parser.add_argument(
'--settings',
help='Python path to settings module, e.g. "myproject.settings". If '
'this isn\'t provided, either the DJANGO_SETTINGS_MODULE '
'environment variable or "test_sqlite" will be used.')
parser.add_argument('--bisect',
help='Bisect the test suite to discover a test that causes a test '
'failure when combined with the named test.')
parser.add_argument('--pair',
help='Run the test suite in pairs with the named test to find problem '
'pairs.')
parser.add_argument('--reverse', action='store_true', default=False,
help='Sort test suites and test cases in opposite order to debug '
'test side effects not apparent with normal execution lineup.')
parser.add_argument('--liveserver',
help='Overrides the default address where the live server (used with '
'LiveServerTestCase) is expected to run from. The default value '
'is localhost:8081.')
parser.add_argument(
'--selenium', action='store_true', dest='selenium', default=False,
help='Run the Selenium tests as well (if Selenium is installed)')
parser.add_argument(
'--debug-sql', action='store_true', dest='debug_sql', default=False,
help='Turn on the SQL query logger within tests')
options = parser.parse_args()
# mock is a required dependency
try:
from django.test import mock # NOQA
except ImportError:
print(
"Please install test dependencies first: \n"
"$ pip install -r requirements/py%s.txt" % sys.version_info.major
)
sys.exit(1)
# Allow including a trailing slash on app_labels for tab completion convenience
options.modules = [os.path.normpath(labels) for labels in options.modules]
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
else:
if "DJANGO_SETTINGS_MODULE" not in os.environ:
os.environ['DJANGO_SETTINGS_MODULE'] = 'test_sqlite'
options.settings = os.environ['DJANGO_SETTINGS_MODULE']
if options.liveserver is not None:
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = options.liveserver
if options.selenium:
os.environ['DJANGO_SELENIUM_TESTS'] = '1'
if options.bisect:
bisect_tests(options.bisect, options, options.modules)
elif options.pair:
paired_tests(options.pair, options, options.modules)
else:
failures = django_tests(options.verbosity, options.interactive,
options.failfast, options.keepdb,
options.reverse, options.modules,
options.debug_sql)
if failures:
sys.exit(bool(failures)) | if failures:
print('***** Found problem pair with %s' % label)
return
print('***** No problem pair found') | random_line_split |
runtests.py | #!/usr/bin/env python
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import warnings
from argparse import ArgumentParser
import django
from django.apps import apps
from django.conf import settings
from django.db import connection
from django.test import TestCase, TransactionTestCase
from django.test.utils import get_runner
from django.utils import six
from django.utils._os import upath
from django.utils.deprecation import (
RemovedInDjango20Warning, RemovedInDjango110Warning,
)
from django.utils.log import DEFAULT_LOGGING
warnings.simplefilter("error", RemovedInDjango110Warning)
warnings.simplefilter("error", RemovedInDjango20Warning)
RUNTESTS_DIR = os.path.abspath(os.path.dirname(upath(__file__)))
TEMPLATE_DIR = os.path.join(RUNTESTS_DIR, 'templates')
# Create a specific subdirectory for the duration of the test suite.
TMPDIR = tempfile.mkdtemp(prefix='django_')
# Set the TMPDIR environment variable in addition to tempfile.tempdir
# so that children processes inherit it.
tempfile.tempdir = os.environ['TMPDIR'] = TMPDIR
SUBDIRS_TO_SKIP = [
'data',
'import_error_package',
'test_discovery_sample',
'test_discovery_sample2',
'test_runner_deprecation_app',
]
ALWAYS_INSTALLED_APPS = [
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.admin.apps.SimpleAdminConfig',
'django.contrib.staticfiles',
]
ALWAYS_MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
]
# Need to add the associated contrib app to INSTALLED_APPS in some cases to
# avoid "RuntimeError: Model class X doesn't declare an explicit app_label
# and either isn't in an application in INSTALLED_APPS or else was imported
# before its application was loaded."
CONTRIB_TESTS_TO_APPS = {
'flatpages_tests': 'django.contrib.flatpages',
'redirects_tests': 'django.contrib.redirects',
}
def get_test_modules():
modules = []
discovery_paths = [
(None, RUNTESTS_DIR),
# GIS tests are in nested apps
('gis_tests', os.path.join(RUNTESTS_DIR, 'gis_tests')),
]
for modpath, dirpath in discovery_paths:
for f in os.listdir(dirpath):
if ('.' in f or
os.path.basename(f) in SUBDIRS_TO_SKIP or
os.path.isfile(f) or
not os.path.exists(os.path.join(dirpath, f, '__init__.py'))):
continue
modules.append((modpath, f))
return modules
def get_installed():
return [app_config.name for app_config in apps.get_app_configs()]
def setup(verbosity, test_labels):
if verbosity >= 1:
print("Testing against Django installed in '%s'" % os.path.dirname(django.__file__))
# Force declaring available_apps in TransactionTestCase for faster tests.
def no_available_apps(self):
raise Exception("Please define available_apps in TransactionTestCase "
"and its subclasses.")
TransactionTestCase.available_apps = property(no_available_apps)
TestCase.available_apps = None
state = {
'INSTALLED_APPS': settings.INSTALLED_APPS,
'ROOT_URLCONF': getattr(settings, "ROOT_URLCONF", ""),
# Remove the following line in Django 1.10.
'TEMPLATE_DIRS': settings.TEMPLATE_DIRS,
'TEMPLATES': settings.TEMPLATES,
'LANGUAGE_CODE': settings.LANGUAGE_CODE,
'STATIC_URL': settings.STATIC_URL,
'STATIC_ROOT': settings.STATIC_ROOT,
'MIDDLEWARE_CLASSES': settings.MIDDLEWARE_CLASSES,
}
# Redirect some settings for the duration of these tests.
settings.INSTALLED_APPS = ALWAYS_INSTALLED_APPS
settings.ROOT_URLCONF = 'urls'
settings.STATIC_URL = '/static/'
settings.STATIC_ROOT = os.path.join(TMPDIR, 'static')
# Remove the following line in Django 1.10.
settings.TEMPLATE_DIRS = [TEMPLATE_DIR]
settings.TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
}]
settings.LANGUAGE_CODE = 'en'
settings.SITE_ID = 1
settings.MIDDLEWARE_CLASSES = ALWAYS_MIDDLEWARE_CLASSES
settings.MIGRATION_MODULES = {
# these 'tests.migrations' modules don't actually exist, but this lets
# us skip creating migrations for the test models.
'auth': 'django.contrib.auth.tests.migrations',
'contenttypes': 'contenttypes_tests.migrations',
}
log_config = DEFAULT_LOGGING
# Filter out non-error logging so we don't have to capture it in lots of
# tests.
log_config['loggers']['django']['level'] = 'ERROR'
settings.LOGGING = log_config
if verbosity > 0:
# Ensure any warnings captured to logging are piped through a verbose
# logging handler. If any -W options were passed explicitly on command
# line, warnings are not captured, and this has no effect.
logger = logging.getLogger('py.warnings')
handler = logging.StreamHandler()
logger.addHandler(handler)
warnings.filterwarnings(
'ignore',
'django.contrib.webdesign will be removed in Django 1.10.',
RemovedInDjango110Warning
)
warnings.filterwarnings(
'ignore',
'The GeoManager class is deprecated.',
RemovedInDjango20Warning
)
# Load all the ALWAYS_INSTALLED_APPS.
django.setup()
# Load all the test model apps.
test_modules = get_test_modules()
# Reduce given test labels to just the app module path
test_labels_set = set()
for label in test_labels:
bits = label.split('.')[:1]
test_labels_set.add('.'.join(bits))
installed_app_names = set(get_installed())
for modpath, module_name in test_modules:
if modpath:
module_label = '.'.join([modpath, module_name])
else:
module_label = module_name
# if the module (or an ancestor) was named on the command line, or
# no modules were named (i.e., run all), import
# this module and add it to INSTALLED_APPS.
if not test_labels:
module_found_in_labels = True
else:
module_found_in_labels = any(
# exact match or ancestor match
module_label == label or module_label.startswith(label + '.')
for label in test_labels_set)
if module_name in CONTRIB_TESTS_TO_APPS and module_found_in_labels:
settings.INSTALLED_APPS.append(CONTRIB_TESTS_TO_APPS[module_name])
if module_found_in_labels and module_label not in installed_app_names:
if verbosity >= 2:
print("Importing application %s" % module_name)
settings.INSTALLED_APPS.append(module_label)
# Add contrib.gis to INSTALLED_APPS if needed (rather than requiring
# @override_settings(INSTALLED_APPS=...) on all test cases.
gis = 'django.contrib.gis'
if connection.features.gis_enabled and gis not in settings.INSTALLED_APPS:
if verbosity >= 2:
print("Importing application %s" % gis)
settings.INSTALLED_APPS.append(gis)
apps.set_installed_apps(settings.INSTALLED_APPS)
return state
def teardown(state):
try:
# Removing the temporary TMPDIR. Ensure we pass in unicode
# so that it will successfully remove temp trees containing
# non-ASCII filenames on Windows. (We're assuming the temp dir
# name itself does not contain non-ASCII characters.)
shutil.rmtree(six.text_type(TMPDIR))
except OSError:
print('Failed to remove temp directory: %s' % TMPDIR)
# Restore the old settings.
for key, value in state.items():
setattr(settings, key, value)
def django_tests(verbosity, interactive, failfast, keepdb, reverse, test_labels, debug_sql):
state = setup(verbosity, test_labels)
extra_tests = []
# Run the test suite, including the extra validation tests.
if not hasattr(settings, 'TEST_RUNNER'):
settings.TEST_RUNNER = 'django.test.runner.DiscoverRunner'
TestRunner = get_runner(settings)
test_runner = TestRunner(
verbosity=verbosity,
interactive=interactive,
failfast=failfast,
keepdb=keepdb,
reverse=reverse,
debug_sql=debug_sql,
)
failures = test_runner.run_tests(
test_labels or get_installed(),
extra_tests=extra_tests,
)
teardown(state)
return failures
def bisect_tests(bisection_label, options, test_labels):
state = setup(options.verbosity, test_labels)
test_labels = test_labels or get_installed()
print('***** Bisecting test suite: %s' % ' '.join(test_labels))
# Make sure the bisection point isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [bisection_label, 'model_inheritance_same_model_name']:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = [
sys.executable, upath(__file__), '--settings=%s' % options.settings]
if options.failfast:
subprocess_args.append('--failfast')
if options.verbosity:
subprocess_args.append('--verbosity=%s' % options.verbosity)
if not options.interactive:
subprocess_args.append('--noinput')
iteration = 1
while len(test_labels) > 1:
midpoint = len(test_labels) // 2
test_labels_a = test_labels[:midpoint] + [bisection_label]
test_labels_b = test_labels[midpoint:] + [bisection_label]
print('***** Pass %da: Running the first half of the test suite' % iteration)
print('***** Test labels: %s' % ' '.join(test_labels_a))
failures_a = subprocess.call(subprocess_args + test_labels_a)
print('***** Pass %db: Running the second half of the test suite' % iteration)
print('***** Test labels: %s' % ' '.join(test_labels_b))
print('')
failures_b = subprocess.call(subprocess_args + test_labels_b)
if failures_a and not failures_b:
print("***** Problem found in first half. Bisecting again...")
iteration = iteration + 1
test_labels = test_labels_a[:-1]
elif failures_b and not failures_a:
print("***** Problem found in second half. Bisecting again...")
iteration = iteration + 1
test_labels = test_labels_b[:-1]
elif failures_a and failures_b:
print("***** Multiple sources of failure found")
break
else:
print("***** No source of failure found... try pair execution (--pair)")
break
if len(test_labels) == 1:
print("***** Source of error: %s" % test_labels[0])
teardown(state)
def paired_tests(paired_test, options, test_labels):
state = setup(options.verbosity, test_labels)
test_labels = test_labels or get_installed()
print('***** Trying paired execution')
# Make sure the constant member of the pair isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [paired_test, 'model_inheritance_same_model_name']:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = [
sys.executable, upath(__file__), '--settings=%s' % options.settings]
if options.failfast:
subprocess_args.append('--failfast')
if options.verbosity:
subprocess_args.append('--verbosity=%s' % options.verbosity)
if not options.interactive:
subprocess_args.append('--noinput')
for i, label in enumerate(test_labels):
print('***** %d of %d: Check test pairing with %s' % (
i + 1, len(test_labels), label))
failures = subprocess.call(subprocess_args + [label, paired_test])
if failures:
print('***** Found problem pair with %s' % label)
return
print('***** No problem pair found')
teardown(state)
if __name__ == "__main__":
parser = ArgumentParser(description="Run the Django test suite.")
parser.add_argument('modules', nargs='*', metavar='module',
help='Optional path(s) to test modules; e.g. "i18n" or '
'"i18n.tests.TranslationTests.test_lazy_objects".')
parser.add_argument(
'-v', '--verbosity', default=1, type=int, choices=[0, 1, 2, 3],
help='Verbosity level; 0=minimal output, 1=normal output, 2=all output')
parser.add_argument(
'--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.')
parser.add_argument(
'--failfast', action='store_true', dest='failfast', default=False,
help='Tells Django to stop running the test suite after first failed '
'test.')
parser.add_argument(
'-k', '--keepdb', action='store_true', dest='keepdb', default=False,
help='Tells Django to preserve the test database between runs.')
parser.add_argument(
'--settings',
help='Python path to settings module, e.g. "myproject.settings". If '
'this isn\'t provided, either the DJANGO_SETTINGS_MODULE '
'environment variable or "test_sqlite" will be used.')
parser.add_argument('--bisect',
help='Bisect the test suite to discover a test that causes a test '
'failure when combined with the named test.')
parser.add_argument('--pair',
help='Run the test suite in pairs with the named test to find problem '
'pairs.')
parser.add_argument('--reverse', action='store_true', default=False,
help='Sort test suites and test cases in opposite order to debug '
'test side effects not apparent with normal execution lineup.')
parser.add_argument('--liveserver',
help='Overrides the default address where the live server (used with '
'LiveServerTestCase) is expected to run from. The default value '
'is localhost:8081.')
parser.add_argument(
'--selenium', action='store_true', dest='selenium', default=False,
help='Run the Selenium tests as well (if Selenium is installed)')
parser.add_argument(
'--debug-sql', action='store_true', dest='debug_sql', default=False,
help='Turn on the SQL query logger within tests')
options = parser.parse_args()
# mock is a required dependency
try:
from django.test import mock # NOQA
except ImportError:
print(
"Please install test dependencies first: \n"
"$ pip install -r requirements/py%s.txt" % sys.version_info.major
)
sys.exit(1)
# Allow including a trailing slash on app_labels for tab completion convenience
options.modules = [os.path.normpath(labels) for labels in options.modules]
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
else:
if "DJANGO_SETTINGS_MODULE" not in os.environ:
os.environ['DJANGO_SETTINGS_MODULE'] = 'test_sqlite'
options.settings = os.environ['DJANGO_SETTINGS_MODULE']
if options.liveserver is not None:
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = options.liveserver
if options.selenium:
os.environ['DJANGO_SELENIUM_TESTS'] = '1'
if options.bisect:
bisect_tests(options.bisect, options, options.modules)
elif options.pair:
paired_tests(options.pair, options, options.modules)
else:
failures = django_tests(options.verbosity, options.interactive,
options.failfast, options.keepdb,
options.reverse, options.modules,
options.debug_sql)
if failures:
| sys.exit(bool(failures)) | conditional_block |
|
runtests.py | #!/usr/bin/env python
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import warnings
from argparse import ArgumentParser
import django
from django.apps import apps
from django.conf import settings
from django.db import connection
from django.test import TestCase, TransactionTestCase
from django.test.utils import get_runner
from django.utils import six
from django.utils._os import upath
from django.utils.deprecation import (
RemovedInDjango20Warning, RemovedInDjango110Warning,
)
from django.utils.log import DEFAULT_LOGGING
warnings.simplefilter("error", RemovedInDjango110Warning)
warnings.simplefilter("error", RemovedInDjango20Warning)
RUNTESTS_DIR = os.path.abspath(os.path.dirname(upath(__file__)))
TEMPLATE_DIR = os.path.join(RUNTESTS_DIR, 'templates')
# Create a specific subdirectory for the duration of the test suite.
TMPDIR = tempfile.mkdtemp(prefix='django_')
# Set the TMPDIR environment variable in addition to tempfile.tempdir
# so that children processes inherit it.
tempfile.tempdir = os.environ['TMPDIR'] = TMPDIR
SUBDIRS_TO_SKIP = [
'data',
'import_error_package',
'test_discovery_sample',
'test_discovery_sample2',
'test_runner_deprecation_app',
]
ALWAYS_INSTALLED_APPS = [
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.admin.apps.SimpleAdminConfig',
'django.contrib.staticfiles',
]
ALWAYS_MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
]
# Need to add the associated contrib app to INSTALLED_APPS in some cases to
# avoid "RuntimeError: Model class X doesn't declare an explicit app_label
# and either isn't in an application in INSTALLED_APPS or else was imported
# before its application was loaded."
CONTRIB_TESTS_TO_APPS = {
'flatpages_tests': 'django.contrib.flatpages',
'redirects_tests': 'django.contrib.redirects',
}
def get_test_modules():
modules = []
discovery_paths = [
(None, RUNTESTS_DIR),
# GIS tests are in nested apps
('gis_tests', os.path.join(RUNTESTS_DIR, 'gis_tests')),
]
for modpath, dirpath in discovery_paths:
for f in os.listdir(dirpath):
if ('.' in f or
os.path.basename(f) in SUBDIRS_TO_SKIP or
os.path.isfile(f) or
not os.path.exists(os.path.join(dirpath, f, '__init__.py'))):
continue
modules.append((modpath, f))
return modules
def get_installed():
return [app_config.name for app_config in apps.get_app_configs()]
def setup(verbosity, test_labels):
if verbosity >= 1:
print("Testing against Django installed in '%s'" % os.path.dirname(django.__file__))
# Force declaring available_apps in TransactionTestCase for faster tests.
def no_available_apps(self):
raise Exception("Please define available_apps in TransactionTestCase "
"and its subclasses.")
TransactionTestCase.available_apps = property(no_available_apps)
TestCase.available_apps = None
state = {
'INSTALLED_APPS': settings.INSTALLED_APPS,
'ROOT_URLCONF': getattr(settings, "ROOT_URLCONF", ""),
# Remove the following line in Django 1.10.
'TEMPLATE_DIRS': settings.TEMPLATE_DIRS,
'TEMPLATES': settings.TEMPLATES,
'LANGUAGE_CODE': settings.LANGUAGE_CODE,
'STATIC_URL': settings.STATIC_URL,
'STATIC_ROOT': settings.STATIC_ROOT,
'MIDDLEWARE_CLASSES': settings.MIDDLEWARE_CLASSES,
}
# Redirect some settings for the duration of these tests.
settings.INSTALLED_APPS = ALWAYS_INSTALLED_APPS
settings.ROOT_URLCONF = 'urls'
settings.STATIC_URL = '/static/'
settings.STATIC_ROOT = os.path.join(TMPDIR, 'static')
# Remove the following line in Django 1.10.
settings.TEMPLATE_DIRS = [TEMPLATE_DIR]
settings.TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
}]
settings.LANGUAGE_CODE = 'en'
settings.SITE_ID = 1
settings.MIDDLEWARE_CLASSES = ALWAYS_MIDDLEWARE_CLASSES
settings.MIGRATION_MODULES = {
# these 'tests.migrations' modules don't actually exist, but this lets
# us skip creating migrations for the test models.
'auth': 'django.contrib.auth.tests.migrations',
'contenttypes': 'contenttypes_tests.migrations',
}
log_config = DEFAULT_LOGGING
# Filter out non-error logging so we don't have to capture it in lots of
# tests.
log_config['loggers']['django']['level'] = 'ERROR'
settings.LOGGING = log_config
if verbosity > 0:
# Ensure any warnings captured to logging are piped through a verbose
# logging handler. If any -W options were passed explicitly on command
# line, warnings are not captured, and this has no effect.
logger = logging.getLogger('py.warnings')
handler = logging.StreamHandler()
logger.addHandler(handler)
warnings.filterwarnings(
'ignore',
'django.contrib.webdesign will be removed in Django 1.10.',
RemovedInDjango110Warning
)
warnings.filterwarnings(
'ignore',
'The GeoManager class is deprecated.',
RemovedInDjango20Warning
)
# Load all the ALWAYS_INSTALLED_APPS.
django.setup()
# Load all the test model apps.
test_modules = get_test_modules()
# Reduce given test labels to just the app module path
test_labels_set = set()
for label in test_labels:
bits = label.split('.')[:1]
test_labels_set.add('.'.join(bits))
installed_app_names = set(get_installed())
for modpath, module_name in test_modules:
if modpath:
module_label = '.'.join([modpath, module_name])
else:
module_label = module_name
# if the module (or an ancestor) was named on the command line, or
# no modules were named (i.e., run all), import
# this module and add it to INSTALLED_APPS.
if not test_labels:
module_found_in_labels = True
else:
module_found_in_labels = any(
# exact match or ancestor match
module_label == label or module_label.startswith(label + '.')
for label in test_labels_set)
if module_name in CONTRIB_TESTS_TO_APPS and module_found_in_labels:
settings.INSTALLED_APPS.append(CONTRIB_TESTS_TO_APPS[module_name])
if module_found_in_labels and module_label not in installed_app_names:
if verbosity >= 2:
print("Importing application %s" % module_name)
settings.INSTALLED_APPS.append(module_label)
# Add contrib.gis to INSTALLED_APPS if needed (rather than requiring
# @override_settings(INSTALLED_APPS=...) on all test cases.
gis = 'django.contrib.gis'
if connection.features.gis_enabled and gis not in settings.INSTALLED_APPS:
if verbosity >= 2:
print("Importing application %s" % gis)
settings.INSTALLED_APPS.append(gis)
apps.set_installed_apps(settings.INSTALLED_APPS)
return state
def teardown(state):
try:
# Removing the temporary TMPDIR. Ensure we pass in unicode
# so that it will successfully remove temp trees containing
# non-ASCII filenames on Windows. (We're assuming the temp dir
# name itself does not contain non-ASCII characters.)
shutil.rmtree(six.text_type(TMPDIR))
except OSError:
print('Failed to remove temp directory: %s' % TMPDIR)
# Restore the old settings.
for key, value in state.items():
setattr(settings, key, value)
def django_tests(verbosity, interactive, failfast, keepdb, reverse, test_labels, debug_sql):
state = setup(verbosity, test_labels)
extra_tests = []
# Run the test suite, including the extra validation tests.
if not hasattr(settings, 'TEST_RUNNER'):
settings.TEST_RUNNER = 'django.test.runner.DiscoverRunner'
TestRunner = get_runner(settings)
test_runner = TestRunner(
verbosity=verbosity,
interactive=interactive,
failfast=failfast,
keepdb=keepdb,
reverse=reverse,
debug_sql=debug_sql,
)
failures = test_runner.run_tests(
test_labels or get_installed(),
extra_tests=extra_tests,
)
teardown(state)
return failures
def bisect_tests(bisection_label, options, test_labels):
state = setup(options.verbosity, test_labels)
test_labels = test_labels or get_installed()
print('***** Bisecting test suite: %s' % ' '.join(test_labels))
# Make sure the bisection point isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [bisection_label, 'model_inheritance_same_model_name']:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = [
sys.executable, upath(__file__), '--settings=%s' % options.settings]
if options.failfast:
subprocess_args.append('--failfast')
if options.verbosity:
subprocess_args.append('--verbosity=%s' % options.verbosity)
if not options.interactive:
subprocess_args.append('--noinput')
iteration = 1
while len(test_labels) > 1:
midpoint = len(test_labels) // 2
test_labels_a = test_labels[:midpoint] + [bisection_label]
test_labels_b = test_labels[midpoint:] + [bisection_label]
print('***** Pass %da: Running the first half of the test suite' % iteration)
print('***** Test labels: %s' % ' '.join(test_labels_a))
failures_a = subprocess.call(subprocess_args + test_labels_a)
print('***** Pass %db: Running the second half of the test suite' % iteration)
print('***** Test labels: %s' % ' '.join(test_labels_b))
print('')
failures_b = subprocess.call(subprocess_args + test_labels_b)
if failures_a and not failures_b:
print("***** Problem found in first half. Bisecting again...")
iteration = iteration + 1
test_labels = test_labels_a[:-1]
elif failures_b and not failures_a:
print("***** Problem found in second half. Bisecting again...")
iteration = iteration + 1
test_labels = test_labels_b[:-1]
elif failures_a and failures_b:
print("***** Multiple sources of failure found")
break
else:
print("***** No source of failure found... try pair execution (--pair)")
break
if len(test_labels) == 1:
print("***** Source of error: %s" % test_labels[0])
teardown(state)
def | (paired_test, options, test_labels):
state = setup(options.verbosity, test_labels)
test_labels = test_labels or get_installed()
print('***** Trying paired execution')
# Make sure the constant member of the pair isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [paired_test, 'model_inheritance_same_model_name']:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = [
sys.executable, upath(__file__), '--settings=%s' % options.settings]
if options.failfast:
subprocess_args.append('--failfast')
if options.verbosity:
subprocess_args.append('--verbosity=%s' % options.verbosity)
if not options.interactive:
subprocess_args.append('--noinput')
for i, label in enumerate(test_labels):
print('***** %d of %d: Check test pairing with %s' % (
i + 1, len(test_labels), label))
failures = subprocess.call(subprocess_args + [label, paired_test])
if failures:
print('***** Found problem pair with %s' % label)
return
print('***** No problem pair found')
teardown(state)
if __name__ == "__main__":
parser = ArgumentParser(description="Run the Django test suite.")
parser.add_argument('modules', nargs='*', metavar='module',
help='Optional path(s) to test modules; e.g. "i18n" or '
'"i18n.tests.TranslationTests.test_lazy_objects".')
parser.add_argument(
'-v', '--verbosity', default=1, type=int, choices=[0, 1, 2, 3],
help='Verbosity level; 0=minimal output, 1=normal output, 2=all output')
parser.add_argument(
'--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.')
parser.add_argument(
'--failfast', action='store_true', dest='failfast', default=False,
help='Tells Django to stop running the test suite after first failed '
'test.')
parser.add_argument(
'-k', '--keepdb', action='store_true', dest='keepdb', default=False,
help='Tells Django to preserve the test database between runs.')
parser.add_argument(
'--settings',
help='Python path to settings module, e.g. "myproject.settings". If '
'this isn\'t provided, either the DJANGO_SETTINGS_MODULE '
'environment variable or "test_sqlite" will be used.')
parser.add_argument('--bisect',
help='Bisect the test suite to discover a test that causes a test '
'failure when combined with the named test.')
parser.add_argument('--pair',
help='Run the test suite in pairs with the named test to find problem '
'pairs.')
parser.add_argument('--reverse', action='store_true', default=False,
help='Sort test suites and test cases in opposite order to debug '
'test side effects not apparent with normal execution lineup.')
parser.add_argument('--liveserver',
help='Overrides the default address where the live server (used with '
'LiveServerTestCase) is expected to run from. The default value '
'is localhost:8081.')
parser.add_argument(
'--selenium', action='store_true', dest='selenium', default=False,
help='Run the Selenium tests as well (if Selenium is installed)')
parser.add_argument(
'--debug-sql', action='store_true', dest='debug_sql', default=False,
help='Turn on the SQL query logger within tests')
options = parser.parse_args()
# mock is a required dependency
try:
from django.test import mock # NOQA
except ImportError:
print(
"Please install test dependencies first: \n"
"$ pip install -r requirements/py%s.txt" % sys.version_info.major
)
sys.exit(1)
# Allow including a trailing slash on app_labels for tab completion convenience
options.modules = [os.path.normpath(labels) for labels in options.modules]
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
else:
if "DJANGO_SETTINGS_MODULE" not in os.environ:
os.environ['DJANGO_SETTINGS_MODULE'] = 'test_sqlite'
options.settings = os.environ['DJANGO_SETTINGS_MODULE']
if options.liveserver is not None:
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = options.liveserver
if options.selenium:
os.environ['DJANGO_SELENIUM_TESTS'] = '1'
if options.bisect:
bisect_tests(options.bisect, options, options.modules)
elif options.pair:
paired_tests(options.pair, options, options.modules)
else:
failures = django_tests(options.verbosity, options.interactive,
options.failfast, options.keepdb,
options.reverse, options.modules,
options.debug_sql)
if failures:
sys.exit(bool(failures))
| paired_tests | identifier_name |
validitystate.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::ValidityStateBinding;
use dom::bindings::codegen::Bindings::ValidityStateBinding::ValidityStateMethods;
use dom::bindings::js::{JS, Root};
use dom::bindings::reflector::{Reflector, reflect_dom_object};
use dom::element::Element;
use dom::window::Window;
// https://html.spec.whatwg.org/multipage/#validity-states
#[derive(JSTraceable)]
#[derive(HeapSizeOf)]
pub enum ValidityStatus {
ValueMissing,
TypeMismatch,
PatternMismatch,
TooLong,
TooShort,
RangeUnderflow,
RangeOverflow,
StepMismatch,
BadInput,
CustomError,
Valid
}
bitflags!{
pub flags ValidationFlags: u32 {
const VALUE_MISSING = 0b0000000001,
const TYPE_MISMATCH = 0b0000000010,
const PATTERN_MISMATCH = 0b0000000100,
const TOO_LONG = 0b0000001000,
const TOO_SHORT = 0b0000010000,
const RANGE_UNDERFLOW = 0b0000100000,
const RANGE_OVERFLOW = 0b0001000000,
const STEP_MISMATCH = 0b0010000000,
const BAD_INPUT = 0b0100000000,
const CUSTOM_ERROR = 0b1000000000,
}
}
// https://html.spec.whatwg.org/multipage/#validitystate
#[dom_struct]
pub struct ValidityState {
reflector_: Reflector,
element: JS<Element>,
state: ValidityStatus
}
| state: ValidityStatus::Valid
}
}
pub fn new(window: &Window, element: &Element) -> Root<ValidityState> {
reflect_dom_object(box ValidityState::new_inherited(element),
window,
ValidityStateBinding::Wrap)
}
}
impl ValidityStateMethods for ValidityState {
// https://html.spec.whatwg.org/multipage/#dom-validitystate-valuemissing
fn ValueMissing(&self) -> bool {
false
}
// https://html.spec.whatwg.org/multipage/#dom-validitystate-typemismatch
fn TypeMismatch(&self) -> bool {
false
}
// https://html.spec.whatwg.org/multipage/#dom-validitystate-patternmismatch
fn PatternMismatch(&self) -> bool {
false
}
// https://html.spec.whatwg.org/multipage/#dom-validitystate-toolong
fn TooLong(&self) -> bool {
false
}
// https://html.spec.whatwg.org/multipage/#dom-validitystate-tooshort
fn TooShort(&self) -> bool {
false
}
// https://html.spec.whatwg.org/multipage/#dom-validitystate-rangeunderflow
fn RangeUnderflow(&self) -> bool {
false
}
// https://html.spec.whatwg.org/multipage/#dom-validitystate-rangeoverflow
fn RangeOverflow(&self) -> bool {
false
}
// https://html.spec.whatwg.org/multipage/#dom-validitystate-stepmismatch
fn StepMismatch(&self) -> bool {
false
}
// https://html.spec.whatwg.org/multipage/#dom-validitystate-badinput
fn BadInput(&self) -> bool {
false
}
// https://html.spec.whatwg.org/multipage/#dom-validitystate-customerror
fn CustomError(&self) -> bool {
false
}
// https://html.spec.whatwg.org/multipage/#dom-validitystate-valid
fn Valid(&self) -> bool {
false
}
} | impl ValidityState {
fn new_inherited(element: &Element) -> ValidityState {
ValidityState {
reflector_: Reflector::new(),
element: JS::from_ref(element), | random_line_split |
validitystate.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::ValidityStateBinding;
use dom::bindings::codegen::Bindings::ValidityStateBinding::ValidityStateMethods;
use dom::bindings::js::{JS, Root};
use dom::bindings::reflector::{Reflector, reflect_dom_object};
use dom::element::Element;
use dom::window::Window;
// https://html.spec.whatwg.org/multipage/#validity-states
#[derive(JSTraceable)]
#[derive(HeapSizeOf)]
pub enum ValidityStatus {
ValueMissing,
TypeMismatch,
PatternMismatch,
TooLong,
TooShort,
RangeUnderflow,
RangeOverflow,
StepMismatch,
BadInput,
CustomError,
Valid
}
bitflags!{
pub flags ValidationFlags: u32 {
const VALUE_MISSING = 0b0000000001,
const TYPE_MISMATCH = 0b0000000010,
const PATTERN_MISMATCH = 0b0000000100,
const TOO_LONG = 0b0000001000,
const TOO_SHORT = 0b0000010000,
const RANGE_UNDERFLOW = 0b0000100000,
const RANGE_OVERFLOW = 0b0001000000,
const STEP_MISMATCH = 0b0010000000,
const BAD_INPUT = 0b0100000000,
const CUSTOM_ERROR = 0b1000000000,
}
}
// https://html.spec.whatwg.org/multipage/#validitystate
#[dom_struct]
pub struct ValidityState {
reflector_: Reflector,
element: JS<Element>,
state: ValidityStatus
}
impl ValidityState {
fn new_inherited(element: &Element) -> ValidityState {
ValidityState {
reflector_: Reflector::new(),
element: JS::from_ref(element),
state: ValidityStatus::Valid
}
}
pub fn new(window: &Window, element: &Element) -> Root<ValidityState> {
reflect_dom_object(box ValidityState::new_inherited(element),
window,
ValidityStateBinding::Wrap)
}
}
impl ValidityStateMethods for ValidityState {
// https://html.spec.whatwg.org/multipage/#dom-validitystate-valuemissing
fn ValueMissing(&self) -> bool {
false
}
// https://html.spec.whatwg.org/multipage/#dom-validitystate-typemismatch
fn TypeMismatch(&self) -> bool {
false
}
// https://html.spec.whatwg.org/multipage/#dom-validitystate-patternmismatch
fn PatternMismatch(&self) -> bool {
false
}
// https://html.spec.whatwg.org/multipage/#dom-validitystate-toolong
fn TooLong(&self) -> bool {
false
}
// https://html.spec.whatwg.org/multipage/#dom-validitystate-tooshort
fn TooShort(&self) -> bool {
false
}
// https://html.spec.whatwg.org/multipage/#dom-validitystate-rangeunderflow
fn RangeUnderflow(&self) -> bool {
false
}
// https://html.spec.whatwg.org/multipage/#dom-validitystate-rangeoverflow
fn RangeOverflow(&self) -> bool {
false
}
// https://html.spec.whatwg.org/multipage/#dom-validitystate-stepmismatch
fn StepMismatch(&self) -> bool {
false
}
// https://html.spec.whatwg.org/multipage/#dom-validitystate-badinput
fn BadInput(&self) -> bool {
false
}
// https://html.spec.whatwg.org/multipage/#dom-validitystate-customerror
fn CustomError(&self) -> bool |
// https://html.spec.whatwg.org/multipage/#dom-validitystate-valid
fn Valid(&self) -> bool {
false
}
}
| {
false
} | identifier_body |
validitystate.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::ValidityStateBinding;
use dom::bindings::codegen::Bindings::ValidityStateBinding::ValidityStateMethods;
use dom::bindings::js::{JS, Root};
use dom::bindings::reflector::{Reflector, reflect_dom_object};
use dom::element::Element;
use dom::window::Window;
// https://html.spec.whatwg.org/multipage/#validity-states
#[derive(JSTraceable)]
#[derive(HeapSizeOf)]
pub enum ValidityStatus {
ValueMissing,
TypeMismatch,
PatternMismatch,
TooLong,
TooShort,
RangeUnderflow,
RangeOverflow,
StepMismatch,
BadInput,
CustomError,
Valid
}
bitflags!{
pub flags ValidationFlags: u32 {
const VALUE_MISSING = 0b0000000001,
const TYPE_MISMATCH = 0b0000000010,
const PATTERN_MISMATCH = 0b0000000100,
const TOO_LONG = 0b0000001000,
const TOO_SHORT = 0b0000010000,
const RANGE_UNDERFLOW = 0b0000100000,
const RANGE_OVERFLOW = 0b0001000000,
const STEP_MISMATCH = 0b0010000000,
const BAD_INPUT = 0b0100000000,
const CUSTOM_ERROR = 0b1000000000,
}
}
// https://html.spec.whatwg.org/multipage/#validitystate
#[dom_struct]
pub struct ValidityState {
reflector_: Reflector,
element: JS<Element>,
state: ValidityStatus
}
impl ValidityState {
fn new_inherited(element: &Element) -> ValidityState {
ValidityState {
reflector_: Reflector::new(),
element: JS::from_ref(element),
state: ValidityStatus::Valid
}
}
pub fn new(window: &Window, element: &Element) -> Root<ValidityState> {
reflect_dom_object(box ValidityState::new_inherited(element),
window,
ValidityStateBinding::Wrap)
}
}
impl ValidityStateMethods for ValidityState {
// https://html.spec.whatwg.org/multipage/#dom-validitystate-valuemissing
fn | (&self) -> bool {
false
}
// https://html.spec.whatwg.org/multipage/#dom-validitystate-typemismatch
fn TypeMismatch(&self) -> bool {
false
}
// https://html.spec.whatwg.org/multipage/#dom-validitystate-patternmismatch
fn PatternMismatch(&self) -> bool {
false
}
// https://html.spec.whatwg.org/multipage/#dom-validitystate-toolong
fn TooLong(&self) -> bool {
false
}
// https://html.spec.whatwg.org/multipage/#dom-validitystate-tooshort
fn TooShort(&self) -> bool {
false
}
// https://html.spec.whatwg.org/multipage/#dom-validitystate-rangeunderflow
fn RangeUnderflow(&self) -> bool {
false
}
// https://html.spec.whatwg.org/multipage/#dom-validitystate-rangeoverflow
fn RangeOverflow(&self) -> bool {
false
}
// https://html.spec.whatwg.org/multipage/#dom-validitystate-stepmismatch
fn StepMismatch(&self) -> bool {
false
}
// https://html.spec.whatwg.org/multipage/#dom-validitystate-badinput
fn BadInput(&self) -> bool {
false
}
// https://html.spec.whatwg.org/multipage/#dom-validitystate-customerror
fn CustomError(&self) -> bool {
false
}
// https://html.spec.whatwg.org/multipage/#dom-validitystate-valid
fn Valid(&self) -> bool {
false
}
}
| ValueMissing | identifier_name |
udt.rs | extern crate cassandra;
use cassandra::*;
fn main() {
let mut cluster = Cluster::new();
cluster.set_contact_points("127.0.0.1").unwrap();
match cluster.connect() {
Ok(ref mut session) => {
let schema = session.get_schema();
session.execute(
"CREATE KEYSPACE examples WITH replication = \
{ 'class': 'SimpleStrategy', 'replication_factor': '3' }",
0
);
session.execute(
"CREATE TYPE examples.phone_numbers (phone1 int, phone2 int)",
0
);
session.execute(
"CREATE TYPE examples.address \
(street text, city text, zip int, phone set<frozen<phone_numbers>>)"
,0
);
session.execute(
"CREATE TABLE examples.udt (id timeuuid, address frozen<address>, PRIMARY KEY(id))",
0
);
insert_into_udt(&session, schema).unwrap();
select_from_udt(&session).unwrap();
session.close().wait().unwrap();
}
err => println!("{:?}", err),
}
}
fn select_from_udt(session: &Session) -> Result<(), CassandraError> {
let query = "SELECT * FROM examples.udt";
let statement = Statement::new(query, 0);
let mut future = session.execute_statement(&statement);
match future.wait() {
Err(err) => panic!("Error: {:?}", err),
Ok(result) => {
for row in result.iter() {
let id_value = row.get_column_by_name("id");
let address_value = row.get_column_by_name("address");
let fields_iter = try!(address_value.use_type_iter());
let id_str = try!(id_value.get_uuid()).to_string();
println!("id {}", id_str); | match field.1.get_type() {
ValueType::VARCHAR => println!("{}", try!(field.1.get_string())),
ValueType::INT => println!("{}", try!(field.1.get_int32())),
ValueType::SET =>
for phone_numbers in try!(field.1.as_set_iterator()) {
for phone_number in try!(phone_numbers.as_user_type_iterator()) {
let phone_number_value = phone_number.1;
println!("{}", phone_number_value);
}
},
other => panic!("Unsupported type: {:?}", other),
}
}
}
Ok(())
}
}
}
fn insert_into_udt(session: &Session) -> Result<(), CassandraError> {
let query = "INSERT INTO examples.udt (id, address) VALUES (?, ?)";
let mut statement = Statement::new(query, 2);
let uuid_gen = UuidGen::new();
let udt_address = schema.get_udt("examples", "address");
let udt_phone = cass_keyspace_meta_user_type_by_name(&schema, "examples", "phone_numbers");
let id = uuid_gen.get_time();
let id_str = id.to_string();
let mut address = UserType::new(udt_address);
let mut phone = Set::new(2);
let mut phone_numbers = UserType::new(udt_phone);
phone_numbers.set_int32_by_name("phone1", 0 + 1).unwrap();
phone_numbers.set_int32_by_name("phone2", 0 + 2).unwrap();
phone.append_user_type(phone_numbers).unwrap();
address.set_string_by_name("street", &id_str).unwrap();
address.set_int32_by_name("zip", id.0.time_and_version as i32).unwrap();
address.set_collection_by_name("phone", phone).unwrap();
statement.bind(0, id).unwrap();
statement.bind_user_type(1, address).unwrap();
let mut future = session.execute_statement(&statement);
match future.wait() {
Ok(_) => Ok(()),
Err(err) => panic!("Error: {:?}", err),
}
} | for field in fields_iter {
println!("{}", field.0); | random_line_split |
udt.rs | extern crate cassandra;
use cassandra::*;
fn main() |
fn select_from_udt(session: &Session) -> Result<(), CassandraError> {
let query = "SELECT * FROM examples.udt";
let statement = Statement::new(query, 0);
let mut future = session.execute_statement(&statement);
match future.wait() {
Err(err) => panic!("Error: {:?}", err),
Ok(result) => {
for row in result.iter() {
let id_value = row.get_column_by_name("id");
let address_value = row.get_column_by_name("address");
let fields_iter = try!(address_value.use_type_iter());
let id_str = try!(id_value.get_uuid()).to_string();
println!("id {}", id_str);
for field in fields_iter {
println!("{}", field.0);
match field.1.get_type() {
ValueType::VARCHAR => println!("{}", try!(field.1.get_string())),
ValueType::INT => println!("{}", try!(field.1.get_int32())),
ValueType::SET =>
for phone_numbers in try!(field.1.as_set_iterator()) {
for phone_number in try!(phone_numbers.as_user_type_iterator()) {
let phone_number_value = phone_number.1;
println!("{}", phone_number_value);
}
},
other => panic!("Unsupported type: {:?}", other),
}
}
}
Ok(())
}
}
}
fn insert_into_udt(session: &Session) -> Result<(), CassandraError> {
let query = "INSERT INTO examples.udt (id, address) VALUES (?, ?)";
let mut statement = Statement::new(query, 2);
let uuid_gen = UuidGen::new();
let udt_address = schema.get_udt("examples", "address");
let udt_phone = cass_keyspace_meta_user_type_by_name(&schema, "examples", "phone_numbers");
let id = uuid_gen.get_time();
let id_str = id.to_string();
let mut address = UserType::new(udt_address);
let mut phone = Set::new(2);
let mut phone_numbers = UserType::new(udt_phone);
phone_numbers.set_int32_by_name("phone1", 0 + 1).unwrap();
phone_numbers.set_int32_by_name("phone2", 0 + 2).unwrap();
phone.append_user_type(phone_numbers).unwrap();
address.set_string_by_name("street", &id_str).unwrap();
address.set_int32_by_name("zip", id.0.time_and_version as i32).unwrap();
address.set_collection_by_name("phone", phone).unwrap();
statement.bind(0, id).unwrap();
statement.bind_user_type(1, address).unwrap();
let mut future = session.execute_statement(&statement);
match future.wait() {
Ok(_) => Ok(()),
Err(err) => panic!("Error: {:?}", err),
}
}
| {
let mut cluster = Cluster::new();
cluster.set_contact_points("127.0.0.1").unwrap();
match cluster.connect() {
Ok(ref mut session) => {
let schema = session.get_schema();
session.execute(
"CREATE KEYSPACE examples WITH replication = \
{ 'class': 'SimpleStrategy', 'replication_factor': '3' }",
0
);
session.execute(
"CREATE TYPE examples.phone_numbers (phone1 int, phone2 int)",
0
);
session.execute(
"CREATE TYPE examples.address \
(street text, city text, zip int, phone set<frozen<phone_numbers>>)"
,0
);
session.execute(
"CREATE TABLE examples.udt (id timeuuid, address frozen<address>, PRIMARY KEY(id))",
0
);
insert_into_udt(&session, schema).unwrap();
select_from_udt(&session).unwrap();
session.close().wait().unwrap();
}
err => println!("{:?}", err),
}
} | identifier_body |
udt.rs | extern crate cassandra;
use cassandra::*;
fn main() {
let mut cluster = Cluster::new();
cluster.set_contact_points("127.0.0.1").unwrap();
match cluster.connect() {
Ok(ref mut session) => {
let schema = session.get_schema();
session.execute(
"CREATE KEYSPACE examples WITH replication = \
{ 'class': 'SimpleStrategy', 'replication_factor': '3' }",
0
);
session.execute(
"CREATE TYPE examples.phone_numbers (phone1 int, phone2 int)",
0
);
session.execute(
"CREATE TYPE examples.address \
(street text, city text, zip int, phone set<frozen<phone_numbers>>)"
,0
);
session.execute(
"CREATE TABLE examples.udt (id timeuuid, address frozen<address>, PRIMARY KEY(id))",
0
);
insert_into_udt(&session, schema).unwrap();
select_from_udt(&session).unwrap();
session.close().wait().unwrap();
}
err => println!("{:?}", err),
}
}
fn select_from_udt(session: &Session) -> Result<(), CassandraError> {
let query = "SELECT * FROM examples.udt";
let statement = Statement::new(query, 0);
let mut future = session.execute_statement(&statement);
match future.wait() {
Err(err) => panic!("Error: {:?}", err),
Ok(result) => {
for row in result.iter() {
let id_value = row.get_column_by_name("id");
let address_value = row.get_column_by_name("address");
let fields_iter = try!(address_value.use_type_iter());
let id_str = try!(id_value.get_uuid()).to_string();
println!("id {}", id_str);
for field in fields_iter {
println!("{}", field.0);
match field.1.get_type() {
ValueType::VARCHAR => println!("{}", try!(field.1.get_string())),
ValueType::INT => println!("{}", try!(field.1.get_int32())),
ValueType::SET =>
for phone_numbers in try!(field.1.as_set_iterator()) {
for phone_number in try!(phone_numbers.as_user_type_iterator()) {
let phone_number_value = phone_number.1;
println!("{}", phone_number_value);
}
},
other => panic!("Unsupported type: {:?}", other),
}
}
}
Ok(())
}
}
}
fn | (session: &Session) -> Result<(), CassandraError> {
let query = "INSERT INTO examples.udt (id, address) VALUES (?, ?)";
let mut statement = Statement::new(query, 2);
let uuid_gen = UuidGen::new();
let udt_address = schema.get_udt("examples", "address");
let udt_phone = cass_keyspace_meta_user_type_by_name(&schema, "examples", "phone_numbers");
let id = uuid_gen.get_time();
let id_str = id.to_string();
let mut address = UserType::new(udt_address);
let mut phone = Set::new(2);
let mut phone_numbers = UserType::new(udt_phone);
phone_numbers.set_int32_by_name("phone1", 0 + 1).unwrap();
phone_numbers.set_int32_by_name("phone2", 0 + 2).unwrap();
phone.append_user_type(phone_numbers).unwrap();
address.set_string_by_name("street", &id_str).unwrap();
address.set_int32_by_name("zip", id.0.time_and_version as i32).unwrap();
address.set_collection_by_name("phone", phone).unwrap();
statement.bind(0, id).unwrap();
statement.bind_user_type(1, address).unwrap();
let mut future = session.execute_statement(&statement);
match future.wait() {
Ok(_) => Ok(()),
Err(err) => panic!("Error: {:?}", err),
}
}
| insert_into_udt | identifier_name |
autoplay.js | const autoplay = {
props: {
/**
* Flag to enable autoplay
*/
autoplay: {
type: Boolean,
default: false
},
/**
* Time elapsed before next slide
*/
autoplayTimeout: {
type: Number,
default: 2000
},
/**
* Flag to pause autoplay on hover
*/
autoplayHoverPause: {
type: Boolean,
default: true
}
},
data () {
return {
autoplayInterval: null
}
},
destroyed () {
if (!process.server) {
this.pauseAutoplay()
this.$el.removeEventListener('mouseenter', this.pauseAutoplay)
this.$el.removeEventListener('mouseleave', this.startAutoplay)
}
},
methods: {
pauseAutoplay () | ,
startAutoplay () {
if (this.autoplay) {
this.autoplayInterval = setInterval(() => {
this.dir === 'ltr' ? this.goPrev() : this.goNext()
}, this.autoplayTimeout)
}
}
},
mounted () {
if (!process.server && this.autoplayHoverPause) {
this.$el.addEventListener('mouseenter', this.pauseAutoplay)
this.$el.addEventListener('mouseleave', this.startAutoplay)
this.startAutoplay()
}
}
}
export default autoplay
| {
if (this.autoplayInterval) {
this.autoplayInterval = clearInterval(this.autoplayInterval)
}
} | identifier_body |
autoplay.js | const autoplay = {
props: {
/**
* Flag to enable autoplay
*/
autoplay: {
type: Boolean,
default: false
},
/**
* Time elapsed before next slide
*/
autoplayTimeout: {
type: Number,
default: 2000
},
/**
* Flag to pause autoplay on hover
*/
autoplayHoverPause: {
type: Boolean,
default: true
}
},
data () {
return {
autoplayInterval: null
}
},
destroyed () {
if (!process.server) {
this.pauseAutoplay()
this.$el.removeEventListener('mouseenter', this.pauseAutoplay)
this.$el.removeEventListener('mouseleave', this.startAutoplay)
}
},
methods: {
pauseAutoplay () {
if (this.autoplayInterval) {
this.autoplayInterval = clearInterval(this.autoplayInterval)
}
},
startAutoplay () {
if (this.autoplay) {
this.autoplayInterval = setInterval(() => {
this.dir === 'ltr' ? this.goPrev() : this.goNext()
}, this.autoplayTimeout) | }
},
mounted () {
if (!process.server && this.autoplayHoverPause) {
this.$el.addEventListener('mouseenter', this.pauseAutoplay)
this.$el.addEventListener('mouseleave', this.startAutoplay)
this.startAutoplay()
}
}
}
export default autoplay | } | random_line_split |
autoplay.js | const autoplay = {
props: {
/**
* Flag to enable autoplay
*/
autoplay: {
type: Boolean,
default: false
},
/**
* Time elapsed before next slide
*/
autoplayTimeout: {
type: Number,
default: 2000
},
/**
* Flag to pause autoplay on hover
*/
autoplayHoverPause: {
type: Boolean,
default: true
}
},
data () {
return {
autoplayInterval: null
}
},
destroyed () {
if (!process.server) {
this.pauseAutoplay()
this.$el.removeEventListener('mouseenter', this.pauseAutoplay)
this.$el.removeEventListener('mouseleave', this.startAutoplay)
}
},
methods: {
pauseAutoplay () {
if (this.autoplayInterval) |
},
startAutoplay () {
if (this.autoplay) {
this.autoplayInterval = setInterval(() => {
this.dir === 'ltr' ? this.goPrev() : this.goNext()
}, this.autoplayTimeout)
}
}
},
mounted () {
if (!process.server && this.autoplayHoverPause) {
this.$el.addEventListener('mouseenter', this.pauseAutoplay)
this.$el.addEventListener('mouseleave', this.startAutoplay)
this.startAutoplay()
}
}
}
export default autoplay
| {
this.autoplayInterval = clearInterval(this.autoplayInterval)
} | conditional_block |
autoplay.js | const autoplay = {
props: {
/**
* Flag to enable autoplay
*/
autoplay: {
type: Boolean,
default: false
},
/**
* Time elapsed before next slide
*/
autoplayTimeout: {
type: Number,
default: 2000
},
/**
* Flag to pause autoplay on hover
*/
autoplayHoverPause: {
type: Boolean,
default: true
}
},
data () {
return {
autoplayInterval: null
}
},
| () {
if (!process.server) {
this.pauseAutoplay()
this.$el.removeEventListener('mouseenter', this.pauseAutoplay)
this.$el.removeEventListener('mouseleave', this.startAutoplay)
}
},
methods: {
pauseAutoplay () {
if (this.autoplayInterval) {
this.autoplayInterval = clearInterval(this.autoplayInterval)
}
},
startAutoplay () {
if (this.autoplay) {
this.autoplayInterval = setInterval(() => {
this.dir === 'ltr' ? this.goPrev() : this.goNext()
}, this.autoplayTimeout)
}
}
},
mounted () {
if (!process.server && this.autoplayHoverPause) {
this.$el.addEventListener('mouseenter', this.pauseAutoplay)
this.$el.addEventListener('mouseleave', this.startAutoplay)
this.startAutoplay()
}
}
}
export default autoplay
| destroyed | identifier_name |
jsonp.py | import urllib
from cyclone.web import asynchronous
from twisted.python import log
from sockjs.cyclone import proto
from sockjs.cyclone.transports import pollingbase
class JSONPTransport(pollingbase.PollingTransportBase):
name = 'jsonp'
@asynchronous
def get(self, session_id):
# Start response
self.handle_session_cookie()
self.disable_cache()
# Grab callback parameter
self.callback = self.get_argument('c', None)
if not self.callback:
self.write('"callback" parameter required')
self.set_status(500)
self.finish()
return
# Get or create session without starting heartbeat
if not self._attach_session(session_id):
return
# Might get already detached because connection was closed in
# connectionMade
if not self.session:
return
if self.session.send_queue.is_empty():
self.session.start_heartbeat()
else:
self.session.flush()
def connectionLost(self, reason):
self.session.delayed_close()
def send_pack(self, message):
# TODO: Just escape
msg = '%s(%s);\r\n' % (self.callback, proto.json_encode(message))
self.set_header('Content-Type',
'application/javascript; charset=UTF-8')
self.set_header('Content-Length', len(msg))
# FIXME
self.set_header('Etag', 'dummy')
self.write(msg)
self._detach()
self.safe_finish()
class JSONPSendHandler(pollingbase.PollingTransportBase):
def post(self, session_id):
self.preflight()
self.handle_session_cookie()
self.disable_cache()
session = self._get_session(session_id)
if session is None:
self.set_status(404)
return
#data = self.request.body.decode('utf-8')
data = self.request.body
ctype = self.request.headers.get('Content-Type', '').lower()
if ctype == 'application/x-www-form-urlencoded':
if not data.startswith('d='):
log.msg('jsonp_send: Invalid payload.')
self.write("Payload expected.")
self.set_status(500)
return
data = urllib.unquote_plus(data[2:])
if not data:
log.msg('jsonp_send: Payload expected.')
self.write("Payload expected.")
self.set_status(500)
return
try:
messages = proto.json_decode(data)
except:
# TODO: Proper error handling
log.msg('jsonp_send: Invalid json encoding')
| return
try:
session.messagesReceived(messages)
except Exception:
log.msg('jsonp_send: messagesReceived() failed')
session.close()
self.write('Message handler failed.')
self.set_status(500)
return
self.write('ok')
self.set_header('Content-Type', 'text/plain; charset=UTF-8')
self.set_status(200) | self.write("Broken JSON encoding.")
self.set_status(500) | random_line_split |
jsonp.py | import urllib
from cyclone.web import asynchronous
from twisted.python import log
from sockjs.cyclone import proto
from sockjs.cyclone.transports import pollingbase
class JSONPTransport(pollingbase.PollingTransportBase):
name = 'jsonp'
@asynchronous
def get(self, session_id):
# Start response
self.handle_session_cookie()
self.disable_cache()
# Grab callback parameter
self.callback = self.get_argument('c', None)
if not self.callback:
self.write('"callback" parameter required')
self.set_status(500)
self.finish()
return
# Get or create session without starting heartbeat
if not self._attach_session(session_id):
return
# Might get already detached because connection was closed in
# connectionMade
if not self.session:
return
if self.session.send_queue.is_empty():
self.session.start_heartbeat()
else:
self.session.flush()
def connectionLost(self, reason):
self.session.delayed_close()
def send_pack(self, message):
# TODO: Just escape
|
class JSONPSendHandler(pollingbase.PollingTransportBase):
def post(self, session_id):
self.preflight()
self.handle_session_cookie()
self.disable_cache()
session = self._get_session(session_id)
if session is None:
self.set_status(404)
return
#data = self.request.body.decode('utf-8')
data = self.request.body
ctype = self.request.headers.get('Content-Type', '').lower()
if ctype == 'application/x-www-form-urlencoded':
if not data.startswith('d='):
log.msg('jsonp_send: Invalid payload.')
self.write("Payload expected.")
self.set_status(500)
return
data = urllib.unquote_plus(data[2:])
if not data:
log.msg('jsonp_send: Payload expected.')
self.write("Payload expected.")
self.set_status(500)
return
try:
messages = proto.json_decode(data)
except:
# TODO: Proper error handling
log.msg('jsonp_send: Invalid json encoding')
self.write("Broken JSON encoding.")
self.set_status(500)
return
try:
session.messagesReceived(messages)
except Exception:
log.msg('jsonp_send: messagesReceived() failed')
session.close()
self.write('Message handler failed.')
self.set_status(500)
return
self.write('ok')
self.set_header('Content-Type', 'text/plain; charset=UTF-8')
self.set_status(200)
| msg = '%s(%s);\r\n' % (self.callback, proto.json_encode(message))
self.set_header('Content-Type',
'application/javascript; charset=UTF-8')
self.set_header('Content-Length', len(msg))
# FIXME
self.set_header('Etag', 'dummy')
self.write(msg)
self._detach()
self.safe_finish() | identifier_body |
jsonp.py | import urllib
from cyclone.web import asynchronous
from twisted.python import log
from sockjs.cyclone import proto
from sockjs.cyclone.transports import pollingbase
class JSONPTransport(pollingbase.PollingTransportBase):
name = 'jsonp'
@asynchronous
def get(self, session_id):
# Start response
self.handle_session_cookie()
self.disable_cache()
# Grab callback parameter
self.callback = self.get_argument('c', None)
if not self.callback:
self.write('"callback" parameter required')
self.set_status(500)
self.finish()
return
# Get or create session without starting heartbeat
if not self._attach_session(session_id):
return
# Might get already detached because connection was closed in
# connectionMade
if not self.session:
return
if self.session.send_queue.is_empty():
self.session.start_heartbeat()
else:
self.session.flush()
def connectionLost(self, reason):
self.session.delayed_close()
def send_pack(self, message):
# TODO: Just escape
msg = '%s(%s);\r\n' % (self.callback, proto.json_encode(message))
self.set_header('Content-Type',
'application/javascript; charset=UTF-8')
self.set_header('Content-Length', len(msg))
# FIXME
self.set_header('Etag', 'dummy')
self.write(msg)
self._detach()
self.safe_finish()
class JSONPSendHandler(pollingbase.PollingTransportBase):
def post(self, session_id):
self.preflight()
self.handle_session_cookie()
self.disable_cache()
session = self._get_session(session_id)
if session is None:
|
#data = self.request.body.decode('utf-8')
data = self.request.body
ctype = self.request.headers.get('Content-Type', '').lower()
if ctype == 'application/x-www-form-urlencoded':
if not data.startswith('d='):
log.msg('jsonp_send: Invalid payload.')
self.write("Payload expected.")
self.set_status(500)
return
data = urllib.unquote_plus(data[2:])
if not data:
log.msg('jsonp_send: Payload expected.')
self.write("Payload expected.")
self.set_status(500)
return
try:
messages = proto.json_decode(data)
except:
# TODO: Proper error handling
log.msg('jsonp_send: Invalid json encoding')
self.write("Broken JSON encoding.")
self.set_status(500)
return
try:
session.messagesReceived(messages)
except Exception:
log.msg('jsonp_send: messagesReceived() failed')
session.close()
self.write('Message handler failed.')
self.set_status(500)
return
self.write('ok')
self.set_header('Content-Type', 'text/plain; charset=UTF-8')
self.set_status(200)
| self.set_status(404)
return | conditional_block |
jsonp.py | import urllib
from cyclone.web import asynchronous
from twisted.python import log
from sockjs.cyclone import proto
from sockjs.cyclone.transports import pollingbase
class JSONPTransport(pollingbase.PollingTransportBase):
name = 'jsonp'
@asynchronous
def get(self, session_id):
# Start response
self.handle_session_cookie()
self.disable_cache()
# Grab callback parameter
self.callback = self.get_argument('c', None)
if not self.callback:
self.write('"callback" parameter required')
self.set_status(500)
self.finish()
return
# Get or create session without starting heartbeat
if not self._attach_session(session_id):
return
# Might get already detached because connection was closed in
# connectionMade
if not self.session:
return
if self.session.send_queue.is_empty():
self.session.start_heartbeat()
else:
self.session.flush()
def connectionLost(self, reason):
self.session.delayed_close()
def send_pack(self, message):
# TODO: Just escape
msg = '%s(%s);\r\n' % (self.callback, proto.json_encode(message))
self.set_header('Content-Type',
'application/javascript; charset=UTF-8')
self.set_header('Content-Length', len(msg))
# FIXME
self.set_header('Etag', 'dummy')
self.write(msg)
self._detach()
self.safe_finish()
class JSONPSendHandler(pollingbase.PollingTransportBase):
def | (self, session_id):
self.preflight()
self.handle_session_cookie()
self.disable_cache()
session = self._get_session(session_id)
if session is None:
self.set_status(404)
return
#data = self.request.body.decode('utf-8')
data = self.request.body
ctype = self.request.headers.get('Content-Type', '').lower()
if ctype == 'application/x-www-form-urlencoded':
if not data.startswith('d='):
log.msg('jsonp_send: Invalid payload.')
self.write("Payload expected.")
self.set_status(500)
return
data = urllib.unquote_plus(data[2:])
if not data:
log.msg('jsonp_send: Payload expected.')
self.write("Payload expected.")
self.set_status(500)
return
try:
messages = proto.json_decode(data)
except:
# TODO: Proper error handling
log.msg('jsonp_send: Invalid json encoding')
self.write("Broken JSON encoding.")
self.set_status(500)
return
try:
session.messagesReceived(messages)
except Exception:
log.msg('jsonp_send: messagesReceived() failed')
session.close()
self.write('Message handler failed.')
self.set_status(500)
return
self.write('ok')
self.set_header('Content-Type', 'text/plain; charset=UTF-8')
self.set_status(200)
| post | identifier_name |
CardDef.ts | import {CardClass, CardSet, CardType, MultiClassGroup, Race, Rarity} from "./Enums";
import {cleanEnum} from "./helpers";
export default class | {
public attack: number;
public armor: number;
public cardClass: CardClass;
public cardSet: CardSet;
public collectionText: string;
public cost: number;
public costsHealth: boolean;
public elite: boolean;
public health: number;
public hideStats: boolean;
public id: string;
public name: string;
public multiClassGroup: MultiClassGroup;
public rarity: Rarity;
public race: Race;
public silenced: boolean;
public text: string;
public type: CardType;
constructor(props: any) {
this.attack = props.attack || 0;
this.armor = props.armor || 0;
this.cardClass = cleanEnum(props.cardClass, CardClass) as CardClass;
this.cardSet = cleanEnum(props.set, CardSet) as CardSet;
this.cost = props.cost || 0;
this.costsHealth = props.costsHealth || false;
this.elite = props.elite || false;
this.health = props.health || 0;
this.hideStats = props.hideStats || false;
this.multiClassGroup = cleanEnum(props.multiClassGroup, MultiClassGroup) as MultiClassGroup;
this.name = props.name || "";
this.race = cleanEnum(props.race, Race) as Race;
this.rarity = cleanEnum(props.rarity, Rarity) as Rarity;
this.silenced = props.silenced || false;
this.type = cleanEnum(props.type, CardType) as CardType;
if (this.type === CardType.WEAPON && props.durability) {
// Weapons alias health to durability
this.health = props.durability;
} else if (this.type === CardType.HERO && props.armor) {
// Hero health gem is Armor
this.health = props.armor;
}
this.collectionText = props.collectionText || "";
this.text = props.text || "";
}
}
| CardDef | identifier_name |
CardDef.ts | import {CardClass, CardSet, CardType, MultiClassGroup, Race, Rarity} from "./Enums";
import {cleanEnum} from "./helpers";
export default class CardDef {
public attack: number;
public armor: number;
public cardClass: CardClass;
public cardSet: CardSet;
public collectionText: string;
public cost: number;
public costsHealth: boolean;
public elite: boolean;
public health: number;
public hideStats: boolean;
public id: string;
public name: string;
public multiClassGroup: MultiClassGroup;
public rarity: Rarity;
public race: Race;
public silenced: boolean;
public text: string;
public type: CardType;
constructor(props: any) |
}
| {
this.attack = props.attack || 0;
this.armor = props.armor || 0;
this.cardClass = cleanEnum(props.cardClass, CardClass) as CardClass;
this.cardSet = cleanEnum(props.set, CardSet) as CardSet;
this.cost = props.cost || 0;
this.costsHealth = props.costsHealth || false;
this.elite = props.elite || false;
this.health = props.health || 0;
this.hideStats = props.hideStats || false;
this.multiClassGroup = cleanEnum(props.multiClassGroup, MultiClassGroup) as MultiClassGroup;
this.name = props.name || "";
this.race = cleanEnum(props.race, Race) as Race;
this.rarity = cleanEnum(props.rarity, Rarity) as Rarity;
this.silenced = props.silenced || false;
this.type = cleanEnum(props.type, CardType) as CardType;
if (this.type === CardType.WEAPON && props.durability) {
// Weapons alias health to durability
this.health = props.durability;
} else if (this.type === CardType.HERO && props.armor) {
// Hero health gem is Armor
this.health = props.armor;
}
this.collectionText = props.collectionText || "";
this.text = props.text || "";
} | identifier_body |
CardDef.ts | import {CardClass, CardSet, CardType, MultiClassGroup, Race, Rarity} from "./Enums";
import {cleanEnum} from "./helpers";
export default class CardDef {
public attack: number;
public armor: number;
public cardClass: CardClass;
public cardSet: CardSet;
public collectionText: string;
public cost: number;
public costsHealth: boolean;
public elite: boolean;
public health: number;
public hideStats: boolean;
public id: string; | public name: string;
public multiClassGroup: MultiClassGroup;
public rarity: Rarity;
public race: Race;
public silenced: boolean;
public text: string;
public type: CardType;
constructor(props: any) {
this.attack = props.attack || 0;
this.armor = props.armor || 0;
this.cardClass = cleanEnum(props.cardClass, CardClass) as CardClass;
this.cardSet = cleanEnum(props.set, CardSet) as CardSet;
this.cost = props.cost || 0;
this.costsHealth = props.costsHealth || false;
this.elite = props.elite || false;
this.health = props.health || 0;
this.hideStats = props.hideStats || false;
this.multiClassGroup = cleanEnum(props.multiClassGroup, MultiClassGroup) as MultiClassGroup;
this.name = props.name || "";
this.race = cleanEnum(props.race, Race) as Race;
this.rarity = cleanEnum(props.rarity, Rarity) as Rarity;
this.silenced = props.silenced || false;
this.type = cleanEnum(props.type, CardType) as CardType;
if (this.type === CardType.WEAPON && props.durability) {
// Weapons alias health to durability
this.health = props.durability;
} else if (this.type === CardType.HERO && props.armor) {
// Hero health gem is Armor
this.health = props.armor;
}
this.collectionText = props.collectionText || "";
this.text = props.text || "";
}
} | random_line_split |
|
CardDef.ts | import {CardClass, CardSet, CardType, MultiClassGroup, Race, Rarity} from "./Enums";
import {cleanEnum} from "./helpers";
export default class CardDef {
public attack: number;
public armor: number;
public cardClass: CardClass;
public cardSet: CardSet;
public collectionText: string;
public cost: number;
public costsHealth: boolean;
public elite: boolean;
public health: number;
public hideStats: boolean;
public id: string;
public name: string;
public multiClassGroup: MultiClassGroup;
public rarity: Rarity;
public race: Race;
public silenced: boolean;
public text: string;
public type: CardType;
constructor(props: any) {
this.attack = props.attack || 0;
this.armor = props.armor || 0;
this.cardClass = cleanEnum(props.cardClass, CardClass) as CardClass;
this.cardSet = cleanEnum(props.set, CardSet) as CardSet;
this.cost = props.cost || 0;
this.costsHealth = props.costsHealth || false;
this.elite = props.elite || false;
this.health = props.health || 0;
this.hideStats = props.hideStats || false;
this.multiClassGroup = cleanEnum(props.multiClassGroup, MultiClassGroup) as MultiClassGroup;
this.name = props.name || "";
this.race = cleanEnum(props.race, Race) as Race;
this.rarity = cleanEnum(props.rarity, Rarity) as Rarity;
this.silenced = props.silenced || false;
this.type = cleanEnum(props.type, CardType) as CardType;
if (this.type === CardType.WEAPON && props.durability) {
// Weapons alias health to durability
this.health = props.durability;
} else if (this.type === CardType.HERO && props.armor) |
this.collectionText = props.collectionText || "";
this.text = props.text || "";
}
}
| {
// Hero health gem is Armor
this.health = props.armor;
} | conditional_block |
LSTM2.py | from __future__ import print_function
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.contrib import rnn
import time
from datetime import timedelta
# Import MNIST data
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# Training Parameters
learning_rate = 0.005
training_steps = 15000
batch_size = 128
display_step = 200
# Network Parameters
num_input = 28 # MNIST data input (img shape: 28*28)
timesteps = 28 # timesteps
num_hidden = 128 # hidden layer num of features
num_classes = 10 # MNIST total classes (0-9 digits)
# tf Graph input
X = tf.placeholder("float", [None, timesteps, num_input])
Y = tf.placeholder("float", [None, num_classes])
# Define weights
weights = {
'out': tf.Variable(tf.random_normal([num_hidden, num_classes]))
}
biases = {
'out': tf.Variable(tf.random_normal([num_classes]))
}
def | (x, weights, biases):
# Prepare data shape to match `rnn` function requirements
# Current data input shape: (batch_size, timesteps, n_input)
# Required shape: 'timesteps' tensors list of shape (batch_size, n_input)
# Unstack to get a list of 'timesteps' tensors of shape (batch_size,
# n_input)
x = tf.unstack(x, timesteps, 1)
# Define a lstm cell with tensorflow
lstm_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0)
# Get lstm cell output
outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
# Linear activation, using rnn inner loop last output
return tf.matmul(outputs[-1], weights['out']) + biases['out']
logits = RNN(X, weights, biases)
prediction = tf.nn.softmax(logits)
# Define loss and optimizer
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=Y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)
# Evaluate model (with test logits, for dropout to be disabled)
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
loss_group = []
epoch_group = []
# Start training
with tf.Session() as sess:
# Run the initializer
sess.run(init)
start_time = time.time()
for step in range(1, training_steps + 1):
tf.set_random_seed(23)
batch_x, batch_y = mnist.train.next_batch(batch_size)
# Reshape data to get 28 seq of 28 elements
batch_x = batch_x.reshape((batch_size, timesteps, num_input))
# Run optimization op (backprop)
sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
if step % display_step == 0 or step == 1:
# Calculate batch loss and accuracy
loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,
Y: batch_y})
loss_group.append(loss)
epoch_group.append(step)
print("Step " + str(step) + ", Minibatch Loss= " +
"{:.4f}".format(loss) + ", Training Accuracy= " +
"{:.3f}".format(acc))
print("Optimization Finished!")
print(loss_group)
print(epoch_group)
plt.plot(epoch_group, loss_group)
plt.show()
end_time = time.time()
time_dif = end_time - start_time
print("Time usage: " + str(timedelta(seconds=int(round(time_dif)))))
# Calculate accuracy for 128 mnist test images
test_len = 128
test_data = mnist.test.images[:test_len].reshape(
(-1, timesteps, num_input))
test_label = mnist.test.labels[:test_len]
print("Testing Accuracy:",
sess.run(accuracy, feed_dict={X: test_data, Y: test_label}))
| RNN | identifier_name |
LSTM2.py | from __future__ import print_function
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.contrib import rnn
import time
from datetime import timedelta
# Import MNIST data
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# Training Parameters
learning_rate = 0.005
training_steps = 15000
batch_size = 128
display_step = 200
# Network Parameters
num_input = 28 # MNIST data input (img shape: 28*28)
timesteps = 28 # timesteps
num_hidden = 128 # hidden layer num of features
num_classes = 10 # MNIST total classes (0-9 digits)
# tf Graph input
X = tf.placeholder("float", [None, timesteps, num_input])
Y = tf.placeholder("float", [None, num_classes])
# Define weights
weights = {
'out': tf.Variable(tf.random_normal([num_hidden, num_classes]))
}
biases = {
'out': tf.Variable(tf.random_normal([num_classes]))
}
def RNN(x, weights, biases):
# Prepare data shape to match `rnn` function requirements
# Current data input shape: (batch_size, timesteps, n_input)
# Required shape: 'timesteps' tensors list of shape (batch_size, n_input)
# Unstack to get a list of 'timesteps' tensors of shape (batch_size,
# n_input)
|
logits = RNN(X, weights, biases)
prediction = tf.nn.softmax(logits)
# Define loss and optimizer
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=Y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)
# Evaluate model (with test logits, for dropout to be disabled)
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
loss_group = []
epoch_group = []
# Start training
with tf.Session() as sess:
# Run the initializer
sess.run(init)
start_time = time.time()
for step in range(1, training_steps + 1):
tf.set_random_seed(23)
batch_x, batch_y = mnist.train.next_batch(batch_size)
# Reshape data to get 28 seq of 28 elements
batch_x = batch_x.reshape((batch_size, timesteps, num_input))
# Run optimization op (backprop)
sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
if step % display_step == 0 or step == 1:
# Calculate batch loss and accuracy
loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,
Y: batch_y})
loss_group.append(loss)
epoch_group.append(step)
print("Step " + str(step) + ", Minibatch Loss= " +
"{:.4f}".format(loss) + ", Training Accuracy= " +
"{:.3f}".format(acc))
print("Optimization Finished!")
print(loss_group)
print(epoch_group)
plt.plot(epoch_group, loss_group)
plt.show()
end_time = time.time()
time_dif = end_time - start_time
print("Time usage: " + str(timedelta(seconds=int(round(time_dif)))))
# Calculate accuracy for 128 mnist test images
test_len = 128
test_data = mnist.test.images[:test_len].reshape(
(-1, timesteps, num_input))
test_label = mnist.test.labels[:test_len]
print("Testing Accuracy:",
sess.run(accuracy, feed_dict={X: test_data, Y: test_label}))
| x = tf.unstack(x, timesteps, 1)
# Define a lstm cell with tensorflow
lstm_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0)
# Get lstm cell output
outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
# Linear activation, using rnn inner loop last output
return tf.matmul(outputs[-1], weights['out']) + biases['out'] | identifier_body |
LSTM2.py | from __future__ import print_function
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.contrib import rnn
import time
from datetime import timedelta
# Import MNIST data
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# Training Parameters
learning_rate = 0.005
training_steps = 15000
batch_size = 128
display_step = 200
# Network Parameters
num_input = 28 # MNIST data input (img shape: 28*28)
timesteps = 28 # timesteps
num_hidden = 128 # hidden layer num of features
num_classes = 10 # MNIST total classes (0-9 digits)
# tf Graph input
X = tf.placeholder("float", [None, timesteps, num_input])
Y = tf.placeholder("float", [None, num_classes])
# Define weights
weights = {
'out': tf.Variable(tf.random_normal([num_hidden, num_classes]))
}
biases = {
'out': tf.Variable(tf.random_normal([num_classes]))
}
def RNN(x, weights, biases):
# Prepare data shape to match `rnn` function requirements
# Current data input shape: (batch_size, timesteps, n_input)
# Required shape: 'timesteps' tensors list of shape (batch_size, n_input)
# Unstack to get a list of 'timesteps' tensors of shape (batch_size,
# n_input)
x = tf.unstack(x, timesteps, 1)
# Define a lstm cell with tensorflow
lstm_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0)
# Get lstm cell output
outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
# Linear activation, using rnn inner loop last output
return tf.matmul(outputs[-1], weights['out']) + biases['out']
logits = RNN(X, weights, biases)
prediction = tf.nn.softmax(logits)
# Define loss and optimizer
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=Y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)
# Evaluate model (with test logits, for dropout to be disabled)
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
loss_group = []
epoch_group = []
# Start training
with tf.Session() as sess:
# Run the initializer
sess.run(init)
start_time = time.time()
for step in range(1, training_steps + 1):
tf.set_random_seed(23)
batch_x, batch_y = mnist.train.next_batch(batch_size)
# Reshape data to get 28 seq of 28 elements
batch_x = batch_x.reshape((batch_size, timesteps, num_input))
# Run optimization op (backprop)
sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
if step % display_step == 0 or step == 1:
# Calculate batch loss and accuracy
|
print("Optimization Finished!")
print(loss_group)
print(epoch_group)
plt.plot(epoch_group, loss_group)
plt.show()
end_time = time.time()
time_dif = end_time - start_time
print("Time usage: " + str(timedelta(seconds=int(round(time_dif)))))
# Calculate accuracy for 128 mnist test images
test_len = 128
test_data = mnist.test.images[:test_len].reshape(
(-1, timesteps, num_input))
test_label = mnist.test.labels[:test_len]
print("Testing Accuracy:",
sess.run(accuracy, feed_dict={X: test_data, Y: test_label}))
| loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,
Y: batch_y})
loss_group.append(loss)
epoch_group.append(step)
print("Step " + str(step) + ", Minibatch Loss= " +
"{:.4f}".format(loss) + ", Training Accuracy= " +
"{:.3f}".format(acc)) | conditional_block |
LSTM2.py | from __future__ import print_function
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.contrib import rnn
import time
from datetime import timedelta
# Import MNIST data
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# Training Parameters
learning_rate = 0.005
training_steps = 15000
batch_size = 128
display_step = 200
# Network Parameters
num_input = 28 # MNIST data input (img shape: 28*28)
timesteps = 28 # timesteps
num_hidden = 128 # hidden layer num of features
num_classes = 10 # MNIST total classes (0-9 digits)
# tf Graph input
X = tf.placeholder("float", [None, timesteps, num_input])
Y = tf.placeholder("float", [None, num_classes])
# Define weights
weights = {
'out': tf.Variable(tf.random_normal([num_hidden, num_classes]))
}
biases = {
'out': tf.Variable(tf.random_normal([num_classes]))
}
def RNN(x, weights, biases):
# Prepare data shape to match `rnn` function requirements
# Current data input shape: (batch_size, timesteps, n_input)
# Required shape: 'timesteps' tensors list of shape (batch_size, n_input)
# Unstack to get a list of 'timesteps' tensors of shape (batch_size,
# n_input)
x = tf.unstack(x, timesteps, 1)
# Define a lstm cell with tensorflow
lstm_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0)
# Get lstm cell output
outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
# Linear activation, using rnn inner loop last output
return tf.matmul(outputs[-1], weights['out']) + biases['out']
logits = RNN(X, weights, biases)
prediction = tf.nn.softmax(logits)
# Define loss and optimizer
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=Y)) | accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
loss_group = []
epoch_group = []
# Start training
with tf.Session() as sess:
# Run the initializer
sess.run(init)
start_time = time.time()
for step in range(1, training_steps + 1):
tf.set_random_seed(23)
batch_x, batch_y = mnist.train.next_batch(batch_size)
# Reshape data to get 28 seq of 28 elements
batch_x = batch_x.reshape((batch_size, timesteps, num_input))
# Run optimization op (backprop)
sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
if step % display_step == 0 or step == 1:
# Calculate batch loss and accuracy
loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,
Y: batch_y})
loss_group.append(loss)
epoch_group.append(step)
print("Step " + str(step) + ", Minibatch Loss= " +
"{:.4f}".format(loss) + ", Training Accuracy= " +
"{:.3f}".format(acc))
print("Optimization Finished!")
print(loss_group)
print(epoch_group)
plt.plot(epoch_group, loss_group)
plt.show()
end_time = time.time()
time_dif = end_time - start_time
print("Time usage: " + str(timedelta(seconds=int(round(time_dif)))))
# Calculate accuracy for 128 mnist test images
test_len = 128
test_data = mnist.test.images[:test_len].reshape(
(-1, timesteps, num_input))
test_label = mnist.test.labels[:test_len]
print("Testing Accuracy:",
sess.run(accuracy, feed_dict={X: test_data, Y: test_label})) | optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)
# Evaluate model (with test logits, for dropout to be disabled)
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1)) | random_line_split |
test_connection.ts | /**
* super-orm tests
*
* @author Zongmin Lei <[email protected]>
*/
import orm = require("../");
import utils = require("./utils");
describe("Connection", function () {
it("getConnection() support promise", async function () {
const conn = orm.createConnection({
connections: [ utils.getConnectionConfig() ],
});
// {
// const ret = await conn.query('SELECT JSON_OBJECT("key1", 1, "key2", "abc", "key1", "def") as `data`');
// console.log(ret);
// }
{
const ret = await conn.query("DROP TABLE IF EXISTS `blog_contents`");
console.log(ret);
}
{
const sql = await utils.readTestFile("blog_contents.sql");
const ret = await conn.query(sql);
console.log(ret); | const sql = conn.format("SELECT * FROM ::table WHERE id=:id", {
table: "blog_contents",
id: 2,
});
console.log(sql);
const ret = await conn.query(sql);
console.log(ret);
}
{
const sql = conn.format("SELECT * FROM ?? WHERE id=?", [ "blog_contents", 2 ]);
console.log(sql);
const ret = await conn.query(sql);
console.log(ret);
}
{
const c = await conn.getMasterConnection();
console.log(c.escape(utils.newDate()));
await c.beginTransaction();
try {
const ret = await c.query('INSERT INTO `blog_contents`(`id`,`content`) VALUES (1234, "456")');
console.log(ret);
} catch (err) {
console.log(err);
await c.rollback();
}
try {
const ret = await c.query('INSERT INTO `blog_contents`(`id`,`content`) VALUES (1234, "9999")');
console.log(ret);
} catch (err) {
console.log(err);
await c.rollback();
}
try {
await c.commit();
} catch (err) {
console.log(err);
await c.rollback();
}
c.release();
}
await conn.close();
});
}); | }
{ | random_line_split |
transliteration.py | # -*- coding: utf-8 -*-
import os
import io
import sys
import argparse
ARABIC_LETTERS = [
u'ء', u'آ', u'أ', u'ؤ', u'إ',
u'ئ', u'ا', u'ب', u'ة', u'ت',
u'ث', u'ج', u'ح', u'خ', u'د',
u'ذ', u'ر', u'ز', u'س', u'ش',
u'ص', u'ض', u'ط', u'ظ', u'ع',
u'غ', u'ـ', u'ف', u'ق', u'ك',
u'ل', u'م', u'ن', u'ه', u'و',
u'ى', u'ي', u'ً', u'ٌ', u'ٍ',
u'َ', u'ُ', u'ِ', u'ّ', u'ْ',
]
SYMBOL_LETTERS = [
'\'', '|', '>', '&', '<',
'}', 'A', 'b', 'p', 't',
'v', 'j', 'H', 'x', 'd',
'*', 'r', 'z', 's', '$',
'S', 'D', 'T', 'Z', 'E',
'g', '_', 'f', 'q', 'k',
'l', 'm', 'n', 'h', 'w',
'Y', 'y', 'F', 'N', 'K',
'a', 'u', 'i', '~', 'o'
]
def transliteration(file_path, domain, range):
d = { u:v for u, v in zip(domain, range) }
with io.open(file_path, 'r', encoding='utf8') as file:
lines = file.readlines()
new_lines = list()
for line in lines:
new_line = ''
for ch in line.strip():
if ch in d.keys():
new_line += d[ch] |
file_path = file_path.split(os.sep)
file_path[-1] = 'transliterated_' + file_path[-1]
file_path = os.sep.join(file_path)
with io.open(file_path, 'w', encoding='utf8') as file:
file.write('\n'.join(new_lines))
print(file_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert from/to Buckwalter transliteration')
parser.add_argument('-fp', '--file-path', help='File path to be transliterated', required=True)
parser.add_argument('-tbw', '--to-buckwalter', help='To Buckwalter transliteration from Arabic', required=False, default='False', choices=['True', 'False'])
parser.add_argument('-fbw', '--from-buckwalter', help='From Buckwalter transliteration to Arabic', required=False, default='False', choices=['True', 'False'])
args = parser.parse_args()
if args.to_buckwalter == args.from_buckwalter:
sys.exit('You need either to specify -tbw or -fbw\nRun `%s -h` for more information' % sys.argv[0])
if args.to_buckwalter == 'True':
transliteration(args.file_path, ARABIC_LETTERS, SYMBOL_LETTERS)
else:
transliteration(args.file_path, SYMBOL_LETTERS, ARABIC_LETTERS) | else:
new_line += ch
new_lines.append(new_line) | random_line_split |
transliteration.py | # -*- coding: utf-8 -*-
import os
import io
import sys
import argparse
ARABIC_LETTERS = [
u'ء', u'آ', u'أ', u'ؤ', u'إ',
u'ئ', u'ا', u'ب', u'ة', u'ت',
u'ث', u'ج', u'ح', u'خ', u'د',
u'ذ', u'ر', u'ز', u'س', u'ش',
u'ص', u'ض', u'ط', u'ظ', u'ع',
u'غ', u'ـ', u'ف', u'ق', u'ك',
u'ل', u'م', u'ن', u'ه', u'و',
u'ى', u'ي', u'ً', u'ٌ', u'ٍ',
u'َ', u'ُ', u'ِ', u'ّ', u'ْ',
]
SYMBOL_LETTERS = [
'\'', '|', '>', '&', '<',
'}', 'A', 'b', 'p', 't',
'v', 'j', 'H', 'x', 'd',
'*', 'r', 'z', 's', '$',
'S', 'D', 'T', 'Z', 'E',
'g', '_', 'f', 'q', 'k',
'l', 'm', 'n', 'h', 'w',
'Y', 'y', 'F', 'N', 'K',
'a', 'u', 'i', '~', 'o'
]
def transliteration(file_path, domain, range):
| , v in zip(domain, range) }
with io.open(file_path, 'r', encoding='utf8') as file:
lines = file.readlines()
new_lines = list()
for line in lines:
new_line = ''
for ch in line.strip():
if ch in d.keys():
new_line += d[ch]
else:
new_line += ch
new_lines.append(new_line)
file_path = file_path.split(os.sep)
file_path[-1] = 'transliterated_' + file_path[-1]
file_path = os.sep.join(file_path)
with io.open(file_path, 'w', encoding='utf8') as file:
file.write('\n'.join(new_lines))
print(file_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert from/to Buckwalter transliteration')
parser.add_argument('-fp', '--file-path', help='File path to be transliterated', required=True)
parser.add_argument('-tbw', '--to-buckwalter', help='To Buckwalter transliteration from Arabic', required=False, default='False', choices=['True', 'False'])
parser.add_argument('-fbw', '--from-buckwalter', help='From Buckwalter transliteration to Arabic', required=False, default='False', choices=['True', 'False'])
args = parser.parse_args()
if args.to_buckwalter == args.from_buckwalter:
sys.exit('You need either to specify -tbw or -fbw\nRun `%s -h` for more information' % sys.argv[0])
if args.to_buckwalter == 'True':
transliteration(args.file_path, ARABIC_LETTERS, SYMBOL_LETTERS)
else:
transliteration(args.file_path, SYMBOL_LETTERS, ARABIC_LETTERS)
| d = { u:v for u | identifier_name |
transliteration.py | # -*- coding: utf-8 -*-
import os
import io
import sys
import argparse
ARABIC_LETTERS = [
u'ء', u'آ', u'أ', u'ؤ', u'إ',
u'ئ', u'ا', u'ب', u'ة', u'ت',
u'ث', u'ج', u'ح', u'خ', u'د',
u'ذ', u'ر', u'ز', u'س', u'ش',
u'ص', u'ض', u'ط', u'ظ', u'ع',
u'غ', u'ـ', u'ف', u'ق', u'ك',
u'ل', u'م', u'ن', u'ه', u'و',
u'ى', u'ي', u'ً', u'ٌ', u'ٍ',
u'َ', u'ُ', u'ِ', u'ّ', u'ْ',
]
SYMBOL_LETTERS = [
'\'', '|', '>', '&', '<',
'}', 'A', 'b', 'p', 't',
'v', 'j', 'H', 'x', 'd',
'*', 'r', 'z', 's', '$',
'S', 'D', 'T', 'Z', 'E',
'g', '_', 'f', 'q', 'k',
'l', 'm', 'n', 'h', 'w',
'Y', 'y', 'F', 'N', 'K',
'a', 'u', 'i', '~', 'o'
]
def transliteration(file_path, domain, range):
d = { u:v for u, v in zip(domain, range) }
with io.open(file_path, 'r', encoding='utf8') as file:
lines = file.readlines()
new_lines = list()
for line in lines:
new_line = ''
for ch in line.strip():
if ch in d.keys():
new_line += d[ch]
else:
new_line += ch
new_lines.append(new_line) | = file_path.split(os.sep)
file_path[-1] = 'transliterated_' + file_path[-1]
file_path = os.sep.join(file_path)
with io.open(file_path, 'w', encoding='utf8') as file:
file.write('\n'.join(new_lines))
print(file_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert from/to Buckwalter transliteration')
parser.add_argument('-fp', '--file-path', help='File path to be transliterated', required=True)
parser.add_argument('-tbw', '--to-buckwalter', help='To Buckwalter transliteration from Arabic', required=False, default='False', choices=['True', 'False'])
parser.add_argument('-fbw', '--from-buckwalter', help='From Buckwalter transliteration to Arabic', required=False, default='False', choices=['True', 'False'])
args = parser.parse_args()
if args.to_buckwalter == args.from_buckwalter:
sys.exit('You need either to specify -tbw or -fbw\nRun `%s -h` for more information' % sys.argv[0])
if args.to_buckwalter == 'True':
transliteration(args.file_path, ARABIC_LETTERS, SYMBOL_LETTERS)
else:
transliteration(args.file_path, SYMBOL_LETTERS, ARABIC_LETTERS)
|
file_path | conditional_block |
transliteration.py | # -*- coding: utf-8 -*-
import os
import io
import sys
import argparse
ARABIC_LETTERS = [
u'ء', u'آ', u'أ', u'ؤ', u'إ',
u'ئ', u'ا', u'ب', u'ة', u'ت',
u'ث', u'ج', u'ح', u'خ', u'د',
u'ذ', u'ر', u'ز', u'س', u'ش',
u'ص', u'ض', u'ط', u'ظ', u'ع',
u'غ', u'ـ', u'ف', u'ق', u'ك',
u'ل', u'م', u'ن', u'ه', u'و',
u'ى', u'ي', u'ً', u'ٌ', u'ٍ',
u'َ', u'ُ', u'ِ', u'ّ', u'ْ',
]
SYMBOL_LETTERS = [
'\'', '|', '>', '&', '<',
'}', 'A', 'b', 'p', 't',
'v', 'j', 'H', 'x', 'd',
'*', 'r', 'z', 's', '$',
'S', 'D', 'T', 'Z', 'E',
'g', '_', 'f', 'q', 'k',
'l', 'm', 'n', 'h', 'w',
'Y', 'y', 'F', 'N', 'K',
'a', 'u', 'i', '~', 'o'
]
def transliteration(file_path, domain, range):
d = { u:v for u, v in zip(domain, range) }
| rse.ArgumentParser(description='Convert from/to Buckwalter transliteration')
parser.add_argument('-fp', '--file-path', help='File path to be transliterated', required=True)
parser.add_argument('-tbw', '--to-buckwalter', help='To Buckwalter transliteration from Arabic', required=False, default='False', choices=['True', 'False'])
parser.add_argument('-fbw', '--from-buckwalter', help='From Buckwalter transliteration to Arabic', required=False, default='False', choices=['True', 'False'])
args = parser.parse_args()
if args.to_buckwalter == args.from_buckwalter:
sys.exit('You need either to specify -tbw or -fbw\nRun `%s -h` for more information' % sys.argv[0])
if args.to_buckwalter == 'True':
transliteration(args.file_path, ARABIC_LETTERS, SYMBOL_LETTERS)
else:
transliteration(args.file_path, SYMBOL_LETTERS, ARABIC_LETTERS)
| with io.open(file_path, 'r', encoding='utf8') as file:
lines = file.readlines()
new_lines = list()
for line in lines:
new_line = ''
for ch in line.strip():
if ch in d.keys():
new_line += d[ch]
else:
new_line += ch
new_lines.append(new_line)
file_path = file_path.split(os.sep)
file_path[-1] = 'transliterated_' + file_path[-1]
file_path = os.sep.join(file_path)
with io.open(file_path, 'w', encoding='utf8') as file:
file.write('\n'.join(new_lines))
print(file_path)
if __name__ == '__main__':
parser = argpa | identifier_body |
urls.py | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License. |
urlpatterns = patterns('openstack_dashboard.dashboards.admin.images.views',
url(r'^images/$', views.IndexView.as_view(), name='index'),
url(r'^create/$', views.CreateView.as_view(), name='create'),
url(r'^(?P<image_id>[^/]+)/update/$',
views.UpdateView.as_view(), name='update'),
url(r'^(?P<image_id>[^/]+)/detail/$',
views.DetailView.as_view(), name='detail')
) |
from django.conf.urls import patterns # noqa
from django.conf.urls import url # noqa
from openstack_dashboard.dashboards.admin.images import views | random_line_split |
codeFixAddVoidToPromise_all.ts | /// <reference path='fourslash.ts' />
// @target: esnext
| ////const p1 = new Promise(resolve => resolve());
////const p2 = new Promise<number>(resolve => resolve());
////const p3 = new Promise<number | string>(resolve => resolve());
////const p4 = new Promise<{ x: number } & { y: string }>(resolve => resolve());
verify.codeFixAll({
fixId: "addVoidToPromise",
fixAllDescription: ts.Diagnostics.Add_void_to_all_Promises_resolved_without_a_value.message,
newFileContent: `const p1 = new Promise<void>(resolve => resolve());
const p2 = new Promise<number | void>(resolve => resolve());
const p3 = new Promise<number | string | void>(resolve => resolve());
const p4 = new Promise<({ x: number } & { y: string }) | void>(resolve => resolve());`
}); | // @lib: es2015
// @strict: true
| random_line_split |
consts.ts | export const Signatures = {
FileMagic: 0x9aa2d903,
Sig2Kdbx: 0xb54bfb67,
Sig2Kdb: 0xb54bfb65
} as const;
export const ErrorCodes = {
NotImplemented: 'NotImplemented',
InvalidArg: 'InvalidArg',
BadSignature: 'BadSignature',
InvalidVersion: 'InvalidVersion',
Unsupported: 'Unsupported',
FileCorrupt: 'FileCorrupt',
InvalidKey: 'InvalidKey',
MergeError: 'MergeError',
InvalidState: 'InvalidState'
} as const;
export const CompressionAlgorithm = {
None: 0,
GZip: 1
} as const;
export const CrsAlgorithm = {
Null: 0,
ArcFourVariant: 1,
Salsa20: 2,
ChaCha20: 3
} as const;
export const KdfId = {
Argon2: '72Nt34wpREuR96mkA+MKDA==',
Argon2d: '72Nt34wpREuR96mkA+MKDA==',
Argon2id: 'nimLGVbbR3OyPfw+xvCh5g==',
Aes: 'ydnzmmKKRGC/dA0IwYpP6g=='
} as const;
export const CipherId = {
Aes: 'McHy5r9xQ1C+WAUhavxa/w==',
ChaCha20: '1gOKK4tvTLWlJDOaMdu1mg=='
} as const;
export const AutoTypeObfuscationOptions = {
None: 0,
UseClipboard: 1
} as const;
export const Defaults = {
KeyEncryptionRounds: 300000,
MntncHistoryDays: 365,
HistoryMaxItems: 10,
HistoryMaxSize: 6 * 1024 * 1024,
RecycleBinName: 'Recycle Bin'
} as const;
export const Icons = {
Key: 0,
World: 1,
Warning: 2,
NetworkServer: 3,
MarkedDirectory: 4,
UserCommunication: 5,
Parts: 6,
Notepad: 7,
WorldSocket: 8,
Identity: 9,
PaperReady: 10,
Digicam: 11,
IRCommunication: 12,
MultiKeys: 13,
Energy: 14,
Scanner: 15,
WorldStar: 16,
CDRom: 17,
Monitor: 18,
EMail: 19,
Configuration: 20,
ClipboardReady: 21,
PaperNew: 22,
Screen: 23,
EnergyCareful: 24,
EMailBox: 25,
Disk: 26,
Drive: 27,
PaperQ: 28,
TerminalEncrypted: 29,
Console: 30,
Printer: 31,
ProgramIcons: 32,
Run: 33,
Settings: 34,
WorldComputer: 35,
Archive: 36,
Homebanking: 37,
DriveWindows: 39,
Clock: 39,
EMailSearch: 40,
PaperFlag: 41,
Memory: 42,
TrashBin: 43,
Note: 44,
Expired: 45,
Info: 46,
Package: 47,
Folder: 48,
FolderOpen: 49,
FolderPackage: 50,
LockOpen: 51,
PaperLocked: 52,
Checked: 53,
Pen: 54,
Thumbnail: 55,
Book: 56,
List: 57,
UserKey: 58,
Tool: 59,
Home: 60, | Tux: 62,
Feather: 63,
Apple: 64,
Wiki: 65,
Money: 66,
Certificate: 67,
BlackBerry: 68
} as const; | Star: 61, | random_line_split |
index.d.ts | // Type definitions for Numeral.js
// Project: https://github.com/adamwdraper/Numeral-js
// Definitions by: Vincent Bortone <https://github.com/vbortone>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
// http://numeraljs.com/#locales
interface NumeralJSLocale {
delimiters: {
thousands: string;
decimal: string;
};
abbreviations: {
thousand: string;
million: string;
billion: string;
trillion: string;
};
ordinal(num: number): string;
currency: {
symbol: string;
};
}
type RoundingFunction = (value: number) => number;
// http://numeraljs.com/#custom-formats
interface NumeralJsFormat {
regexps: {
format: RegExp, | },
format: (value: any, format: string, roundingFunction: RoundingFunction) => string,
unformat: (value: string) => number
}
type RegisterType = 'format' | 'locale';
// http://numeraljs.com/#use-it
interface Numeral {
(value?: any): Numeral;
version: string;
isNumeral: boolean;
/**
* This function sets the current locale. If no arguments are passed in,
* it will simply return the current global locale key.
*/
locale(key?: string): string;
/**
* Registers a language definition or a custom format definition.
*
* @param what Allowed values are: either 'format' or 'locale'
* @param key The key of the registerd type, e.g. 'de' for a german locale definition
* @param value The locale definition or the format definitiion
*/
register(what: RegisterType, key: string, value: NumeralJSLocale | NumeralJsFormat): NumeralJSLocale | NumeralJsFormat;
zeroFormat(format: string): void;
nullFormat(format: string): void;
defaultFormat(format: string): void;
clone(): Numeral;
format(inputString?: string, roundingFunction?: RoundingFunction): string;
formatCurrency(inputString?: string): string;
unformat(inputString: string): number;
value(): number;
valueOf(): number;
set (value: any): Numeral;
add(value: any): Numeral;
subtract(value: any): Numeral;
multiply(value: any): Numeral;
divide(value: any): Numeral;
difference(value: any): number;
validate(value: any, culture: any): boolean;
}
declare var numeral: Numeral;
/**
* Usage: <code>import * as numeral from 'numeral'</code>
*/
declare module "numeral" {
export = numeral;
} | unformat: RegExp, | random_line_split |
editor.js | // This file is part of Moodle - http://moodle.org/
//
// Moodle is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Moodle is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Moodle. If not, see <http://www.gnu.org/licenses/>.
/**
* Report builder editor
*
* @module core_reportbuilder/editor
* @copyright 2021 David Matamoros <[email protected]>
* @license http://www.gnu.org/copyleft/gpl.html GNU GPL v3 or later
*/
"use strict";
import 'core/inplace_editable';
import Notification from 'core/notification';
import Pending from 'core/pending';
import Templates from 'core/templates';
import * as reportSelectors from 'core_reportbuilder/local/selectors';
import {init as columnsEditorInit} from 'core_reportbuilder/local/editor/columns';
import {init as conditionsEditorInit} from 'core_reportbuilder/local/editor/conditions';
import {init as filtersEditorInit} from 'core_reportbuilder/local/editor/filters';
import {init as sortingEditorInit} from 'core_reportbuilder/local/editor/sorting';
import {getReport} from 'core_reportbuilder/local/repository/reports';
let initialized = false;
/**
* Initialise editor and all it's modules
*/
export const init = () => {
const reportElement = document.querySelector(reportSelectors.regions.report);
columnsEditorInit(reportElement, initialized);
conditionsEditorInit(reportElement, initialized);
filtersEditorInit(reportElement, initialized);
sortingEditorInit(reportElement, initialized);
// Ensure we only add our listeners once (can be called multiple times by mustache template).
if (initialized) {
return;
}
// Add event handlers to generic report editor elements.
document.addEventListener('click', event => {
// Toggle between edit and preview mode.
const toggleEditViewMode = event.target.closest(reportSelectors.actions.toggleEditPreview);
if (toggleEditViewMode) {
event.preventDefault();
const pendingPromise = new Pending('core_reportbuilder/reports:get'); | .then(response => {
customjs = response.javascript;
return Templates.render('core_reportbuilder/custom_report', response);
})
.then((html, js) => {
return Templates.replaceNodeContents(reportElement, html, js + customjs);
})
.then(() => pendingPromise.resolve())
.catch(Notification.exception);
}
});
initialized = true;
}; | const toggledEditMode = toggleEditViewMode.dataset.editMode !== "1";
let customjs = '';
getReport(reportElement.dataset.reportId, toggledEditMode) | random_line_split |
editor.js | // This file is part of Moodle - http://moodle.org/
//
// Moodle is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Moodle is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Moodle. If not, see <http://www.gnu.org/licenses/>.
/**
* Report builder editor
*
* @module core_reportbuilder/editor
* @copyright 2021 David Matamoros <[email protected]>
* @license http://www.gnu.org/copyleft/gpl.html GNU GPL v3 or later
*/
"use strict";
import 'core/inplace_editable';
import Notification from 'core/notification';
import Pending from 'core/pending';
import Templates from 'core/templates';
import * as reportSelectors from 'core_reportbuilder/local/selectors';
import {init as columnsEditorInit} from 'core_reportbuilder/local/editor/columns';
import {init as conditionsEditorInit} from 'core_reportbuilder/local/editor/conditions';
import {init as filtersEditorInit} from 'core_reportbuilder/local/editor/filters';
import {init as sortingEditorInit} from 'core_reportbuilder/local/editor/sorting';
import {getReport} from 'core_reportbuilder/local/repository/reports';
let initialized = false;
/**
* Initialise editor and all it's modules
*/
export const init = () => {
const reportElement = document.querySelector(reportSelectors.regions.report);
columnsEditorInit(reportElement, initialized);
conditionsEditorInit(reportElement, initialized);
filtersEditorInit(reportElement, initialized);
sortingEditorInit(reportElement, initialized);
// Ensure we only add our listeners once (can be called multiple times by mustache template).
if (initialized) {
return;
}
// Add event handlers to generic report editor elements.
document.addEventListener('click', event => {
// Toggle between edit and preview mode.
const toggleEditViewMode = event.target.closest(reportSelectors.actions.toggleEditPreview);
if (toggleEditViewMode) |
});
initialized = true;
};
| {
event.preventDefault();
const pendingPromise = new Pending('core_reportbuilder/reports:get');
const toggledEditMode = toggleEditViewMode.dataset.editMode !== "1";
let customjs = '';
getReport(reportElement.dataset.reportId, toggledEditMode)
.then(response => {
customjs = response.javascript;
return Templates.render('core_reportbuilder/custom_report', response);
})
.then((html, js) => {
return Templates.replaceNodeContents(reportElement, html, js + customjs);
})
.then(() => pendingPromise.resolve())
.catch(Notification.exception);
} | conditional_block |
win_reboot.py | # (c) 2016, Matt Davis <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import time
from datetime import datetime, timedelta
from ansible.errors import AnsibleError
from ansible.plugins.action import ActionBase
from ansible.module_utils._text import to_native
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class TimedOutException(Exception):
pass
class ActionModule(ActionBase):
TRANSFERS_FILES = False
DEFAULT_REBOOT_TIMEOUT = 600
DEFAULT_CONNECT_TIMEOUT = 5
DEFAULT_PRE_REBOOT_DELAY = 2
DEFAULT_POST_REBOOT_DELAY = 0
DEFAULT_TEST_COMMAND = 'whoami'
DEFAULT_REBOOT_MESSAGE = 'Reboot initiated by Ansible.'
def get_system_uptime(self):
uptime_command = "(Get-WmiObject -ClassName Win32_OperatingSystem).LastBootUpTime"
(rc, stdout, stderr) = self._connection.exec_command(uptime_command)
if rc != 0:
raise Exception("win_reboot: failed to get host uptime info, rc: %d, stdout: %s, stderr: %s"
% (rc, stdout, stderr))
return stdout
def do_until_success_or_timeout(self, what, timeout, what_desc, fail_sleep=1):
max_end_time = datetime.utcnow() + timedelta(seconds=timeout)
exc = ""
while datetime.utcnow() < max_end_time:
try:
what()
if what_desc:
display.debug("win_reboot: %s success" % what_desc)
return
except Exception as e:
exc = e
if what_desc:
display.debug("win_reboot: %s fail (expected), retrying in %d seconds..." % (what_desc, fail_sleep))
time.sleep(fail_sleep)
raise TimedOutException("timed out waiting for %s: %s" % (what_desc, exc))
def run(self, tmp=None, task_vars=None):
self._supports_check_mode = True
self._supports_async = True
if self._play_context.check_mode:
return dict(changed=True, elapsed=0, rebooted=True)
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
if result.get('skipped', False) or result.get('failed', False):
return result
# Handle timeout parameters and its alias
deprecated_args = {
'shutdown_timeout': '2.5',
'shutdown_timeout_sec': '2.5',
}
for arg, version in deprecated_args.items():
if self._task.args.get(arg) is not None:
display.warning("Since Ansible %s, %s is no longer used with win_reboot" % (arg, version))
if self._task.args.get('connect_timeout') is not None:
connect_timeout = int(self._task.args.get('connect_timeout', self.DEFAULT_CONNECT_TIMEOUT))
else:
connect_timeout = int(self._task.args.get('connect_timeout_sec', self.DEFAULT_CONNECT_TIMEOUT))
if self._task.args.get('reboot_timeout') is not None:
reboot_timeout = int(self._task.args.get('reboot_timeout', self.DEFAULT_REBOOT_TIMEOUT))
else:
reboot_timeout = int(self._task.args.get('reboot_timeout_sec', self.DEFAULT_REBOOT_TIMEOUT))
if self._task.args.get('pre_reboot_delay') is not None:
pre_reboot_delay = int(self._task.args.get('pre_reboot_delay', self.DEFAULT_PRE_REBOOT_DELAY))
else:
pre_reboot_delay = int(self._task.args.get('pre_reboot_delay_sec', self.DEFAULT_PRE_REBOOT_DELAY))
if self._task.args.get('post_reboot_delay') is not None:
post_reboot_delay = int(self._task.args.get('post_reboot_delay', self.DEFAULT_POST_REBOOT_DELAY))
else:
post_reboot_delay = int(self._task.args.get('post_reboot_delay_sec', self.DEFAULT_POST_REBOOT_DELAY))
test_command = str(self._task.args.get('test_command', self.DEFAULT_TEST_COMMAND))
msg = str(self._task.args.get('msg', self.DEFAULT_REBOOT_MESSAGE))
# Get current uptime
try:
before_uptime = self.get_system_uptime()
except Exception as e:
result['failed'] = True
result['reboot'] = False
result['msg'] = to_native(e)
return result
# Initiate reboot
display.vvv("rebooting server")
(rc, stdout, stderr) = self._connection.exec_command('shutdown /r /t %d /c "%s"' % (pre_reboot_delay, msg))
# Test for "A system shutdown has already been scheduled. (1190)" and handle it gracefully
if rc == 1190:
display.warning('A scheduled reboot was pre-empted by Ansible.')
# Try to abort (this may fail if it was already aborted)
(rc, stdout1, stderr1) = self._connection.exec_command('shutdown /a')
# Initiate reboot again
(rc, stdout2, stderr2) = self._connection.exec_command('shutdown /r /t %d' % pre_reboot_delay)
stdout += stdout1 + stdout2
stderr += stderr1 + stderr2
if rc != 0:
result['failed'] = True
result['rebooted'] = False
result['msg'] = "Shutdown command failed, error text was %s" % stderr
return result
start = datetime.now()
# Get the original connection_timeout option var so it can be reset after
connection_timeout_orig = None
try:
connection_timeout_orig = self._connection.get_option('connection_timeout')
except AnsibleError:
display.debug("win_reboot: connection_timeout connection option has not been set")
try:
# keep on checking system uptime with short connection responses
def check_uptime():
display.vvv("attempting to get system uptime")
# override connection timeout from defaults to custom value
try:
self._connection.set_options(direct={"connection_timeout": connect_timeout})
self._connection._reset()
except AttributeError:
display.warning("Connection plugin does not allow the connection timeout to be overridden")
# try and get uptime
try:
current_uptime = self.get_system_uptime() | except Exception as e:
raise e
if current_uptime == before_uptime:
raise Exception("uptime has not changed")
self.do_until_success_or_timeout(check_uptime, reboot_timeout, what_desc="reboot uptime check success")
# reset the connection to clear the custom connection timeout
try:
self._connection.set_options(direct={"connection_timeout": connection_timeout_orig})
self._connection._reset()
except (AnsibleError, AttributeError):
display.debug("Failed to reset connection_timeout back to default")
# finally run test command to ensure everything is working
def run_test_command():
display.vvv("attempting post-reboot test command '%s'" % test_command)
(rc, stdout, stderr) = self._connection.exec_command(test_command)
if rc != 0:
raise Exception('test command failed')
# FUTURE: add a stability check (system must remain up for N seconds) to deal with self-multi-reboot updates
self.do_until_success_or_timeout(run_test_command, reboot_timeout, what_desc="post-reboot test command success")
result['rebooted'] = True
result['changed'] = True
except TimedOutException as toex:
result['failed'] = True
result['rebooted'] = True
result['msg'] = to_native(toex)
if post_reboot_delay != 0:
display.vvv("win_reboot: waiting an additional %d seconds" % post_reboot_delay)
time.sleep(post_reboot_delay)
elapsed = datetime.now() - start
result['elapsed'] = elapsed.seconds
return result | random_line_split |
|
win_reboot.py | # (c) 2016, Matt Davis <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import time
from datetime import datetime, timedelta
from ansible.errors import AnsibleError
from ansible.plugins.action import ActionBase
from ansible.module_utils._text import to_native
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class TimedOutException(Exception):
pass
class ActionModule(ActionBase):
TRANSFERS_FILES = False
DEFAULT_REBOOT_TIMEOUT = 600
DEFAULT_CONNECT_TIMEOUT = 5
DEFAULT_PRE_REBOOT_DELAY = 2
DEFAULT_POST_REBOOT_DELAY = 0
DEFAULT_TEST_COMMAND = 'whoami'
DEFAULT_REBOOT_MESSAGE = 'Reboot initiated by Ansible.'
def get_system_uptime(self):
uptime_command = "(Get-WmiObject -ClassName Win32_OperatingSystem).LastBootUpTime"
(rc, stdout, stderr) = self._connection.exec_command(uptime_command)
if rc != 0:
raise Exception("win_reboot: failed to get host uptime info, rc: %d, stdout: %s, stderr: %s"
% (rc, stdout, stderr))
return stdout
def do_until_success_or_timeout(self, what, timeout, what_desc, fail_sleep=1):
max_end_time = datetime.utcnow() + timedelta(seconds=timeout)
exc = ""
while datetime.utcnow() < max_end_time:
try:
what()
if what_desc:
display.debug("win_reboot: %s success" % what_desc)
return
except Exception as e:
exc = e
if what_desc:
display.debug("win_reboot: %s fail (expected), retrying in %d seconds..." % (what_desc, fail_sleep))
time.sleep(fail_sleep)
raise TimedOutException("timed out waiting for %s: %s" % (what_desc, exc))
def | (self, tmp=None, task_vars=None):
self._supports_check_mode = True
self._supports_async = True
if self._play_context.check_mode:
return dict(changed=True, elapsed=0, rebooted=True)
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
if result.get('skipped', False) or result.get('failed', False):
return result
# Handle timeout parameters and its alias
deprecated_args = {
'shutdown_timeout': '2.5',
'shutdown_timeout_sec': '2.5',
}
for arg, version in deprecated_args.items():
if self._task.args.get(arg) is not None:
display.warning("Since Ansible %s, %s is no longer used with win_reboot" % (arg, version))
if self._task.args.get('connect_timeout') is not None:
connect_timeout = int(self._task.args.get('connect_timeout', self.DEFAULT_CONNECT_TIMEOUT))
else:
connect_timeout = int(self._task.args.get('connect_timeout_sec', self.DEFAULT_CONNECT_TIMEOUT))
if self._task.args.get('reboot_timeout') is not None:
reboot_timeout = int(self._task.args.get('reboot_timeout', self.DEFAULT_REBOOT_TIMEOUT))
else:
reboot_timeout = int(self._task.args.get('reboot_timeout_sec', self.DEFAULT_REBOOT_TIMEOUT))
if self._task.args.get('pre_reboot_delay') is not None:
pre_reboot_delay = int(self._task.args.get('pre_reboot_delay', self.DEFAULT_PRE_REBOOT_DELAY))
else:
pre_reboot_delay = int(self._task.args.get('pre_reboot_delay_sec', self.DEFAULT_PRE_REBOOT_DELAY))
if self._task.args.get('post_reboot_delay') is not None:
post_reboot_delay = int(self._task.args.get('post_reboot_delay', self.DEFAULT_POST_REBOOT_DELAY))
else:
post_reboot_delay = int(self._task.args.get('post_reboot_delay_sec', self.DEFAULT_POST_REBOOT_DELAY))
test_command = str(self._task.args.get('test_command', self.DEFAULT_TEST_COMMAND))
msg = str(self._task.args.get('msg', self.DEFAULT_REBOOT_MESSAGE))
# Get current uptime
try:
before_uptime = self.get_system_uptime()
except Exception as e:
result['failed'] = True
result['reboot'] = False
result['msg'] = to_native(e)
return result
# Initiate reboot
display.vvv("rebooting server")
(rc, stdout, stderr) = self._connection.exec_command('shutdown /r /t %d /c "%s"' % (pre_reboot_delay, msg))
# Test for "A system shutdown has already been scheduled. (1190)" and handle it gracefully
if rc == 1190:
display.warning('A scheduled reboot was pre-empted by Ansible.')
# Try to abort (this may fail if it was already aborted)
(rc, stdout1, stderr1) = self._connection.exec_command('shutdown /a')
# Initiate reboot again
(rc, stdout2, stderr2) = self._connection.exec_command('shutdown /r /t %d' % pre_reboot_delay)
stdout += stdout1 + stdout2
stderr += stderr1 + stderr2
if rc != 0:
result['failed'] = True
result['rebooted'] = False
result['msg'] = "Shutdown command failed, error text was %s" % stderr
return result
start = datetime.now()
# Get the original connection_timeout option var so it can be reset after
connection_timeout_orig = None
try:
connection_timeout_orig = self._connection.get_option('connection_timeout')
except AnsibleError:
display.debug("win_reboot: connection_timeout connection option has not been set")
try:
# keep on checking system uptime with short connection responses
def check_uptime():
display.vvv("attempting to get system uptime")
# override connection timeout from defaults to custom value
try:
self._connection.set_options(direct={"connection_timeout": connect_timeout})
self._connection._reset()
except AttributeError:
display.warning("Connection plugin does not allow the connection timeout to be overridden")
# try and get uptime
try:
current_uptime = self.get_system_uptime()
except Exception as e:
raise e
if current_uptime == before_uptime:
raise Exception("uptime has not changed")
self.do_until_success_or_timeout(check_uptime, reboot_timeout, what_desc="reboot uptime check success")
# reset the connection to clear the custom connection timeout
try:
self._connection.set_options(direct={"connection_timeout": connection_timeout_orig})
self._connection._reset()
except (AnsibleError, AttributeError):
display.debug("Failed to reset connection_timeout back to default")
# finally run test command to ensure everything is working
def run_test_command():
display.vvv("attempting post-reboot test command '%s'" % test_command)
(rc, stdout, stderr) = self._connection.exec_command(test_command)
if rc != 0:
raise Exception('test command failed')
# FUTURE: add a stability check (system must remain up for N seconds) to deal with self-multi-reboot updates
self.do_until_success_or_timeout(run_test_command, reboot_timeout, what_desc="post-reboot test command success")
result['rebooted'] = True
result['changed'] = True
except TimedOutException as toex:
result['failed'] = True
result['rebooted'] = True
result['msg'] = to_native(toex)
if post_reboot_delay != 0:
display.vvv("win_reboot: waiting an additional %d seconds" % post_reboot_delay)
time.sleep(post_reboot_delay)
elapsed = datetime.now() - start
result['elapsed'] = elapsed.seconds
return result
| run | identifier_name |
win_reboot.py | # (c) 2016, Matt Davis <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import time
from datetime import datetime, timedelta
from ansible.errors import AnsibleError
from ansible.plugins.action import ActionBase
from ansible.module_utils._text import to_native
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class TimedOutException(Exception):
pass
class ActionModule(ActionBase):
TRANSFERS_FILES = False
DEFAULT_REBOOT_TIMEOUT = 600
DEFAULT_CONNECT_TIMEOUT = 5
DEFAULT_PRE_REBOOT_DELAY = 2
DEFAULT_POST_REBOOT_DELAY = 0
DEFAULT_TEST_COMMAND = 'whoami'
DEFAULT_REBOOT_MESSAGE = 'Reboot initiated by Ansible.'
def get_system_uptime(self):
uptime_command = "(Get-WmiObject -ClassName Win32_OperatingSystem).LastBootUpTime"
(rc, stdout, stderr) = self._connection.exec_command(uptime_command)
if rc != 0:
raise Exception("win_reboot: failed to get host uptime info, rc: %d, stdout: %s, stderr: %s"
% (rc, stdout, stderr))
return stdout
def do_until_success_or_timeout(self, what, timeout, what_desc, fail_sleep=1):
max_end_time = datetime.utcnow() + timedelta(seconds=timeout)
exc = ""
while datetime.utcnow() < max_end_time:
try:
what()
if what_desc:
display.debug("win_reboot: %s success" % what_desc)
return
except Exception as e:
exc = e
if what_desc:
|
time.sleep(fail_sleep)
raise TimedOutException("timed out waiting for %s: %s" % (what_desc, exc))
def run(self, tmp=None, task_vars=None):
self._supports_check_mode = True
self._supports_async = True
if self._play_context.check_mode:
return dict(changed=True, elapsed=0, rebooted=True)
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
if result.get('skipped', False) or result.get('failed', False):
return result
# Handle timeout parameters and its alias
deprecated_args = {
'shutdown_timeout': '2.5',
'shutdown_timeout_sec': '2.5',
}
for arg, version in deprecated_args.items():
if self._task.args.get(arg) is not None:
display.warning("Since Ansible %s, %s is no longer used with win_reboot" % (arg, version))
if self._task.args.get('connect_timeout') is not None:
connect_timeout = int(self._task.args.get('connect_timeout', self.DEFAULT_CONNECT_TIMEOUT))
else:
connect_timeout = int(self._task.args.get('connect_timeout_sec', self.DEFAULT_CONNECT_TIMEOUT))
if self._task.args.get('reboot_timeout') is not None:
reboot_timeout = int(self._task.args.get('reboot_timeout', self.DEFAULT_REBOOT_TIMEOUT))
else:
reboot_timeout = int(self._task.args.get('reboot_timeout_sec', self.DEFAULT_REBOOT_TIMEOUT))
if self._task.args.get('pre_reboot_delay') is not None:
pre_reboot_delay = int(self._task.args.get('pre_reboot_delay', self.DEFAULT_PRE_REBOOT_DELAY))
else:
pre_reboot_delay = int(self._task.args.get('pre_reboot_delay_sec', self.DEFAULT_PRE_REBOOT_DELAY))
if self._task.args.get('post_reboot_delay') is not None:
post_reboot_delay = int(self._task.args.get('post_reboot_delay', self.DEFAULT_POST_REBOOT_DELAY))
else:
post_reboot_delay = int(self._task.args.get('post_reboot_delay_sec', self.DEFAULT_POST_REBOOT_DELAY))
test_command = str(self._task.args.get('test_command', self.DEFAULT_TEST_COMMAND))
msg = str(self._task.args.get('msg', self.DEFAULT_REBOOT_MESSAGE))
# Get current uptime
try:
before_uptime = self.get_system_uptime()
except Exception as e:
result['failed'] = True
result['reboot'] = False
result['msg'] = to_native(e)
return result
# Initiate reboot
display.vvv("rebooting server")
(rc, stdout, stderr) = self._connection.exec_command('shutdown /r /t %d /c "%s"' % (pre_reboot_delay, msg))
# Test for "A system shutdown has already been scheduled. (1190)" and handle it gracefully
if rc == 1190:
display.warning('A scheduled reboot was pre-empted by Ansible.')
# Try to abort (this may fail if it was already aborted)
(rc, stdout1, stderr1) = self._connection.exec_command('shutdown /a')
# Initiate reboot again
(rc, stdout2, stderr2) = self._connection.exec_command('shutdown /r /t %d' % pre_reboot_delay)
stdout += stdout1 + stdout2
stderr += stderr1 + stderr2
if rc != 0:
result['failed'] = True
result['rebooted'] = False
result['msg'] = "Shutdown command failed, error text was %s" % stderr
return result
start = datetime.now()
# Get the original connection_timeout option var so it can be reset after
connection_timeout_orig = None
try:
connection_timeout_orig = self._connection.get_option('connection_timeout')
except AnsibleError:
display.debug("win_reboot: connection_timeout connection option has not been set")
try:
# keep on checking system uptime with short connection responses
def check_uptime():
display.vvv("attempting to get system uptime")
# override connection timeout from defaults to custom value
try:
self._connection.set_options(direct={"connection_timeout": connect_timeout})
self._connection._reset()
except AttributeError:
display.warning("Connection plugin does not allow the connection timeout to be overridden")
# try and get uptime
try:
current_uptime = self.get_system_uptime()
except Exception as e:
raise e
if current_uptime == before_uptime:
raise Exception("uptime has not changed")
self.do_until_success_or_timeout(check_uptime, reboot_timeout, what_desc="reboot uptime check success")
# reset the connection to clear the custom connection timeout
try:
self._connection.set_options(direct={"connection_timeout": connection_timeout_orig})
self._connection._reset()
except (AnsibleError, AttributeError):
display.debug("Failed to reset connection_timeout back to default")
# finally run test command to ensure everything is working
def run_test_command():
display.vvv("attempting post-reboot test command '%s'" % test_command)
(rc, stdout, stderr) = self._connection.exec_command(test_command)
if rc != 0:
raise Exception('test command failed')
# FUTURE: add a stability check (system must remain up for N seconds) to deal with self-multi-reboot updates
self.do_until_success_or_timeout(run_test_command, reboot_timeout, what_desc="post-reboot test command success")
result['rebooted'] = True
result['changed'] = True
except TimedOutException as toex:
result['failed'] = True
result['rebooted'] = True
result['msg'] = to_native(toex)
if post_reboot_delay != 0:
display.vvv("win_reboot: waiting an additional %d seconds" % post_reboot_delay)
time.sleep(post_reboot_delay)
elapsed = datetime.now() - start
result['elapsed'] = elapsed.seconds
return result
| display.debug("win_reboot: %s fail (expected), retrying in %d seconds..." % (what_desc, fail_sleep)) | conditional_block |
win_reboot.py | # (c) 2016, Matt Davis <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import time
from datetime import datetime, timedelta
from ansible.errors import AnsibleError
from ansible.plugins.action import ActionBase
from ansible.module_utils._text import to_native
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class TimedOutException(Exception):
pass
class ActionModule(ActionBase):
TRANSFERS_FILES = False
DEFAULT_REBOOT_TIMEOUT = 600
DEFAULT_CONNECT_TIMEOUT = 5
DEFAULT_PRE_REBOOT_DELAY = 2
DEFAULT_POST_REBOOT_DELAY = 0
DEFAULT_TEST_COMMAND = 'whoami'
DEFAULT_REBOOT_MESSAGE = 'Reboot initiated by Ansible.'
def get_system_uptime(self):
uptime_command = "(Get-WmiObject -ClassName Win32_OperatingSystem).LastBootUpTime"
(rc, stdout, stderr) = self._connection.exec_command(uptime_command)
if rc != 0:
raise Exception("win_reboot: failed to get host uptime info, rc: %d, stdout: %s, stderr: %s"
% (rc, stdout, stderr))
return stdout
def do_until_success_or_timeout(self, what, timeout, what_desc, fail_sleep=1):
max_end_time = datetime.utcnow() + timedelta(seconds=timeout)
exc = ""
while datetime.utcnow() < max_end_time:
try:
what()
if what_desc:
display.debug("win_reboot: %s success" % what_desc)
return
except Exception as e:
exc = e
if what_desc:
display.debug("win_reboot: %s fail (expected), retrying in %d seconds..." % (what_desc, fail_sleep))
time.sleep(fail_sleep)
raise TimedOutException("timed out waiting for %s: %s" % (what_desc, exc))
def run(self, tmp=None, task_vars=None):
| self._supports_check_mode = True
self._supports_async = True
if self._play_context.check_mode:
return dict(changed=True, elapsed=0, rebooted=True)
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
if result.get('skipped', False) or result.get('failed', False):
return result
# Handle timeout parameters and its alias
deprecated_args = {
'shutdown_timeout': '2.5',
'shutdown_timeout_sec': '2.5',
}
for arg, version in deprecated_args.items():
if self._task.args.get(arg) is not None:
display.warning("Since Ansible %s, %s is no longer used with win_reboot" % (arg, version))
if self._task.args.get('connect_timeout') is not None:
connect_timeout = int(self._task.args.get('connect_timeout', self.DEFAULT_CONNECT_TIMEOUT))
else:
connect_timeout = int(self._task.args.get('connect_timeout_sec', self.DEFAULT_CONNECT_TIMEOUT))
if self._task.args.get('reboot_timeout') is not None:
reboot_timeout = int(self._task.args.get('reboot_timeout', self.DEFAULT_REBOOT_TIMEOUT))
else:
reboot_timeout = int(self._task.args.get('reboot_timeout_sec', self.DEFAULT_REBOOT_TIMEOUT))
if self._task.args.get('pre_reboot_delay') is not None:
pre_reboot_delay = int(self._task.args.get('pre_reboot_delay', self.DEFAULT_PRE_REBOOT_DELAY))
else:
pre_reboot_delay = int(self._task.args.get('pre_reboot_delay_sec', self.DEFAULT_PRE_REBOOT_DELAY))
if self._task.args.get('post_reboot_delay') is not None:
post_reboot_delay = int(self._task.args.get('post_reboot_delay', self.DEFAULT_POST_REBOOT_DELAY))
else:
post_reboot_delay = int(self._task.args.get('post_reboot_delay_sec', self.DEFAULT_POST_REBOOT_DELAY))
test_command = str(self._task.args.get('test_command', self.DEFAULT_TEST_COMMAND))
msg = str(self._task.args.get('msg', self.DEFAULT_REBOOT_MESSAGE))
# Get current uptime
try:
before_uptime = self.get_system_uptime()
except Exception as e:
result['failed'] = True
result['reboot'] = False
result['msg'] = to_native(e)
return result
# Initiate reboot
display.vvv("rebooting server")
(rc, stdout, stderr) = self._connection.exec_command('shutdown /r /t %d /c "%s"' % (pre_reboot_delay, msg))
# Test for "A system shutdown has already been scheduled. (1190)" and handle it gracefully
if rc == 1190:
display.warning('A scheduled reboot was pre-empted by Ansible.')
# Try to abort (this may fail if it was already aborted)
(rc, stdout1, stderr1) = self._connection.exec_command('shutdown /a')
# Initiate reboot again
(rc, stdout2, stderr2) = self._connection.exec_command('shutdown /r /t %d' % pre_reboot_delay)
stdout += stdout1 + stdout2
stderr += stderr1 + stderr2
if rc != 0:
result['failed'] = True
result['rebooted'] = False
result['msg'] = "Shutdown command failed, error text was %s" % stderr
return result
start = datetime.now()
# Get the original connection_timeout option var so it can be reset after
connection_timeout_orig = None
try:
connection_timeout_orig = self._connection.get_option('connection_timeout')
except AnsibleError:
display.debug("win_reboot: connection_timeout connection option has not been set")
try:
# keep on checking system uptime with short connection responses
def check_uptime():
display.vvv("attempting to get system uptime")
# override connection timeout from defaults to custom value
try:
self._connection.set_options(direct={"connection_timeout": connect_timeout})
self._connection._reset()
except AttributeError:
display.warning("Connection plugin does not allow the connection timeout to be overridden")
# try and get uptime
try:
current_uptime = self.get_system_uptime()
except Exception as e:
raise e
if current_uptime == before_uptime:
raise Exception("uptime has not changed")
self.do_until_success_or_timeout(check_uptime, reboot_timeout, what_desc="reboot uptime check success")
# reset the connection to clear the custom connection timeout
try:
self._connection.set_options(direct={"connection_timeout": connection_timeout_orig})
self._connection._reset()
except (AnsibleError, AttributeError):
display.debug("Failed to reset connection_timeout back to default")
# finally run test command to ensure everything is working
def run_test_command():
display.vvv("attempting post-reboot test command '%s'" % test_command)
(rc, stdout, stderr) = self._connection.exec_command(test_command)
if rc != 0:
raise Exception('test command failed')
# FUTURE: add a stability check (system must remain up for N seconds) to deal with self-multi-reboot updates
self.do_until_success_or_timeout(run_test_command, reboot_timeout, what_desc="post-reboot test command success")
result['rebooted'] = True
result['changed'] = True
except TimedOutException as toex:
result['failed'] = True
result['rebooted'] = True
result['msg'] = to_native(toex)
if post_reboot_delay != 0:
display.vvv("win_reboot: waiting an additional %d seconds" % post_reboot_delay)
time.sleep(post_reboot_delay)
elapsed = datetime.now() - start
result['elapsed'] = elapsed.seconds
return result | identifier_body |
|
angular-animate-stylers.js | angular.module('ngAnimateStylers', ['ngAnimateSequence'])
.config(['$$animateStylerProvider', function($$animateStylerProvider)
{
//JQUERY
$$animateStylerProvider.register('jQuery', function() {
return function(element, pre, duration, delay) {
delay = delay || 0;
element.css(pre);
return function(post, done) {
element.animate(post, duration, null, done);
}
};
});
//NOT WORKING
$$animateStylerProvider.register('webAnimations', function() {
return function(element, pre, duration, delay) {
delay = delay || 0;
duration = duration || 1000;
element.css(pre);
return function(post, done) {
var animation = element[0].animate({ 'border-width' : '100px'}, 5000);
//player.onfinish = done;
}
};
});
// Greensock Animation Platform (GSAP)
$$animateStylerProvider.register('gsap', function() {
return function(element, pre, duration, delay) {
var styler = TweenMax || TweenLite;
if ( !styler) |
return function(post, done) {
styler.fromTo(
element,
(duration || 0)/1000,
pre || { },
angular.extend( post, {onComplete:done, delay: (delay || 0)/1000} )
);
}
};
});
}]);
| {
throw new Error("GSAP TweenMax or TweenLite is not defined for use within $$animationStylerProvider.");
} | conditional_block |
angular-animate-stylers.js | angular.module('ngAnimateStylers', ['ngAnimateSequence'])
.config(['$$animateStylerProvider', function($$animateStylerProvider)
{
//JQUERY
$$animateStylerProvider.register('jQuery', function() {
return function(element, pre, duration, delay) {
delay = delay || 0;
element.css(pre);
return function(post, done) {
element.animate(post, duration, null, done);
}
};
}); | return function(element, pre, duration, delay) {
delay = delay || 0;
duration = duration || 1000;
element.css(pre);
return function(post, done) {
var animation = element[0].animate({ 'border-width' : '100px'}, 5000);
//player.onfinish = done;
}
};
});
// Greensock Animation Platform (GSAP)
$$animateStylerProvider.register('gsap', function() {
return function(element, pre, duration, delay) {
var styler = TweenMax || TweenLite;
if ( !styler) {
throw new Error("GSAP TweenMax or TweenLite is not defined for use within $$animationStylerProvider.");
}
return function(post, done) {
styler.fromTo(
element,
(duration || 0)/1000,
pre || { },
angular.extend( post, {onComplete:done, delay: (delay || 0)/1000} )
);
}
};
});
}]); |
//NOT WORKING
$$animateStylerProvider.register('webAnimations', function() { | random_line_split |
Subsets and Splits