# Write a function
# def mergeSorted(a, b)
# that merges two sorted lists, producing a new sorted list. Keep an index into each list,
# indicating how much of it has been processed already. Each time, append the smallest
# unprocessed element from either list, then advance the index. For example, if a is
# 1 4 9 16
# and b is
# 4 7 9 9 11
# then mergeSorted returns a new list containing the values
# 1 4 4 7 9 9 9 11 16
# FUNCTIONS
def mergeSorted(listA, listB):
    """Merge two sorted lists into a new sorted list."""
    i, j = 0, 0  # indices of the next unprocessed element in each list
    outputList = []
    # Append the smaller unprocessed element from either list, then advance that index.
    while i < len(listA) and j < len(listB):
        if listA[i] < listB[j]:
            outputList.append(listA[i])
            i += 1
        else:
            outputList.append(listB[j])
            j += 1
    # Copy whatever remains of the list that has not been exhausted yet.
    while i < len(listA):
        outputList.append(listA[i])
        i += 1
    while j < len(listB):
        outputList.append(listB[j])
        j += 1
    return outputList

# main
def main():
    exampleListA = [1, 4, 9, 16]
    exampleListB = [4, 7, 9, 9, 11]
    print(exampleListA)
    print(exampleListB)
    print("After merging")
    print(mergeSorted(exampleListA, exampleListB))

# PROGRAM RUN
main()
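
# For comparison (added, not part of the original exercise): the standard library's
# heapq.merge performs the same merge of already-sorted inputs lazily.
from heapq import merge

def mergeSortedWithHeapq(listA, listB):
    # heapq.merge returns an iterator over the merged values in sorted order.
    return list(merge(listA, listB))

# Example: mergeSortedWithHeapq([1, 4, 9, 16], [4, 7, 9, 9, 11])
# -> [1, 4, 4, 7, 9, 9, 9, 11, 16]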
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class StorageAccountsOperations:
"""StorageAccountsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storage.v2021_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def check_name_availability(
self,
account_name: "_models.StorageAccountCheckNameAvailabilityParameters",
**kwargs
) -> "_models.CheckNameAvailabilityResult":
"""Checks that the storage account name is valid and is not already in use.
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: ~azure.mgmt.storage.v2021_02_01.models.StorageAccountCheckNameAvailabilityParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CheckNameAvailabilityResult, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_02_01.models.CheckNameAvailabilityResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CheckNameAvailabilityResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.check_name_availability.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(account_name, 'StorageAccountCheckNameAvailabilityParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CheckNameAvailabilityResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
check_name_availability.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/checkNameAvailability'} # type: ignore
async def _create_initial(
self,
resource_group_name: str,
account_name: str,
parameters: "_models.StorageAccountCreateParameters",
**kwargs
) -> Optional["_models.StorageAccount"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.StorageAccount"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'StorageAccountCreateParameters')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('StorageAccount', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'} # type: ignore
async def begin_create(
self,
resource_group_name: str,
account_name: str,
parameters: "_models.StorageAccountCreateParameters",
**kwargs
) -> AsyncLROPoller["_models.StorageAccount"]:
"""Asynchronously creates a new storage account with the specified parameters. If an account is
already created and a subsequent create request is issued with different properties, the
account properties will be updated. If an account is already created and a subsequent create or
update request is issued with the exact same set of properties, the request will succeed.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param parameters: The parameters to provide for the created account.
:type parameters: ~azure.mgmt.storage.v2021_02_01.models.StorageAccountCreateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either StorageAccount or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.storage.v2021_02_01.models.StorageAccount]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccount"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_initial(
resource_group_name=resource_group_name,
account_name=account_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('StorageAccount', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'} # type: ignore
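# Added note (not generated code): begin_create starts a long-running operation and returns
# an AsyncLROPoller. Typical async usage is roughly:
#     poller = await client.storage_accounts.begin_create(resource_group, account_name, parameters)
#     account = await poller.result()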
async def delete(
self,
resource_group_name: str,
account_name: str,
**kwargs
) -> None:
"""Deletes a storage account in Microsoft Azure.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'} # type: ignore
async def get_properties(
self,
resource_group_name: str,
account_name: str,
expand: Optional[Union[str, "_models.StorageAccountExpand"]] = None,
**kwargs
) -> "_models.StorageAccount":
"""Returns the properties for the specified storage account including but not limited to name, SKU
name, location, and account status. The ListKeys operation should be used to retrieve storage
keys.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param expand: May be used to expand the properties within account's properties. By default,
data is not included when fetching properties. Currently we only support geoReplicationStats
and blobRestoreStatus.
:type expand: str or ~azure.mgmt.storage.v2021_02_01.models.StorageAccountExpand
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageAccount, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_02_01.models.StorageAccount
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccount"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self.get_properties.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('StorageAccount', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_properties.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'} # type: ignore
async def update(
self,
resource_group_name: str,
account_name: str,
parameters: "_models.StorageAccountUpdateParameters",
**kwargs
) -> "_models.StorageAccount":
"""The update operation can be used to update the SKU, encryption, access tier, or tags for a
storage account. It can also be used to map the account to a custom domain. Only one custom
domain is supported per storage account; the replacement/change of custom domain is not
supported. In order to replace an old custom domain, the old value must be cleared/unregistered
before a new value can be set. The update of multiple properties is supported. This call does
not change the storage keys for the account. If you want to change the storage account keys,
use the regenerate keys operation. The location and name of the storage account cannot be
changed after creation.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param parameters: The parameters to provide for the updated account.
:type parameters: ~azure.mgmt.storage.v2021_02_01.models.StorageAccountUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageAccount, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_02_01.models.StorageAccount
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccount"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'StorageAccountUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('StorageAccount', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'} # type: ignore
def list(
self,
**kwargs
) -> AsyncIterable["_models.StorageAccountListResult"]:
"""Lists all the storage accounts available under the subscription. Note that storage keys are not
returned; use the ListKeys operation for this.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either StorageAccountListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.storage.v2021_02_01.models.StorageAccountListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccountListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('StorageAccountListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/storageAccounts'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["_models.StorageAccountListResult"]:
"""Lists all the storage accounts available under the given resource group. Note that storage keys
are not returned; use the ListKeys operation for this.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either StorageAccountListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.storage.v2021_02_01.models.StorageAccountListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccountListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('StorageAccountListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts'} # type: ignore
async def list_keys(
self,
resource_group_name: str,
account_name: str,
expand: Optional[str] = "kerb",
**kwargs
) -> "_models.StorageAccountListKeysResult":
"""Lists the access keys or Kerberos keys (if active directory enabled) for the specified storage
account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param expand: Specifies type of the key to be listed. Possible value is kerb.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageAccountListKeysResult, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_02_01.models.StorageAccountListKeysResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccountListKeysResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self.list_keys.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('StorageAccountListKeysResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_keys.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/listKeys'} # type: ignore
async def regenerate_key(
self,
resource_group_name: str,
account_name: str,
regenerate_key: "_models.StorageAccountRegenerateKeyParameters",
**kwargs
) -> "_models.StorageAccountListKeysResult":
"""Regenerates one of the access keys or Kerberos keys for the specified storage account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param regenerate_key: Specifies name of the key which should be regenerated -- key1, key2,
kerb1, kerb2.
:type regenerate_key: ~azure.mgmt.storage.v2021_02_01.models.StorageAccountRegenerateKeyParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageAccountListKeysResult, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_02_01.models.StorageAccountListKeysResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccountListKeysResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.regenerate_key.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(regenerate_key, 'StorageAccountRegenerateKeyParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('StorageAccountListKeysResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
regenerate_key.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/regenerateKey'} # type: ignore
async def list_account_sas(
self,
resource_group_name: str,
account_name: str,
parameters: "_models.AccountSasParameters",
**kwargs
) -> "_models.ListAccountSasResponse":
"""List SAS credentials of a storage account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param parameters: The parameters to provide to list SAS credentials for the storage account.
:type parameters: ~azure.mgmt.storage.v2021_02_01.models.AccountSasParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ListAccountSasResponse, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_02_01.models.ListAccountSasResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListAccountSasResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.list_account_sas.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'AccountSasParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ListAccountSasResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_account_sas.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/ListAccountSas'} # type: ignore
async def list_service_sas(
self,
resource_group_name: str,
account_name: str,
parameters: "_models.ServiceSasParameters",
**kwargs
) -> "_models.ListServiceSasResponse":
"""List service SAS credentials of a specific resource.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param parameters: The parameters to provide to list service SAS credentials.
:type parameters: ~azure.mgmt.storage.v2021_02_01.models.ServiceSasParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ListServiceSasResponse, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_02_01.models.ListServiceSasResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListServiceSasResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.list_service_sas.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ServiceSasParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ListServiceSasResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_service_sas.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/ListServiceSas'} # type: ignore
async def _failover_initial(
self,
resource_group_name: str,
account_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
# Construct URL
url = self._failover_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_failover_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/failover'} # type: ignore
async def begin_failover(
self,
resource_group_name: str,
account_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Failover request can be triggered for a storage account in case of availability issues. The
failover occurs from the storage account's primary cluster to secondary cluster for RA-GRS
accounts. The secondary cluster will become primary after failover.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._failover_initial(
resource_group_name=resource_group_name,
account_name=account_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_failover.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/failover'} # type: ignore
async def _restore_blob_ranges_initial(
self,
resource_group_name: str,
account_name: str,
parameters: "_models.BlobRestoreParameters",
**kwargs
) -> "_models.BlobRestoreStatus":
cls = kwargs.pop('cls', None) # type: ClsType["_models.BlobRestoreStatus"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._restore_blob_ranges_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'BlobRestoreParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('BlobRestoreStatus', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('BlobRestoreStatus', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_restore_blob_ranges_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/restoreBlobRanges'} # type: ignore
async def begin_restore_blob_ranges(
self,
resource_group_name: str,
account_name: str,
parameters: "_models.BlobRestoreParameters",
**kwargs
) -> AsyncLROPoller["_models.BlobRestoreStatus"]:
"""Restore blobs in the specified blob ranges.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param parameters: The parameters to provide for restore blob ranges.
:type parameters: ~azure.mgmt.storage.v2021_02_01.models.BlobRestoreParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either BlobRestoreStatus or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.storage.v2021_02_01.models.BlobRestoreStatus]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.BlobRestoreStatus"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._restore_blob_ranges_initial(
resource_group_name=resource_group_name,
account_name=account_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('BlobRestoreStatus', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_restore_blob_ranges.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/restoreBlobRanges'} # type: ignore
async def revoke_user_delegation_keys(
self,
resource_group_name: str,
account_name: str,
**kwargs
) -> None:
"""Revoke user delegation keys.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
# Construct URL
url = self.revoke_user_delegation_keys.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
revoke_user_delegation_keys.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/revokeUserDelegationKeys'} # type: ignore
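
# Usage sketch (added for illustration, not part of the generated client): how this
# operations class is typically reached through the versioned management client. The
# import paths, the DefaultAzureCredential credential, and the subscription placeholder
# below are assumptions based on the usual azure-mgmt-storage / azure-identity layout
# and are not confirmed by this file.
if __name__ == "__main__":
    import asyncio
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.storage.v2021_02_01 import models
    from azure.mgmt.storage.v2021_02_01.aio import StorageManagementClient

    async def _example(subscription_id: str) -> None:
        async with DefaultAzureCredential() as credential:
            async with StorageManagementClient(credential, subscription_id) as client:
                # client.storage_accounts is an instance of StorageAccountsOperations.
                params = models.StorageAccountCheckNameAvailabilityParameters(
                    name="examplestorageacct")
                result = await client.storage_accounts.check_name_availability(params)
                print(result.name_available)

    asyncio.run(_example("<subscription-id>"))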
|
from flask import Blueprint, flash, redirect, render_template, url_for
from flask_babel import lazy_gettext as _
from flask_login import current_user
from app import db
from app.decorators import require_role
from app.forms import init_form
from app.forms.contact import ContactForm
from app.models.contact import Contact
from app.models.location import Location
from app.roles import Roles
from app.service import role_service
blueprint = Blueprint('contact', __name__, url_prefix='/contacts')
@blueprint.route('/', methods=['GET', 'POST'])
@blueprint.route('/<int:page_nr>/', methods=['GET', 'POST'])
@require_role(Roles.VACANCY_READ)
def list(page_nr=1):
    """Show a paginated list of contacts."""
    contacts = Contact.query.paginate(page_nr, 15, False)
    can_write = role_service.user_has_role(current_user, Roles.VACANCY_WRITE)
    return render_template('contact/list.htm', contacts=contacts,
                           can_write=can_write)


@blueprint.route('/create/', methods=['GET', 'POST'])
@blueprint.route('/edit/<int:contact_id>/', methods=['GET', 'POST'])
@require_role(Roles.VACANCY_WRITE)
def edit(contact_id=None):
    """Create or edit a contact, frontend."""
    if contact_id:
        contact = Contact.query.get(contact_id)
    else:
        contact = Contact()

    form = init_form(ContactForm, obj=contact)
    locations = Location.query.order_by(
        Location.address).order_by(Location.city)
    form.location_id.choices = \
        [(l.id, '%s, %s' % (l.address, l.city)) for l in locations]

    if form.validate_on_submit():
        if not contact.id and Contact.query.filter(
                Contact.email == form.email.data).count():
            flash(_('Contact email "%s" is already in use.' %
                    form.email.data), 'danger')
            return render_template('contact/edit.htm', contact=contact,
                                   form=form)
        form.populate_obj(contact)
        db.session.add(contact)
        db.session.commit()
        flash(_('Contact person saved.'), 'success')
        return redirect(url_for('contact.edit', contact_id=contact.id))

    return render_template('contact/edit.htm', contact=contact, form=form)


@blueprint.route('/delete/<int:contact_id>/', methods=['POST'])
@require_role(Roles.VACANCY_WRITE)
def delete(contact_id):
    """Delete a contact."""
    contact = Contact.query.get_or_404(contact_id)
    db.session.delete(contact)
    db.session.commit()
    flash(_('Contact person deleted.'), 'success')
    return redirect(url_for('contact.list'))
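
# Registration sketch (added for illustration): the blueprint above only handles requests
# once it is registered on the application. The app object here is an assumption; in this
# project the registration presumably happens in the application factory.
#
#     from flask import Flask
#
#     app = Flask(__name__)
#     app.register_blueprint(blueprint)  # exposes /contacts/, /contacts/create/, ...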
|
from datetime import timedelta
import mock
from unittest import TestCase
import warnings
from featureforge.experimentation.stats_manager import StatsManager
DEPRECATION_MSG = (
    'Init arguments will change. '
    'Take a look to http://feature-forge.readthedocs.io/en/latest/experimentation.html'
    '#exploring-the-finished-experiments'
)
DB_CONNECTION_PATH = 'featureforge.experimentation.stats_manager.StatsManager.setup_database_connection'  # NOQA


class TestStatsManager(TestCase):

    def setUp(self):
        self.db_name = 'a_db_name'
        self.booking_duration = 10

    def test_init_with_db_name_as_first_parameter_and_booking_duration_as_second(self):
        with mock.patch(DB_CONNECTION_PATH):
            st = StatsManager(db_name=self.db_name, booking_duration=self.booking_duration)
        self.assertEqual(st._db_config['name'], self.db_name)
        self.assertEqual(st.booking_delta, timedelta(seconds=self.booking_duration))

    def test_if_init_with_db_name_as_second_argument_will_warning(self):
        with warnings.catch_warnings(record=True) as w:
            # Cause all warnings to always be triggered.
            warnings.simplefilter("always", DeprecationWarning)
            # Trigger a warning.
            with mock.patch(DB_CONNECTION_PATH):
                StatsManager(self.booking_duration, self.db_name)
            # Verify some things
            self.assertEqual(len(w), 1)
            self.assertTrue(issubclass(w[-1].category, DeprecationWarning))
            self.assertEqual(str(w[-1].message), DEPRECATION_MSG)

    def test_if_use_db_name_as_second_argument_warnings_but_can_continue(self):
        with warnings.catch_warnings(record=True):
            # Cause all warnings to always be triggered.
            warnings.simplefilter("always", DeprecationWarning)
            # Trigger a warning.
            with mock.patch(DB_CONNECTION_PATH):
                st = StatsManager(self.booking_duration, self.db_name)
            self.assertEqual(st._db_config['name'], self.db_name)
            self.assertEqual(st.booking_delta, timedelta(seconds=self.booking_duration))
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.predictors.heads.class_head."""
import unittest
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.predictors.heads import keras_class_head
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class ConvolutionalKerasClassPredictorTest(test_case.TestCase):
def _build_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
activation: NONE
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def test_prediction_size_depthwise_false(self):
conv_hyperparams = self._build_conv_hyperparams()
class_prediction_head = keras_class_head.ConvolutionalClassHead(
is_training=True,
num_class_slots=20,
use_dropout=True,
dropout_keep_prob=0.5,
kernel_size=3,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=False,
num_predictions_per_location=1,
use_depthwise=False)
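# With a 17x19 feature map and one prediction per location, the head should
# emit [batch=64, 17*19=323, num_class_slots=20] class logits, which is what
# the assertion below checks.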
def graph_fn():
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
class_predictions = class_prediction_head(image_feature,)
return class_predictions
class_predictions = self.execute(graph_fn, [])
self.assertAllEqual([64, 323, 20], class_predictions.shape)
def test_prediction_size_depthwise_true(self):
conv_hyperparams = self._build_conv_hyperparams()
class_prediction_head = keras_class_head.ConvolutionalClassHead(
is_training=True,
num_class_slots=20,
use_dropout=True,
dropout_keep_prob=0.5,
kernel_size=3,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=False,
num_predictions_per_location=1,
use_depthwise=True)
def graph_fn():
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
class_predictions = class_prediction_head(image_feature,)
return class_predictions
class_predictions = self.execute(graph_fn, [])
self.assertAllEqual([64, 323, 20], class_predictions.shape)
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class MaskRCNNClassHeadTest(test_case.TestCase):
def _build_fc_hyperparams(self,
op_type=hyperparams_pb2.Hyperparams.FC):
hyperparams = hyperparams_pb2.Hyperparams()
hyperparams_text_proto = """
activation: NONE
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(hyperparams_text_proto, hyperparams)
hyperparams.op = op_type
return hyperparams_builder.KerasLayerHyperparams(hyperparams)
def test_prediction_size(self):
class_prediction_head = keras_class_head.MaskRCNNClassHead(
is_training=False,
num_class_slots=20,
fc_hyperparams=self._build_fc_hyperparams(),
freeze_batchnorm=False,
use_dropout=True,
dropout_keep_prob=0.5)
def graph_fn():
roi_pooled_features = tf.random_uniform(
[64, 7, 7, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
prediction = class_prediction_head(roi_pooled_features)
return prediction
prediction = self.execute(graph_fn, [])
self.assertAllEqual([64, 1, 20], prediction.shape)
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class WeightSharedConvolutionalKerasClassPredictorTest(test_case.TestCase):
def _build_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
activation: NONE
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def test_prediction_size_depthwise_false(self):
conv_hyperparams = self._build_conv_hyperparams()
class_prediction_head = keras_class_head.WeightSharedConvolutionalClassHead(
num_class_slots=20,
conv_hyperparams=conv_hyperparams,
num_predictions_per_location=1,
use_depthwise=False)
def graph_fn():
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
class_predictions = class_prediction_head(image_feature)
return class_predictions
class_predictions = self.execute(graph_fn, [])
self.assertAllEqual([64, 323, 20], class_predictions.shape)
def test_prediction_size_depthwise_true(self):
conv_hyperparams = self._build_conv_hyperparams()
class_prediction_head = keras_class_head.WeightSharedConvolutionalClassHead(
num_class_slots=20,
conv_hyperparams=conv_hyperparams,
num_predictions_per_location=1,
use_depthwise=True)
def graph_fn():
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
class_predictions = class_prediction_head(image_feature)
return class_predictions
class_predictions = self.execute(graph_fn, [])
self.assertAllEqual([64, 323, 20], class_predictions.shape)
def test_variable_count_depth_wise_true(self):
conv_hyperparams = self._build_conv_hyperparams()
class_prediction_head = (
keras_class_head.WeightSharedConvolutionalClassHead(
num_class_slots=20,
conv_hyperparams=conv_hyperparams,
num_predictions_per_location=1,
use_depthwise=True))
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
class_prediction_head(image_feature)
self.assertEqual(len(class_prediction_head.variables), 3)
def test_variable_count_depth_wise_False(self):
conv_hyperparams = self._build_conv_hyperparams()
class_prediction_head = (
keras_class_head.WeightSharedConvolutionalClassHead(
num_class_slots=20,
conv_hyperparams=conv_hyperparams,
num_predictions_per_location=1,
use_depthwise=False))
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
class_prediction_head(image_feature)
self.assertEqual(len(class_prediction_head.variables), 2)
if __name__ == '__main__':
tf.test.main()
|
#!/usr/bin/env python
import unittest
import ossie.utils.testing
import os
from omniORB import any
class ComponentTests(ossie.utils.testing.ScaComponentTestCase):
"""Test for all component implementations in AudioSource"""
def testScaBasicBehavior(self):
#######################################################################
# Launch the component with the default execparams
execparams = self.getPropertySet(kinds=("execparam",), modes=("readwrite", "writeonly"), includeNil=False)
execparams = dict([(x.id, any.from_any(x.value)) for x in execparams])
self.launch(execparams)
#######################################################################
# Verify the basic state of the component
self.assertNotEqual(self.comp, None)
self.assertEqual(self.comp.ref._non_existent(), False)
self.assertEqual(self.comp.ref._is_a("IDL:CF/Resource:1.0"), True)
self.assertEqual(self.spd.get_id(), self.comp.ref._get_identifier())
#######################################################################
# Simulate regular component startup
# Verify that neither initialize nor configure throws errors
self.comp.initialize()
configureProps = self.getPropertySet(kinds=("configure",), modes=("readwrite", "writeonly"), includeNil=False)
self.comp.configure(configureProps)
#######################################################################
# Validate that query returns all expected parameters
# Query of '[]' should return the following set of properties
expectedProps = []
expectedProps.extend(self.getPropertySet(kinds=("configure", "execparam"), modes=("readwrite", "readonly"), includeNil=True))
expectedProps.extend(self.getPropertySet(kinds=("allocate",), action="external", includeNil=True))
props = self.comp.query([])
props = dict((x.id, any.from_any(x.value)) for x in props)
# Query may return more than expected, but not less
for expectedProp in expectedProps:
self.assertEqual(expectedProp.id in props, True)
#######################################################################
# Verify that all expected ports are available
for port in self.scd.get_componentfeatures().get_ports().get_uses():
port_obj = self.comp.getPort(str(port.get_usesname()))
self.assertNotEqual(port_obj, None)
self.assertEqual(port_obj._non_existent(), False)
self.assertEqual(port_obj._is_a("IDL:CF/Port:1.0"), True)
for port in self.scd.get_componentfeatures().get_ports().get_provides():
port_obj = self.comp.getPort(str(port.get_providesname()))
self.assertNotEqual(port_obj, None)
self.assertEqual(port_obj._non_existent(), False)
self.assertEqual(port_obj._is_a(port.get_repid()), True)
#######################################################################
# Make sure start and stop can be called without throwing exceptions
self.comp.start()
self.comp.stop()
#######################################################################
# Simulate regular component shutdown
self.comp.releaseObject()
# TODO Add additional tests here
#
# See:
# ossie.utils.bulkio.bulkio_helpers,
# ossie.utils.bluefile.bluefile_helpers
# for modules that will assist with testing components with BULKIO ports
if __name__ == "__main__":
ossie.utils.testing.main("../AudioSource.spd.xml") # By default tests all implementations
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from mpl_toolkits.axes_grid.parasite_axes import SubplotHost
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
def plot(srv_app, srv_lwip, cli_app, cli_lwip):
#srv_app = {0:[],1:[],2:[]}
#srv_lwip = {0:[],1:[],2:[]}
#cli_app = {0:[],1:[],2:[]}
#cli_lwip = {0:[],1:[],2:[]}
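# Assumed input shape (inferred from how the values are used below): each
# argument maps an optimisation level (0, 1, 2) to a sequence of nine bar
# heights, one per channel/client-count combination on the x axis.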
O2lwip=cli_lwip[2]
O2comp=cli_app[2]
O1lwip=cli_lwip[1]
O1comp=cli_app[1]
O0lwip=cli_lwip[0]
O0comp=cli_app[0]
colorsred = ['brown', 'red', 'tomato', 'lightsalmon']
colorsgreen = ['darkgreen', 'seagreen', 'limegreen', 'springgreen']
colorsblue =['navy', 'blue', 'steelblue', 'lightsteelblue']
hatches = ['//', '++', 'xxx', 'oo','\\\\\\', 'OO', '..' , '---', "**"]
label_size=15
font_size=15
#client
N = 3
width = 0.25 # the width of the bars
xtra_space = 0.02
ind = np.arange(N) + 2 - (width*3+xtra_space*2)/2 # the x locations for the groups
ind1 = np.arange(N) + 2 - (width*3+xtra_space*2)/2 # the x locations for the groups
ind2 = np.arange(N) + 2+(N+1) - (width*3+xtra_space*2)/2 # the x locations for the groups
ind3 = np.arange(N) + 2+N+1+N+1 - (width*3+xtra_space*2)/2 # the x locations for the groups
ind = np.append(ind1, ind2)
ind = np.append(ind, ind3)
#ind = np.append(ind, ind4)
#ind = np.append(ind, ind5)
fig, ax = plt.subplots(2)
a1 = ax[0].bar(ind, O2comp, width, color=[0,0.5,1])
a2 = ax[0].bar(ind, O2lwip, width, fill=False, hatch=hatches[0], edgecolor=[0,0.5,1], bottom=O2comp)
b1 = ax[0].bar(ind+ width + xtra_space, O1comp, width, color=[0,1,0.5])
b2 = ax[0].bar(ind+ width + xtra_space, O1lwip, width, fill=False, hatch=hatches[0], edgecolor=[0,1,0.5], bottom=O1comp)
c1 = ax[0].bar(ind+ 2*(width + xtra_space), O0comp, width, color=[1,0.5,0])
c2 = ax[0].bar(ind+ 2*(width + xtra_space), O0lwip, width, fill=False, hatch=hatches[0], edgecolor=[1,0.5,0], bottom=O0comp)
OLevel = ["O-0", "O-1", "O-2", "O-3"]
channels = ["b@11Mbps", "g@9Mbps", "g@54Mbps"]
duration_type = [" - lwIP", " - App."]
legend_size=16
plt.figlegend(
(
a1, a2,
b1, b2,
c1, c2
),
(
OLevel[2]+duration_type[1], OLevel[2]+duration_type[0],
OLevel[1]+duration_type[1], OLevel[1]+duration_type[0],
OLevel[0]+duration_type[1], OLevel[0]+duration_type[0]
),
scatterpoints=1,
loc='upper center',
ncol=3,
prop={'size':legend_size})
xticks = [ 2, 2.9, 3, 4, 6, 6.9, 7, 8, 10, 10.9, 11, 12]
xticks_minor = [ 1, 5, 9, 13 ]#longer
xlbls = [channels[0], '6-Cli.', channels[1], channels[2],
channels[0], '4-Cli.', channels[1], channels[2],
channels[0], '2-Cli.', channels[1], channels[2]]
ax[0].set_xticks( xticks )
ax[0].set_xticks( xticks_minor, minor=True )
ax[0].set_xticklabels( xlbls )
ax[0].set_xlim( 1, 13 )
ax[0].grid( 'off', axis='x' )
ax[0].grid( 'off', axis='x', which='minor' )
# vertical alignment of xtick labels
va = [ 0, -.1, 0, 0, 0, -.1, 0, 0, 0, -.1, 0, 0]
for t, y in zip( ax[0].get_xticklabels( ), va ):
t.set_y( y )
ax[0].tick_params( axis='x', which='minor', direction='out', length=40 , top='off')
#ax.tick_params( axis='x', which='major', direction='out', length=10 )
ax[0].tick_params( axis='x', which='major', bottom='off', top='off' )
vals = ax[0].get_yticks()
ax[0].set_yticklabels(['{:3.0f}%'.format(x*100) for x in vals])
#server
O2lwip=srv_lwip[2]
O2comp=srv_app[2]
O1lwip=srv_lwip[1]
O1comp=srv_app[1]
O0lwip=srv_lwip[0]
O0comp=srv_app[0]
a1 = ax[1].bar(ind, O2comp, width, color=[0,0.5,1])
a2 = ax[1].bar(ind, O2lwip, width, fill=False, hatch=hatches[0], edgecolor=[0,0.5,1], bottom=O2comp)
b1 = ax[1].bar(ind+ width + xtra_space, O1comp, width, color=[0,1,0.5])
b2 = ax[1].bar(ind+ width + xtra_space, O1lwip, width, fill=False, hatch=hatches[0], edgecolor=[0,1,0.5], bottom=O1comp)
c1 = ax[1].bar(ind+ 2*(width + xtra_space), O0comp, width, color=[1,0.5,0])
c2 = ax[1].bar(ind+ 2*(width + xtra_space), O0lwip, width, fill=False, hatch=hatches[0], edgecolor=[1,0.5,0], bottom=O0comp)
channels = ["b@11Mbps", "g@9Mbps", "g@54Mbps"]
duration_type = [" - Communication", " - Computation"]
xticks = [ 2, 2.9, 3, 4, 6, 6.9, 7, 8, 10, 10.9, 11, 12]
xticks_minor = [ 1, 5, 9, 13 ]#longer
xlbls = [channels[0], '6-Cli.', channels[1], channels[2],
channels[0], '4-Cli.', channels[1], channels[2],
channels[0], '2-Cli.', channels[1], channels[2]]
ax[1].set_xticks( xticks )
ax[1].set_xticks( xticks_minor, minor=True )
ax[1].set_xticklabels( xlbls )
ax[1].set_xlim( 1, 13 )
ax[1].grid( 'off', axis='x' )
ax[1].grid( 'off', axis='x', which='minor' )
va = [ 0, -.1, 0, 0, 0, -.1, 0, 0, 0, -.1, 0, 0]
for t, y in zip( ax[1].get_xticklabels( ), va ):
t.set_y( y )
ax[1].tick_params( axis='x', which='minor', direction='out', length=40 , top='off')
ax[1].tick_params( axis='x', which='major', bottom='off', top='off' )
vals = ax[1].get_yticks()
ax[1].set_yticklabels(['{:3.0f}%'.format(x*100) for x in vals])
# add some text for labels, title and axes ticks
ax[0].set_ylabel('Core Utilization', fontsize=label_size)
ax[0].set_xlabel('Client', fontsize=label_size)
ax[1].set_ylabel('Core Utilization', fontsize=label_size)
ax[1].set_xlabel('Server', fontsize=label_size)
ax[0].tick_params(axis='y', labelsize=font_size)
ax[1].tick_params(axis='y', labelsize=font_size)
ax[0].tick_params(axis='x', labelsize=font_size)
ax[1].tick_params(axis='x', labelsize=font_size)
plt.show()
|
""" Principal Component Analysis
"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
#
# License: BSD 3 clause
from math import log, sqrt
import warnings
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import array2d, check_random_state, as_float_array
from ..utils import atleast2d_or_csr
from ..utils.extmath import fast_logdet, safe_sparse_dot, randomized_svd, \
fast_dot
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
The dataset is assumed to be embedded in Gaussian noise of shape
(n_samples, n_features) having spectrum ``spectrum``.
Parameters
----------
spectrum: array of shape (n)
data spectrum
rank: int,
tested rank value
n_samples: int,
number of samples
n_features: int,
embedding/empirical dimension
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.)
- log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
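# A minimal usage sketch (illustrative only, hypothetical values): given the
# eigenvalue spectrum of centered data, Minka's criterion keeps the rank whose
# log-likelihood is highest, e.g.
#
#     spectrum = np.array([5.0, 3.0, 1.0, 0.01, 0.01])
#     best_rank = _infer_dimension_(spectrum, n_samples=100, n_features=5)
#     # best_rank is the argmax of _assess_dimension_ over ranks 0..4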
class PCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data and keeping only the most significant singular vectors to project the
data to a lower dimensional space.
This implementation uses the scipy.linalg implementation of the singular
value decomposition. It only works for dense arrays and is not scalable to
large dimensional data.
The time complexity of this implementation is ``O(n ** 3)`` assuming
n ~ n_samples ~ n_features.
Parameters
----------
n_components : int, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle', Minka\'s MLE is used to guess the dimension
if ``0 < n_components < 1``, select the number of components such that
the amount of variance that needs to be explained is greater than the
percentage specified by n_components
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by n_samples times singular values to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometimes
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
Attributes
----------
`components_` : array, [n_components, n_features]
Components with maximum variance.
`explained_variance_ratio_` : array, [n_components]
Percentage of variance explained by each of the selected components. If
``n_components`` is not set then all components are stored and the sum of
explained variances is equal to 1.0.
`n_components_` : int
The estimated number of components. Relevant when n_components is set
to 'mle' or a number between 0 and 1 to select using explained
variance.
Notes
-----
For n_components='mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Due to implementation subtleties of the Singular Value Decomposition (SVD),
which is used in this implementation, running fit twice on the same matrix
can lead to principal components with signs flipped (change in direction).
For this reason, it is important to always use the same estimator object to
transform data in a consistent fashion.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
ProbabilisticPCA
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, copy=True, whiten=False):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
""" Fit the model on X
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
U, s, V : ndarrays
The SVD of the input data, copied and centered when
requested.
"""
X = array2d(X)
n_samples, n_features = X.shape
X = as_float_array(X, copy=self.copy)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
self.explained_variance_ = (S ** 2) / n_samples
self.explained_variance_ratio_ = (self.explained_variance_ /
self.explained_variance_.sum())
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
n_components = self.n_components
if n_components is None:
n_components = n_features
elif n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
n_components = _infer_dimension_(self.explained_variance_,
n_samples, n_features)
if 0 < n_components < 1.0:
# number of components for which the cumulated explained variance
# percentage is superior to the desired threshold
ratio_cumsum = self.explained_variance_ratio_.cumsum()
n_components = np.sum(ratio_cumsum < n_components) + 1
self.components_ = self.components_[:n_components, :]
self.explained_variance_ = \
self.explained_variance_[:n_components]
self.explained_variance_ratio_ = \
self.explained_variance_ratio_[:n_components]
self.n_components_ = n_components
return (U, S, V)
def transform(self, X):
"""Apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = array2d(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space, i.e.,
return an input X_original whose transform would be X
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original : array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation as transform.
"""
return fast_dot(X, self.components_) + self.mean_
class ProbabilisticPCA(PCA):
"""Additional layer on top of PCA that adds a probabilistic evaluation"""
__doc__ += PCA.__doc__
def fit(self, X, y=None, homoscedastic=True):
"""Additionally to PCA.fit, learns a covariance model
Parameters
----------
X : array of shape(n_samples, n_features)
The data to fit
homoscedastic : bool, optional,
If True, average variance across remaining dimensions
"""
PCA.fit(self, X)
n_samples, n_features = X.shape
self._dim = n_features
Xr = X - self.mean_
Xr -= np.dot(np.dot(Xr, self.components_.T), self.components_)
n_components = self.n_components
if n_components is None:
n_components = n_features
# Make the low rank part of the estimated covariance
self.covariance_ = np.dot(self.components_[:n_components].T *
self.explained_variance_[:n_components],
self.components_[:n_components])
if n_features == n_components:
delta = 0.
elif homoscedastic:
delta = (Xr ** 2).sum() / (n_samples * n_features)
else:
delta = (Xr ** 2).mean(axis=0) / (n_features - n_components)
# Add delta to the diagonal without extra allocation
self.covariance_.flat[::n_features + 1] += delta
return self
def score(self, X, y=None):
"""Return a score associated to new data
Parameters
----------
X: array of shape(n_samples, n_features)
The data to test
Returns
-------
ll: array of shape (n_samples),
log-likelihood of each row of X under the current model
"""
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
self.precision_ = linalg.inv(self.covariance_)
log_like = -.5 * (Xr * (np.dot(Xr, self.precision_))).sum(axis=1)
log_like -= .5 * (fast_logdet(self.covariance_)
+ n_features * log(2. * np.pi))
return log_like
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
Parameters
----------
n_components : int, optional
Maximum number of components to keep. When not given or None, this
is set to n_features (the second dimension of the training data).
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
iterated_power : int, optional
Number of iterations for the power method. 3 by default.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by the singular values to ensure uncorrelated outputs with unit
component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometimes
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Attributes
----------
`components_` : array, [n_components, n_features]
Components with maximum variance.
`explained_variance_ratio_` : array, [n_components]
Percentage of variance explained by each of the selected components. If
``n_components`` is not set then all components are stored and the sum of
explained variances is equal to 1.0.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=3, n_components=2,
random_state=None, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
PCA
ProbabilisticPCA
TruncatedSVD
References
----------
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
(arXiv:909)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
Notes
-----
This class supports sparse matrix input for backward compatibility, but
actually computes a truncated SVD instead of a PCA in that case (i.e. no
centering is performed). This support is deprecated; use the class
TruncatedSVD for sparse matrix support.
"""
def __init__(self, n_components=None, copy=True, iterated_power=3,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.mean_ = None
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def _fit(self, X):
"""Fit the model to the data X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.
"""
random_state = check_random_state(self.random_state)
if hasattr(X, 'todense'):
warnings.warn("Sparse matrix support is deprecated"
" and will be dropped in 0.16."
" Use TruncatedSVD instead.",
DeprecationWarning)
else:
# not a sparse matrix, ensure this is a 2D array
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
if not hasattr(X, 'todense'):
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U, S, V = randomized_svd(X, n_components,
n_iter=self.iterated_power,
random_state=random_state)
self.explained_variance_ = exp_var = (S ** 2) / n_samples
self.explained_variance_ratio_ = exp_var / exp_var.sum()
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
return X
def transform(self, X, y=None):
"""Apply dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
# XXX remove scipy.sparse support here in 0.16
X = atleast2d_or_csr(X)
if self.mean_ is not None:
X = X - self.mean_
X = safe_sparse_dot(X, self.components_.T)
return X
def fit_transform(self, X, y=None):
"""Apply dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = self._fit(atleast2d_or_csr(X))
X = safe_sparse_dot(X, self.components_.T)
return X
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original : array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
# XXX remove scipy.sparse support here in 0.16
X_original = safe_sparse_dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
|
"""
This code generates frames from CSV values that can be stitched together using FFMPEG
to animate pedestrian data. This version produces an animation at 4x speed.
"""
print "Importing..."
# Please ensure the following dependencies are installed before use:
import pylab
import numpy as np
import itertools
import sys, getopt
import operator
import collections
drawing_by_frame = []
#
def generate_frames(argv):
# Some default values if nothing is provided in command line arguments.
traces = 'bubble_pop_traces.csv'
background = 'trails_480.png'
# Get command line arguments.
# -f specifies a file name. This code expects csv files in the format PedestrianID, X, Y, FrameNum
# -b specifies a background image. Any format available to pylab is acceptable.
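# Example invocation (script name is hypothetical; the csv/png names are the defaults above):
#   python generate_frames.py -f bubble_pop_traces.csv -b trails_480.png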
try:
opts,args = getopt.getopt(argv, "f:b:")
except getopt.GetoptError:
print "Getopt Error"
exit(2)
for opt, arg in opts:
if opt == "-f":
traces = arg
elif opt == "-b":
background = arg
# Name each frame based on the filename
figure_name = traces.split("/")[-1].split(".")[-2]
# Load up csv file
trace = np.loadtxt(traces, comments=';', delimiter=',')
traces = itertools.groupby(trace, lambda x:x[0])
# These values should match those in pedestrian_tracking.py
w,h=640,360
border=20
# Some values from trail validation
valid = 0
avg_length = 0
num_traces = 0
# Load up background image.
background = pylab.imread(background)
pylab.imshow(background)
for id,t in traces:
pts = np.array(list(t))
invalid = False
# Validate Trails
if (pts[0,1]>border and pts[0,1]<w-border) and (pts[0,2]>border and pts[0,2]<h-border):
invalid = True
if (pts[-1,1]>border and pts[-1,1]<w-border) and (pts[-1,2]>border and pts[-1,2]<h-border):
invalid = True
if len(pts) < 200:
invalid = True
if ((pts[0,2] > h-border) and (pts[0,1] > w/2-75 and pts[0,1] < w/2+75) or (pts[-1,2] > h-border) and (pts[-1,1] > w/2-75 and pts[-1,1] < w/2+75)):
invalid = True
# For all valid trails, prepare them for generating animated trails by frame number
if not invalid:
num_traces += 1
avg_length += len(pts)
# Drawing colour for traces given as RGB
colour = (0,0,1)
for pt in pts:
this_frame = [pt[3], pt[1], pt[2], pt[0]]
drawing_by_frame.append(this_frame)
valid += 1
x = np.clip(pts[:,1],0,w)
y = np.clip(pts[:,2],0,h)
print "Valid Trails: " , valid, " Average Length:" , avg_length/num_traces
drawing_by_frame.sort()
last_frame = drawing_by_frame[-1][0]
current_frame = drawing_by_frame[0][0]
drawing_dict = collections.defaultdict(list)
count = 0
while len(drawing_by_frame) > 0:
#print "Next Frame, " , current_frame
pylab.imshow(background)
while drawing_by_frame and drawing_by_frame[0][0] == current_frame:
list_one = drawing_by_frame.pop(0)
x = drawing_dict[list_one[3]]
x.append([list_one[1], list_one[2]])
drawing_dict[list_one[3]] = x
# Adjust mod value here to adjust frame drawing frequency
# Draw stuff here
if (current_frame % 10 ==0):
print "Percentage Complete: " , (current_frame/last_frame)*100
draw_dict(drawing_dict, w, h, border, figure_name, current_frame, count)
count += 1
pylab.clf()
# guard: the list may have been emptied by the final frame
current_frame = drawing_by_frame[0][0] if drawing_by_frame else current_frame
def draw_dict(dict, w, h, border, figure_name, frame, count):
for trace in dict:
print trace
pts = dict[trace]
pylab.plot([p[0] for p in pts], [p[1] for p in pts],'-',color=(0,0,1),alpha=0.5, linewidth=2)
pylab.xlim(0,w)
pylab.ylim(h,0)
pylab.axis('off')
pylab.subplots_adjust(0,0,1,1,0,0)
pylab.savefig("Frames/" + figure_name + "_" + str(count).zfill(6) + '.png', dpi=150,bbox_inches='tight', pad_inches=0)
#pylab.savefig("Frames/" + 'frame' + str(int(frame)) + '.png', dpi=150,bbox_inches='tight', pad_inches=0)
if __name__ == "__main__":
print "Starting Frame Generation"
generate_frames(sys.argv[1:])
|
from __future__ import unicode_literals, division, print_function
import json
import math
import pytz
import random
import resource
import six
import sys
import time
import uuid
from collections import defaultdict
from datetime import timedelta
from django.conf import settings
from django.contrib.auth.models import User
from django.core.management import BaseCommand, CommandError
from django.core.management.base import CommandParser
from django.db import connection, transaction
from django.utils import timezone
from django_redis import get_redis_connection
from subprocess import check_call, CalledProcessError
from temba.channels.models import Channel
from temba.channels.tasks import squash_channelcounts
from temba.contacts.models import Contact, ContactField, ContactGroup, ContactURN, ContactGroupCount, URN, TEL_SCHEME, TWITTER_SCHEME
from temba.flows.models import FlowStart, FlowRun
from temba.flows.tasks import squash_flowpathcounts, squash_flowruncounts, prune_recentmessages
from temba.locations.models import AdminBoundary
from temba.msgs.models import Label, Msg
from temba.msgs.tasks import squash_labelcounts
from temba.orgs.models import Org
from temba.orgs.tasks import squash_topupcredits
from temba.utils import chunk_list, ms_to_datetime, datetime_to_str, datetime_to_ms
from temba.values.models import Value
# maximum age in days of database content
CONTENT_AGE = 3 * 365
# every user will have this password including the superuser
USER_PASSWORD = "Qwerty123"
# database dump containing admin boundary records
LOCATIONS_DUMP = 'test-data/nigeria.bin'
# organization names are generated from these components
ORG_NAMES = (
("UNICEF", "WHO", "WFP", "UNESCO", "UNHCR", "UNITAR", "FAO", "UNEP", "UNAIDS", "UNDAF"),
("Nigeria", "Chile", "Indonesia", "Rwanda", "Mexico", "Zambia", "India", "Brazil", "Sudan", "Mozambique")
)
# the users, channels, groups, labels and fields to create for each organization
USERS = (
{'username': "admin%d", 'email': "org%[email protected]", 'role': 'administrators'},
{'username': "editor%d", 'email': "org%[email protected]", 'role': 'editors'},
{'username': "viewer%d", 'email': "org%[email protected]", 'role': 'viewers'},
{'username': "surveyor%d", 'email': "org%[email protected]", 'role': 'surveyors'},
)
CHANNELS = (
{'name': "Android", 'channel_type': Channel.TYPE_ANDROID, 'scheme': 'tel', 'address': "1234"},
{'name': "Nexmo", 'channel_type': Channel.TYPE_NEXMO, 'scheme': 'tel', 'address': "2345"},
{'name': "Twitter", 'channel_type': 'TT', 'scheme': 'twitter', 'address': "my_handle"},
)
FIELDS = (
{'key': 'gender', 'label': "Gender", 'value_type': Value.TYPE_TEXT},
{'key': 'age', 'label': "Age", 'value_type': Value.TYPE_DECIMAL},
{'key': 'joined', 'label': "Joined On", 'value_type': Value.TYPE_DATETIME},
{'key': 'ward', 'label': "Ward", 'value_type': Value.TYPE_WARD},
{'key': 'district', 'label': "District", 'value_type': Value.TYPE_DISTRICT},
{'key': 'state', 'label': "State", 'value_type': Value.TYPE_STATE},
)
GROUPS = (
{'name': "Reporters", 'query': None, 'member': 0.95}, # member is either a probability or callable
{'name': "Farmers", 'query': None, 'member': 0.5},
{'name': "Doctors", 'query': None, 'member': 0.4},
{'name': "Teachers", 'query': None, 'member': 0.3},
{'name': "Drivers", 'query': None, 'member': 0.2},
{'name': "Testers", 'query': None, 'member': 0.1},
{'name': "Empty", 'query': None, 'member': 0.0},
{'name': "Youth (Dynamic)", 'query': 'age <= 18', 'member': lambda c: c['age'] and c['age'] <= 18},
{'name': "Unregistered (Dynamic)", 'query': 'joined = ""', 'member': lambda c: not c['joined']},
{'name': "Districts (Dynamic)", 'query': 'district=Faskari or district=Zuru or district=Anka',
'member': lambda c: c['district'] and c['district'].name in ("Faskari", "Zuru", "Anka")},
)
LABELS = ("Reporting", "Testing", "Youth", "Farming", "Health", "Education", "Trade", "Driving", "Building", "Spam")
FLOWS = (
{'name': "Favorites", 'file': "favorites.json", 'templates': (
["blue", "mutzig", "bob"],
["orange", "green", "primus", "jeb"],
)},
{'name': "SMS Form", 'file': "sms_form.json", 'templates': (["22 F Seattle"], ["35 M MIAMI"])},
{'name': "Pick a Number", 'file': "pick_a_number.json", 'templates': (["1"], ["4"], ["5"], ["7"], ["8"])}
)
# contact names are generated from these components
CONTACT_NAMES = (
("", "Anne", "Bob", "Cathy", "Dave", "Evan", "Freda", "George", "Hallie", "Igor"),
("", "Jameson", "Kardashian", "Lopez", "Mooney", "Newman", "O'Shea", "Poots", "Quincy", "Roberts"),
)
CONTACT_LANGS = (None, "eng", "fre", "spa", "kin")
CONTACT_HAS_TEL_PROB = 0.9 # 9/10 contacts have a phone number
CONTACT_HAS_TWITTER_PROB = 0.1 # 1/10 contacts have a twitter handle
CONTACT_IS_STOPPED_PROB = 0.01 # 1/100 contacts are stopped
CONTACT_IS_BLOCKED_PROB = 0.01 # 1/100 contacts are blocked
CONTACT_IS_DELETED_PROB = 0.005 # 1/200 contacts are deleted
CONTACT_HAS_FIELD_PROB = 0.8 # 8/10 fields set for each contact
RUN_RESPONSE_PROB = 0.1 # 1/10 runs will be responded to
INBOX_MESSAGES = (("What is", "I like", "No"), ("beer", "tea", "coffee"), ("thank you", "please", "today"))
class Command(BaseCommand):
COMMAND_GENERATE = 'generate'
COMMAND_SIMULATE = 'simulate'
help = "Generates a database suitable for performance testing"
def add_arguments(self, parser):
cmd = self
subparsers = parser.add_subparsers(dest='command', help='Command to perform',
parser_class=lambda **kw: CommandParser(cmd, **kw))
gen_parser = subparsers.add_parser('generate', help='Generates a clean testing database')
gen_parser.add_argument('--orgs', type=int, action='store', dest='num_orgs', default=100)
gen_parser.add_argument('--contacts', type=int, action='store', dest='num_contacts', default=1000000)
gen_parser.add_argument('--seed', type=int, action='store', dest='seed', default=None)
sim_parser = subparsers.add_parser('simulate', help='Simulates activity on an existing database')
sim_parser.add_argument('--runs', type=int, action='store', dest='num_runs', default=500)
def handle(self, command, *args, **kwargs):
start = time.time()
if command == self.COMMAND_GENERATE:
self.handle_generate(kwargs['num_orgs'], kwargs['num_contacts'], kwargs['seed'])
else:
self.handle_simulate(kwargs['num_runs'])
time_taken = time.time() - start
self._log("Completed in %d secs, peak memory usage: %d MiB\n" % (int(time_taken), int(self.peak_memory())))
def handle_generate(self, num_orgs, num_contacts, seed):
"""
Creates a clean database
"""
seed = self.configure_random(num_orgs, seed)
self._log("Generating random base database (seed=%d)...\n" % seed)
try:
has_data = Org.objects.exists()
except Exception: # pragma: no cover
raise CommandError("Run migrate command first to create database tables")
if has_data:
raise CommandError("Can't generate content in non-empty database.")
self.batch_size = 5000
# the timespan being modelled by this database
self.db_ends_on = timezone.now()
self.db_begins_on = self.db_ends_on - timedelta(days=CONTENT_AGE)
# this is a new database so clear out redis
self._log("Clearing out Redis cache... ")
r = get_redis_connection()
r.flushdb()
self._log(self.style.SUCCESS("OK") + '\n')
superuser = User.objects.create_superuser("root", "[email protected]", USER_PASSWORD)
country, locations = self.load_locations(LOCATIONS_DUMP)
orgs = self.create_orgs(superuser, country, num_orgs)
self.create_users(orgs)
self.create_channels(orgs)
self.create_fields(orgs)
self.create_groups(orgs)
self.create_labels(orgs)
self.create_flows(orgs)
self.create_contacts(orgs, locations, num_contacts)
def handle_simulate(self, num_runs):
"""
Prepares to resume simulating flow activity on an existing database
"""
self._log("Resuming flow activity simulation on existing database...\n")
orgs = list(Org.objects.order_by('id'))
if not orgs:
raise CommandError("Can't simulate activity on an empty database")
self.configure_random(len(orgs))
# in real life Nexmo messages are throttled, but that's not necessary for this simulation
del Channel.CHANNEL_SETTINGS[Channel.TYPE_NEXMO]['max_tps']
inputs_by_flow_name = {f['name']: f['templates'] for f in FLOWS}
self._log("Preparing existing orgs... ")
for org in orgs:
flows = list(org.flows.order_by('id'))
for flow in flows:
flow.input_templates = inputs_by_flow_name[flow.name]
org.cache = {
'users': list(org.get_org_users().order_by('id')),
'channels': list(org.channels.order_by('id')),
'groups': list(ContactGroup.user_groups.filter(org=org).order_by('id')),
'flows': flows,
'contacts': list(org.org_contacts.values_list('id', flat=True)), # only ids to save memory
'activity': None
}
self._log(self.style.SUCCESS("OK") + '\n')
self.simulate_activity(orgs, num_runs)
def configure_random(self, num_orgs, seed=None):
if not seed:
seed = random.randrange(0, 65536)
self.random = random.Random(seed)
# monkey patch uuid4 so it returns the same UUIDs for the same seed, see https://github.com/joke2k/faker/issues/484#issuecomment-287931101
from temba.utils import models
models.uuid4 = lambda: uuid.UUID(int=(self.random.getrandbits(128) | (1 << 63) | (1 << 78)) & (~(1 << 79) & ~(1 << 77) & ~(1 << 76) & ~(1 << 62)))
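# The bit operations above force the version nibble to 4 and the variant bits
# to the RFC 4122 value, so the seeded integers are still valid version-4 UUIDs.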
# We want a variety of large and small orgs so when allocating content like contacts and messages, we apply a
# bias toward the beginning orgs. if there are N orgs, then the amount of content the first org will be
# allocated is (1/N) ^ (1/bias). This sets the bias so that the first org will get ~50% of the content:
self.org_bias = math.log(1.0 / num_orgs, 0.5)
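# Worked example: with num_orgs = 100, bias = log(1/100, 0.5) ≈ 6.64, so the
# first org gets roughly (1/100) ** (1/6.64) ≈ 0.5 of the content.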
return seed
def load_locations(self, path):
"""
Loads admin boundary records from the given dump of that table
"""
self._log("Loading locations from %s... " % path)
# load dump into current db with pg_restore
db_config = settings.DATABASES['default']
try:
check_call('export PGPASSWORD=%s && pg_restore -U%s -w -d %s %s' %
(db_config['PASSWORD'], db_config['USER'], db_config['NAME'], path), shell=True)
except CalledProcessError: # pragma: no cover
raise CommandError("Error occurred whilst calling pg_restore to load locations dump")
# fetch as tuples of (WARD, DISTRICT, STATE)
wards = AdminBoundary.objects.filter(level=3).prefetch_related('parent', 'parent__parent')
locations = [(w, w.parent, w.parent.parent) for w in wards]
country = AdminBoundary.objects.filter(level=0).get()
self._log(self.style.SUCCESS("OK") + '\n')
return country, locations
def create_orgs(self, superuser, country, num_total):
"""
Creates and initializes the orgs
"""
self._log("Creating %d orgs... " % num_total)
org_names = ['%s %s' % (o1, o2) for o2 in ORG_NAMES[1] for o1 in ORG_NAMES[0]]
self.random.shuffle(org_names)
orgs = []
for o in range(num_total):
orgs.append(Org(name=org_names[o % len(org_names)], timezone=self.random.choice(pytz.all_timezones),
brand='rapidpro.io', country=country,
created_on=self.db_begins_on, created_by=superuser, modified_by=superuser))
Org.objects.bulk_create(orgs)
orgs = list(Org.objects.order_by('id'))
self._log(self.style.SUCCESS("OK") + "\nInitializing orgs... ")
for o, org in enumerate(orgs):
org.initialize(topup_size=max((1000 - o), 1) * 1000)
# we'll cache some metadata on each org as it's created to save re-fetching things
org.cache = {
'users': [],
'fields': {},
'groups': [],
'system_groups': {g.group_type: g for g in ContactGroup.system_groups.filter(org=org)},
}
self._log(self.style.SUCCESS("OK") + '\n')
return orgs
def create_users(self, orgs):
"""
Creates a user of each type for each org
"""
self._log("Creating %d users... " % (len(orgs) * len(USERS)))
# create users for each org
for org in orgs:
for u in USERS:
user = User.objects.create_user(u['username'] % org.id, u['email'] % org.id, USER_PASSWORD)
getattr(org, u['role']).add(user)
user.set_org(org)
org.cache['users'].append(user)
self._log(self.style.SUCCESS("OK") + '\n')
def create_channels(self, orgs):
"""
Creates the channels for each org
"""
self._log("Creating %d channels... " % (len(orgs) * len(CHANNELS)))
for org in orgs:
user = org.cache['users'][0]
for c in CHANNELS:
Channel.objects.create(org=org, name=c['name'], channel_type=c['channel_type'],
address=c['address'], schemes=[c['scheme']],
created_by=user, modified_by=user)
self._log(self.style.SUCCESS("OK") + '\n')
def create_fields(self, orgs):
"""
Creates the contact fields for each org
"""
self._log("Creating %d fields... " % (len(orgs) * len(FIELDS)))
for org in orgs:
user = org.cache['users'][0]
for f in FIELDS:
field = ContactField.objects.create(org=org, key=f['key'], label=f['label'],
value_type=f['value_type'], show_in_table=True,
created_by=user, modified_by=user)
org.cache['fields'][f['key']] = field
self._log(self.style.SUCCESS("OK") + '\n')
def create_groups(self, orgs):
"""
Creates the contact groups for each org
"""
self._log("Creating %d groups... " % (len(orgs) * len(GROUPS)))
for org in orgs:
user = org.cache['users'][0]
for g in GROUPS:
if g['query']:
group = ContactGroup.create_dynamic(org, user, g['name'], g['query'])
else:
group = ContactGroup.user_groups.create(org=org, name=g['name'], created_by=user, modified_by=user)
group.member = g['member']
group.count = 0
org.cache['groups'].append(group)
self._log(self.style.SUCCESS("OK") + '\n')
def create_labels(self, orgs):
"""
Creates the message labels for each org
"""
self._log("Creating %d labels... " % (len(orgs) * len(LABELS)))
for org in orgs:
user = org.cache['users'][0]
for name in LABELS:
Label.label_objects.create(org=org, name=name, created_by=user, modified_by=user)
self._log(self.style.SUCCESS("OK") + '\n')
def create_flows(self, orgs):
"""
Creates the flows for each org
"""
self._log("Creating %d flows... " % (len(orgs) * len(FLOWS)))
for org in orgs:
user = org.cache['users'][0]
for f in FLOWS:
with open('media/test_flows/' + f['file'], 'r') as flow_file:
org.import_app(json.load(flow_file), user)
self._log(self.style.SUCCESS("OK") + '\n')
def create_contacts(self, orgs, locations, num_contacts):
"""
Creates test and regular contacts for this database. Contacts are generated as flat dicts and
bulk-created in batches to avoid holding all contact and URN objects in memory.
"""
group_counts = defaultdict(int)
self._log("Creating %d test contacts..." % (len(orgs) * len(USERS)))
for org in orgs:
test_contacts = []
for user in org.cache['users']:
test_contacts.append(Contact.get_test_contact(user))
org.cache['test_contacts'] = test_contacts
self._log(self.style.SUCCESS("OK") + '\n')
self._log("Creating %d regular contacts...\n" % num_contacts)
# disable table triggers to speed up insertion and in the case of contact group m2m, avoid having an unsquashed
# count row for every contact
with DisableTriggersOn(Contact, ContactURN, Value, ContactGroup.contacts.through):
names = [('%s %s' % (c1, c2)).strip() for c2 in CONTACT_NAMES[1] for c1 in CONTACT_NAMES[0]]
names = [n if n else None for n in names]
batch_num = 1
for index_batch in chunk_list(six.moves.xrange(num_contacts), self.batch_size):
batch = []
# generate flat representations and contact objects for this batch
for c_index in index_batch: # pragma: no cover
org = self.random_org(orgs)
name = self.random_choice(names)
location = self.random_choice(locations) if self.probability(CONTACT_HAS_FIELD_PROB) else None
created_on = self.timeline_date(c_index / num_contacts)
c = {
'org': org,
'user': org.cache['users'][0],
'name': name,
'groups': [],
'tel': '+2507%08d' % c_index if self.probability(CONTACT_HAS_TEL_PROB) else None,
'twitter': '%s%d' % (name.replace(' ', '_').lower() if name else 'tweep', c_index) if self.probability(CONTACT_HAS_TWITTER_PROB) else None,
'gender': self.random_choice(('M', 'F')) if self.probability(CONTACT_HAS_FIELD_PROB) else None,
'age': self.random.randint(16, 80) if self.probability(CONTACT_HAS_FIELD_PROB) else None,
'joined': self.random_date() if self.probability(CONTACT_HAS_FIELD_PROB) else None,
'ward': location[0] if location else None,
'district': location[1] if location else None,
'state': location[2] if location else None,
'language': self.random_choice(CONTACT_LANGS),
'is_stopped': self.probability(CONTACT_IS_STOPPED_PROB),
'is_blocked': self.probability(CONTACT_IS_BLOCKED_PROB),
'is_active': self.probability(1 - CONTACT_IS_DELETED_PROB),
'created_on': created_on,
'modified_on': self.random_date(created_on, self.db_ends_on),
}
# work out which system groups this contact belongs to
if c['is_active']:
if not c['is_blocked'] and not c['is_stopped']:
c['groups'].append(org.cache['system_groups'][ContactGroup.TYPE_ALL])
if c['is_blocked']:
c['groups'].append(org.cache['system_groups'][ContactGroup.TYPE_BLOCKED])
if c['is_stopped']:
c['groups'].append(org.cache['system_groups'][ContactGroup.TYPE_STOPPED])
# let each user group decide if it is taking this contact
for g in org.cache['groups']:
if g.member(c) if callable(g.member) else self.probability(g.member):
c['groups'].append(g)
# track changes to group counts
for g in c['groups']:
group_counts[g] += 1
batch.append(c)
self._create_contact_batch(batch)
self._log(" > Created batch %d of %d\n" % (batch_num, max(num_contacts // self.batch_size, 1)))
batch_num += 1
# create group count records manually
counts = []
for group, count in group_counts.items():
counts.append(ContactGroupCount(group=group, count=count, is_squashed=True))
group.count = count
ContactGroupCount.objects.bulk_create(counts)
def _create_contact_batch(self, batch):
"""
Bulk creates a batch of contacts from flat representations
"""
for c in batch:
c['object'] = Contact(org=c['org'], name=c['name'], language=c['language'],
is_stopped=c['is_stopped'], is_blocked=c['is_blocked'],
is_active=c['is_active'],
created_by=c['user'], created_on=c['created_on'],
modified_by=c['user'], modified_on=c['modified_on'])
Contact.objects.bulk_create([c['object'] for c in batch])
# now that contacts have pks, bulk create the actual URN, value and group membership objects
batch_urns = []
batch_values = []
batch_memberships = []
for c in batch:
org = c['org']
c['urns'] = []
if c['tel']:
c['urns'].append(ContactURN(org=org, contact=c['object'], priority=50, scheme=TEL_SCHEME,
path=c['tel'], identity=URN.from_tel(c['tel'])))
if c['twitter']:
c['urns'].append(ContactURN(org=org, contact=c['object'], priority=50, scheme=TWITTER_SCHEME,
path=c['twitter'], identity=URN.from_twitter(c['twitter'])))
if c['gender']:
batch_values.append(Value(org=org, contact=c['object'], contact_field=org.cache['fields']['gender'],
string_value=c['gender']))
if c['age']:
batch_values.append(Value(org=org, contact=c['object'], contact_field=org.cache['fields']['age'],
string_value=str(c['age']), decimal_value=c['age']))
if c['joined']:
batch_values.append(Value(org=org, contact=c['object'], contact_field=org.cache['fields']['joined'],
string_value=datetime_to_str(c['joined']), datetime_value=c['joined']))
if c['ward']:
batch_values.append(Value(org=org, contact=c['object'], contact_field=org.cache['fields']['ward'],
string_value=c['ward'].name, location_value=c['ward']))
if c['district']:
batch_values.append(Value(org=org, contact=c['object'], contact_field=org.cache['fields']['district'],
string_value=c['district'].name, location_value=c['district']))
if c['state']:
batch_values.append(Value(org=org, contact=c['object'], contact_field=org.cache['fields']['state'],
string_value=c['state'].name, location_value=c['state']))
for g in c['groups']:
batch_memberships.append(ContactGroup.contacts.through(contact=c['object'], contactgroup=g))
batch_urns += c['urns']
ContactURN.objects.bulk_create(batch_urns)
Value.objects.bulk_create(batch_values)
ContactGroup.contacts.through.objects.bulk_create(batch_memberships)
def simulate_activity(self, orgs, num_runs):
self._log("Starting simulation. Ctrl+C to cancel...\n")
runs = 0
while runs < num_runs:
try:
with transaction.atomic():
# make sure every org has an active flow
for org in orgs:
if not org.cache['activity']:
self.start_flow_activity(org)
with transaction.atomic():
org = self.random_org(orgs)
if self.probability(0.1):
self.create_unsolicited_incoming(org)
else:
self.create_flow_run(org)
runs += 1
except KeyboardInterrupt:
self._log("Shutting down...\n")
break
squash_channelcounts()
squash_flowpathcounts()
squash_flowruncounts()
prune_recentmessages()
squash_topupcredits()
squash_labelcounts()
def start_flow_activity(self, org):
assert not org.cache['activity']
user = org.cache['users'][0]
flow = self.random_choice(org.cache['flows'])
if self.probability(0.9):
# start a random group using a flow start
group = self.random_choice(org.cache['groups'])
contacts_started = list(group.contacts.values_list('id', flat=True))
self._log(" > Starting flow %s for group %s (%d) in org %s\n"
% (flow.name, group.name, len(contacts_started), org.name))
start = FlowStart.create(flow, user, groups=[group], restart_participants=True)
start.start()
else:
# start a random individual without a flow start
if not org.cache['contacts']:
return
contact = Contact.objects.get(id=self.random_choice(org.cache['contacts']))
contacts_started = [contact.id]
self._log(" > Starting flow %s for contact #%d in org %s\n" % (flow.name, contact.id, org.name))
flow.start([], [contact], restart_participants=True)
org.cache['activity'] = {'flow': flow, 'unresponded': contacts_started, 'started': list(contacts_started)}
def end_flow_activity(self, org):
self._log(" > Ending flow %s for in org %s\n" % (org.cache['activity']['flow'].name, org.name))
org.cache['activity'] = None
runs = FlowRun.objects.filter(org=org, is_active=True)
FlowRun.bulk_exit(runs, FlowRun.EXIT_TYPE_EXPIRED)
def create_flow_run(self, org):
activity = org.cache['activity']
flow = activity['flow']
if activity['unresponded']:
contact_id = self.random_choice(activity['unresponded'])
activity['unresponded'].remove(contact_id)
contact = Contact.objects.get(id=contact_id)
urn = contact.urns.first()
if urn:
self._log(" > Receiving flow responses for flow %s in org %s\n" % (flow.name, flow.org.name))
inputs = self.random_choice(flow.input_templates)
for text in inputs:
channel = flow.org.cache['channels'][0]
Msg.create_incoming(channel, six.text_type(urn), text)
# if more than 10% of contacts have responded, consider flow activity over
if len(activity['unresponded']) <= (len(activity['started']) * 0.9):
self.end_flow_activity(flow.org)
def create_unsolicited_incoming(self, org):
if not org.cache['contacts']:
return
self._log(" > Receiving unsolicited incoming message in org %s\n" % org.name)
available_contacts = list(set(org.cache['contacts']) - set(org.cache['activity']['started']))
if available_contacts:
contact = Contact.objects.get(id=self.random_choice(available_contacts))
channel = self.random_choice(org.cache['channels'])
urn = contact.urns.first()
if urn:
text = ' '.join([self.random_choice(l) for l in INBOX_MESSAGES])
Msg.create_incoming(channel, six.text_type(urn), text)
def probability(self, prob):
return self.random.random() < prob
def random_choice(self, seq, bias=1.0):
if not seq:
raise ValueError("Can't select random item from empty sequence")
return seq[int(math.pow(self.random.random(), bias) * len(seq))]
def weighted_choice(self, seq, weights):
r = self.random.random() * sum(weights)
cum_weight = 0.0
for i, item in enumerate(seq):
cum_weight += weights[i]
if r < cum_weight or (i == len(seq) - 1):
return item
def random_org(self, orgs):
"""
Returns a random org with bias toward the orgs with the lowest indexes
"""
return self.random_choice(orgs, bias=self.org_bias)
def random_date(self, start=None, end=None):
if not end:
end = timezone.now()
if not start:
start = end - timedelta(days=365)
if start == end:
return end
return ms_to_datetime(self.random.randrange(datetime_to_ms(start), datetime_to_ms(end)))
def timeline_date(self, dist):
"""
Converts a 0..1 distance into a date on this database's overall timeline
"""
seconds_span = (self.db_ends_on - self.db_begins_on).total_seconds()
return self.db_begins_on + timedelta(seconds=(seconds_span * dist))
@staticmethod
def peak_memory():
rusage_denom = 1024
if sys.platform == 'darwin':
# OSX gives value in bytes, other OSes in kilobytes
rusage_denom *= rusage_denom
return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / rusage_denom
def _log(self, text):
self.stdout.write(text, ending='')
self.stdout.flush()
class DisableTriggersOn(object):
"""
Helper context manager for temporarily disabling database triggers for a given model
"""
def __init__(self, *models):
self.tables = [m._meta.db_table for m in models]
def __enter__(self):
with connection.cursor() as cursor:
for table in self.tables:
cursor.execute('ALTER TABLE %s DISABLE TRIGGER ALL;' % table)
def __exit__(self, exc_type, exc_val, exc_tb):
with connection.cursor() as cursor:
for table in self.tables:
cursor.execute('ALTER TABLE %s ENABLE TRIGGER ALL;' % table)
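# Usage sketch (illustrative): wrap a bulk load so that triggers on the affected
# tables are skipped while the rows are inserted, e.g.
#
#   with DisableTriggersOn(Contact, ContactURN, Value):
#       ContactURN.objects.bulk_create(batch_urns)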
|
import unittest
import re
from nose.tools import eq_, ok_
from django.test.client import RequestFactory
from django.core.cache import cache
from fancy_cache.memory import find_urls
from . import views
class TestViews(unittest.TestCase):
def setUp(self):
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_render_home1(self):
request = self.factory.get('/anything')
response = views.home(request)
eq_(response.status_code, 200)
ok_(re.findall('Random:\w+', response.content.decode("utf8")))
random_string_1 = re.findall('Random:(\w+)', response.content.decode("utf8"))[0]
# do it again
response = views.home(request)
eq_(response.status_code, 200)
random_string_2 = re.findall('Random:(\w+)', response.content.decode("utf8"))[0]
eq_(random_string_1, random_string_2)
def test_render_home2(self):
authenticated = RequestFactory(AUTH_USER='peter')
request = self.factory.get('/2')
response = views.home2(request)
eq_(response.status_code, 200)
ok_(re.findall('Random:\w+', response.content.decode("utf8")))
random_string_1 = re.findall('Random:(\w+)', response.content.decode("utf8"))[0]
# do it again
response = views.home2(request)
eq_(response.status_code, 200)
random_string_2 = re.findall('Random:(\w+)', response.content.decode("utf8"))[0]
eq_(random_string_1, random_string_2)
# do it again, but with a hint to disable cache
request = authenticated.get('/2')
response = views.home2(request)
eq_(response.status_code, 200)
random_string_2 = re.findall('Random:(\w+)', response.content.decode("utf8"))[0]
ok_(random_string_1 != random_string_2)
def test_render_home3(self):
request = self.factory.get('/anything')
response = views.home3(request)
eq_(response.status_code, 200)
ok_(re.findall('Random:\w+', response.content.decode("utf8")))
random_string_1 = re.findall('Random:(\w+)', response.content.decode("utf8"))[0]
ok_('In your HTML' in response.content.decode("utf8"))
extra_random_1 = re.findall('In your HTML:(\w+)', response.content.decode("utf8"))[0]
response = views.home3(request)
eq_(response.status_code, 200)
ok_(re.findall('Random:\w+', response.content.decode("utf8")))
random_string_2 = re.findall('Random:(\w+)', response.content.decode("utf8"))[0]
extra_random_2 = re.findall('In your HTML:(\w+)', response.content.decode("utf8"))[0]
ok_('In your HTML' in response.content.decode("utf8"))
eq_(random_string_1, random_string_2)
# the post_process_response is only called once
eq_(extra_random_1, extra_random_2)
def test_render_home3_no_cache(self):
factory = RequestFactory(AUTH_USER='peter')
request = factory.get('/3')
response = views.home3(request)
eq_(response.status_code, 200)
ok_(re.findall('Random:\w+', response.content.decode("utf8")))
ok_('In your HTML' not in response.content.decode("utf8"))
def test_render_home4(self):
request = self.factory.get('/4')
response = views.home4(request)
eq_(response.status_code, 200)
ok_(re.findall('Random:\w+', response.content.decode("utf8")))
random_string_1 = re.findall('Random:(\w+)', response.content.decode("utf8"))[0]
ok_('In your HTML' in response.content.decode("utf8"))
extra_random_1 = re.findall('In your HTML:(\w+)', response.content.decode("utf8"))[0]
response = views.home4(request)
eq_(response.status_code, 200)
ok_(re.findall('Random:\w+', response.content.decode("utf8")))
random_string_2 = re.findall('Random:(\w+)', response.content.decode("utf8"))[0]
extra_random_2 = re.findall('In your HTML:(\w+)', response.content.decode("utf8"))[0]
ok_('In your HTML' in response.content.decode("utf8"))
eq_(random_string_1, random_string_2)
# the post_process_response is now called every time
ok_(extra_random_1 != extra_random_2)
def test_render_home5(self):
request = self.factory.get('/4', {'foo': 'bar'})
response = views.home5(request)
eq_(response.status_code, 200)
ok_(re.findall('Random:\w+', response.content.decode("utf8")))
random_string_1 = re.findall('Random:(\w+)', response.content.decode("utf8"))[0]
request = self.factory.get('/4', {'foo': 'baz'})
response = views.home5(request)
eq_(response.status_code, 200)
ok_(re.findall('Random:\w+', response.content.decode("utf8")))
random_string_2 = re.findall('Random:(\w+)', response.content.decode("utf8"))[0]
ok_(random_string_1 != random_string_2)
request = self.factory.get('/4', {'foo': 'baz', 'other': 'junk'})
response = views.home5(request)
eq_(response.status_code, 200)
ok_(re.findall('Random:\w+', response.content.decode("utf8")))
random_string_3 = re.findall('Random:(\w+)', response.content.decode("utf8"))[0]
eq_(random_string_2, random_string_3)
def test_render_home5bis(self):
request = self.factory.get('/4', {'foo': 'bar'})
response = views.home5bis(request)
eq_(response.status_code, 200)
ok_(re.findall('Random:\w+', response.content.decode("utf8")))
random_string_1 = re.findall('Random:(\w+)', response.content.decode("utf8"))[0]
request = self.factory.get('/4', {'foo': 'baz'})
response = views.home5bis(request)
eq_(response.status_code, 200)
ok_(re.findall('Random:\w+', response.content.decode("utf8")))
random_string_2 = re.findall('Random:(\w+)', response.content.decode("utf8"))[0]
ok_(random_string_1 != random_string_2)
request = self.factory.get('/4', {'foo': 'baz', 'bar': 'foo'})
response = views.home5bis(request)
eq_(response.status_code, 200)
ok_(re.findall('Random:\w+', response.content.decode("utf8")))
random_string_3 = re.findall('Random:(\w+)', response.content.decode("utf8"))[0]
eq_(random_string_2, random_string_3)
def test_remember_stats_all_urls(self):
request = self.factory.get('/anything')
response = views.home6(request)
eq_(response.status_code, 200)
# now ask the memory thing
match, = find_urls(urls=['/anything'])
eq_(match[0], '/anything')
eq_(match[2]['hits'], 0)
eq_(match[2]['misses'], 1)
# second time
response = views.home6(request)
eq_(response.status_code, 200)
match, = find_urls(urls=['/anything'])
eq_(match[0], '/anything')
eq_(match[2]['hits'], 1)
eq_(match[2]['misses'], 1)
def test_remember_stats_all_urls_looong_url(self):
request = self.factory.get(
'/something/really/long/to/start/with/right/here/since/this/will/'
'test/that/things/work/with/long/urls/too',
{
'line1': 'Bad luck, wind been blowing at my back',
'line2': "I was born to bring trouble to wherever I'm at",
'line3': "Got the number thirteen, tattooed on my neck",
'line4': "When the ink starts to itch, ",
'line5': "then the black will turn to red",
}
)
response = views.home6(request)
eq_(response.status_code, 200)
# now ask the memory thing
match, = find_urls()
ok_(match[0].startswith('/something/really'))
eq_(match[2]['hits'], 0)
eq_(match[2]['misses'], 1)
# second time
response = views.home6(request)
eq_(response.status_code, 200)
match, = find_urls([])
ok_(match[0].startswith('/something/really'))
eq_(match[2]['hits'], 1)
eq_(match[2]['misses'], 1)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import pygobstoneslang.common.utils as utils
import pygobstoneslang.common.i18n as i18n
from pygobstoneslang.common.tools import tools
import pygobstoneslang.lang as lang
from pygobstoneslang.lang.gbs_api import GobstonesRun
import logging
import os
import traceback
def setup_logger():
pygbs_path = os.path.join(os.path.expanduser("~"), ".pygobstones")
logger = logging.getLogger("lang")
formatter = logging.Formatter('[%(asctime)s][%(levelname)s][%(name)s] %(message)s')
if not os.path.exists(pygbs_path):
os.mkdir(pygbs_path)
filehandler = logging.FileHandler(os.path.join(pygbs_path, "pygobstones-lang.log"))
filehandler.setFormatter(formatter)
consolehandler = logging.StreamHandler()
consolehandler.setFormatter(formatter)
logger.addHandler(filehandler)
logger.addHandler(consolehandler)
logger.setLevel(logging.WARNING)
setup_logger()
class GUIExecutionAPI(lang.ExecutionAPI):
def __init__(self, communicator):
self.comm = communicator
def read(self):
self.comm.send('READ_REQUEST')
message = self.comm.receive()
if message.header != 'READ_DONE':
assert False
return message.body
def show(self, board):
self.comm.send('PARTIAL', tools.board_format.to_string(board))
def log(self, msg):
self.comm.send('LOG', msg)
class ProgramWorker(object):
class RunMode:
FULL = 'full'
ONLY_CHECK = 'only_check'
NAMES = 'names'
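        # FULL runs a program against an initial board, ONLY_CHECK parses and checks it
        # without running, and NAMES only extracts the defined names
        # (see GobstonesWorker.start for how each mode is handled).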
def __init__(self, communicator):
self.communicator = communicator
def prepare(self):
pass
def start(self, filename, program_text, initial_board_string):
pass
def exit(self):
pass
def run(self):
self.prepare()
message = self.communicator.receive()
        while message.header not in ['START', 'EXIT']:
            print("Lang got an unexpected message '%s:%s'" % (message.header, message.body))
            message = self.communicator.receive()
if message.header == 'EXIT':
self.exit()
return
filename, program_text, initial_board_string, run_mode, gobstones_version = message.body
self.start(
filename,
program_text,
initial_board_string,
run_mode,
gobstones_version
)
class GobstonesWorker(ProgramWorker):
def prepare(self):
self.api = GUIExecutionAPI(self.communicator)
def start(self, filename, program_text, initial_board_string,
run_mode, gobstones_version="xgobstones"):
if run_mode == GobstonesWorker.RunMode.ONLY_CHECK:
options = lang.GobstonesOptions(
lang_version=gobstones_version,
check_liveness=True,
lint_mode="strict"
)
else:
options = lang.GobstonesOptions(lang_version=gobstones_version)
self.gobstones = lang.Gobstones(options, self.api)
try:
if run_mode == GobstonesWorker.RunMode.FULL:
board = tools.board_format.from_string(initial_board_string)
self.success(self.gobstones.run(filename, program_text, board))
elif run_mode == GobstonesWorker.RunMode.ONLY_CHECK:
# Parse gobstones script
self.gobstones.api.log(i18n.i18n('Parsing.'))
gbs_run = self.gobstones.parse(filename, program_text)
assert gbs_run.tree
# Check semantics, liveness and types
self.gobstones.check(gbs_run.tree)
self.success()
elif run_mode == GobstonesWorker.RunMode.NAMES:
self.success(self.gobstones.parse_names(filename, program_text))
else:
raise Exception(
"There is no action associated " +
"with the given run mode."
)
except Exception as exception:
self.failure(exception)
logging.getLogger("lang").error(
"%s\n%s" % (exception, traceback.format_exc())
)
def success(self, result=None):
if result is None:
self.communicator.send('OK', (None, None))
elif isinstance(result, GobstonesRun):
self.communicator.send('OK', (
tools.board_format.to_string(result.final_board),
result.result
))
elif isinstance(result, dict):
self.communicator.send('OK', (result,))
else:
assert False
def failure(self, exception):
if hasattr(exception, 'area'):
self.communicator.send(
'FAIL',
(exception.__class__,
(exception.msg, exception.area))
)
elif hasattr(exception, 'msg'):
self.communicator.send(
'FAIL',
(exception.__class__, (exception.msg, ))
)
else:
self.communicator.send(
'FAIL',
(utils.GobstonesException, (str(exception),))
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# hb_balancer
# High performance load balancer between Helbreath World Servers.
#
# Copyright (C) 2012 Michał Papierski <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import struct
import random
import logging
from twisted.internet import reactor
from twisted.protocols.stateful import StatefulProtocol
from twisted.python import log
from packets import Packets
class BaseHelbreathProtocol(StatefulProtocol):
''' Basic Helbreath Protocol '''
def getInitialState(self):
'''
Protocol overview:
[Key unsigned byte] [Size unsigned short] [Data Size-bytes]
'''
return (self.get_key, 1)
def get_key(self, data):
''' Get key '''
self.key, = struct.unpack('<B', data)
return (self.get_data_size, 2)
def get_data_size(self, data):
''' Read data size '''
self.data_size, = struct.unpack('<H', data)
return (self.get_data, self.data_size - 3)
def get_data(self, data):
''' Read encoded data and decode it '''
if self.key > 0:
# Decode
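            # This is the inverse of the transform in send_message: XOR each byte with
            # (key ^ (payload_length - i)) and then subtract (i ^ key), mod 256,
            # where payload_length == self.data_size - 3.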
data = list(data)
for i in range(len(data)):
data[i] = chr(((ord(data[i]) ^ (self.key ^ (self.data_size - 3 - i))) - (i ^ self.key)) % 256)
data = ''.join(data)
# Pass decoded data
self.raw_data(data)
return (self.get_key, 1)
def send_message(self, data):
''' Send a Helbreath Packet data '''
key = random.randint(0, 255)
if key > 0:
# Encode
data = list(data)
for i in range(len(data)):
data[i] = chr(((ord(data[i]) + (i ^ key)) ^ (key ^ (len(data) - i))) % 256)
data = ''.join(data)
self.transport.write(struct.pack('<BH', key, len(data) + 3) + data)
def raw_data(self, data):
''' Got packet '''
pass
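# Illustrative framing sketch (not part of the balancer logic): with key == 0 the
# payload is sent unencoded, so a frame is simply
#     struct.pack('<BH', 0, len(payload) + 3) + payload
# where the size field also counts the 1-byte key and the 2-byte size header.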
class ProxyHelbreathProtocol(BaseHelbreathProtocol):
''' Proxy Helbreath protocol used for proxying packets '''
def connectionMade(self):
self.factory.success(self)
def login(self, account_name, account_password, world_name):
''' Request a login '''
        # Cast to str explicitly, because world_name (and the credentials)
        # may arrive as either str or unicode.
self.send_message(struct.pack('<IH10s10s30s',
Packets.MSGID_REQUEST_LOGIN, # MsgID
0, # MsgType
str(account_name),
str(account_password),
str(world_name)))
def raw_data(self, data):
self.factory.receiver(data)
self.transport.loseConnection()
class HelbreathProtocol(BaseHelbreathProtocol):
def raw_data(self, data):
# Header
msg_id, msg_type = struct.unpack('<IH', data[:6])
# Process packet data
if msg_id == Packets.MSGID_REQUEST_LOGIN:
# Client is requesting login
packet_format = '<10s10s30s'
account_name, account_password, world_name = struct.unpack(
packet_format,
data[6:]
)
self.request_login(
account_name.rstrip('\x00'),
account_password.rstrip('\x00'),
world_name.rstrip('\x00')
)
elif msg_id == Packets.MSGID_REQUEST_ENTERGAME:
# Client is trying to enter game
packet_format = '<10s10s10s10si30s120s'
player_name, map_name, account_name, account_password, \
level, world_name, cmd_line = struct.unpack(
packet_format,
data[6:])
self.request_entergame(
msg_type,
player_name.rstrip('\x00'),
map_name.rstrip('\x00'),
account_name.rstrip('\x00'),
account_password.rstrip('\x00'),
level,
world_name.rstrip('\x00'),
cmd_line.rstrip('\x00'))
else:
# Abort if a packet is not (yet) known
self.transport.loseConnection()
def request_login(self, account_name, account_password, world_name):
''' Request client login
account_name -- Account name
account_password -- Account password
world_name -- World server name
'''
def world_is_down(failure = None):
''' The requested world is offline '''
self.send_message(struct.pack('<IH',
Packets.MSGID_RESPONSE_LOG,
Packets.DEF_LOGRESMSGTYPE_NOTEXISTINGWORLDSERVER))
reactor.callLater(10, self.transport.loseConnection)
def handle_response(data):
''' Pass data and close the connection nicely '''
self.send_message(data)
reactor.callLater(10, self.transport.loseConnection)
def connection_made(remote):
''' Connection is made. Request a login. '''
log.msg('Remote connection made!')
remote.login(
account_name,
account_password,
remote.factory.world_name
)
# Request connection to a world by its name, pass some callbacks
self.factory.connect_to_world(
world_name = world_name,
receiver = handle_response,
success = connection_made,
failure = world_is_down)
log.msg('Request world %s' % (world_name, ))
def request_entergame(self, msg_type, player_name, map_name, account_name,
account_password, level, world_name, cmd_line):
''' Client wants to enter game. '''
log.msg('Request entergame player(%s) map(%s) account(%s) world(%s)' % (
player_name, map_name, account_name, world_name))
def connection_made(remote):
            ''' Request enter game, constructing exactly the same data.
            TODO: Parse the msg_type. '''
log.msg('Requesting enter game...')
remote.send_message(struct.pack('<IH10s10s10s10si30s120s',
Packets.MSGID_REQUEST_ENTERGAME,
msg_type,
player_name,
map_name,
account_name,
account_password,
level,
str(remote.factory.world_name),
cmd_line))
def error_handler(failure = None):
''' Unable to connect to destination world '''
log.err('Enter game error for account(%s) at world(%s)' % (
account_name,
world_name))
self.send_message(struct.pack('<IHB',
Packets.MSGID_RESPONSE_ENTERGAME,
Packets.DEF_ENTERGAMERESTYPE_REJECT,
Packets.DEF_REJECTTYPE_DATADIFFERENCE))
reactor.callLater(10, self.transport.loseConnection)
def response_handler(data):
''' Pass the (modified) data '''
self.send_message(data)
self.factory.connect_to_world(
world_name = world_name,
receiver = response_handler,
success = connection_made,
failure = error_handler
)
|
from bs4 import BeautifulSoup
import requests
import json
from collections import OrderedDict
# TODO: Add OAuth2 so the API can make full use of the 30-requests-per-minute
# rate limit when fetching individual repos via the GitHub search API.
class GhShowcaseAPI(object):
""""""
def __init__(self,):
pass
def get_showcase(self, showcase_name,):
"""
Returns meta-data and repos displayed in a specific github showcase
"""
raw_showcase_html_page = requests.get("https://github.com/showcases/{0}".format(showcase_name))
showcase_pg_tree = BeautifulSoup(raw_showcase_html_page.content, "html.parser")
# NOTE: Scraped showcase attributes
showcase_title = showcase_pg_tree.title.string
showcase_description = showcase_pg_tree.find("div", class_="markdown-body showcase-page-description").get_text().strip("\n")
showcase_meta_info = showcase_pg_tree.find_all("span", class_="meta-info")
(repo_count, language_count, last_updated) = (" ".join(meta_datum.get_text().split()) for meta_datum in showcase_meta_info)
raw_showcase_repo_list = showcase_pg_tree.find_all("li", class_="repo-list-item repo-list-item-with-avatar")
showcase_resp = (
("showcase_title", showcase_title),
("showcase_description", showcase_description),
("repo_count", repo_count),
("languages_count", language_count),
("last_updated", last_updated),
("showcased_repos", self._build_showcased_repos_list(raw_showcase_repo_list,)),
) # end showcase_resp
return json.dumps(OrderedDict(showcase_resp), indent=4,) # OrderedDict maintains ordering at time of transfer
def _build_showcased_repos_list(self, raw_showcase_html_list,):
"""
Collect individual showcased repo data to pass back to the showcase
response object
"""
showcased_repos = []
def check_if_description_provided(repo_description):
"""
Repo descriptions are not always provided by the curator of
the repository.
"""
            if repo_description is None:
                return "No description available"
            return repo_description.get_text()
for repo_tag in raw_showcase_html_list:
repo_name = repo_tag.span.string
repo_description = check_if_description_provided(repo_tag.p)
repo_url = "https://github.com{0}".format(repo_tag.h3.a["href"])
repo = self._build_showcase_repo(repo_name=repo_name,
repo_description=repo_description,
repo_url=repo_url,
) # end repo
showcased_repos.append(repo)
return showcased_repos
def _build_showcase_repo(self, repo_name, repo_description, repo_url,):
"""
Parse raw html containing the showcase repo data
"""
return {"repo_name": repo_name,
"repo_description": repo_description,
"repo_url": repo_url,
} # end repo
def get_showcased_repo(self, repo_name):
"""Get a specific repo within a showcase"""
pass
def get_available_showcases(self,):
"""Get all showcased topics currently available on github"""
pass
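# Example usage (hypothetical showcase slug; the scraper assumes GitHub's current
# showcase page markup and will break if that HTML changes):
#
#   api = GhShowcaseAPI()
#   print(api.get_showcase("machine-learning"))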
|
# -*- coding: utf-8 -*-
#
# align.py - align commander module
#
# Copyright (C) 2010 - Jesse van den Kieboom
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
import commander.commands as commands
import commander.commands.completion
import commander.commands.result
import commander.commands.exceptions
from functools import reduce
import re
__commander_module__ = True
def _get_groups(m, group, add_ws_group):
if len(m.groups()) <= group - 1:
gidx = 0
else:
gidx = group
if len(m.groups()) <= add_ws_group - 1:
wsidx = 0
else:
wsidx = add_ws_group
# Whitespace group must be contained in align group
if m.start(wsidx) < m.start(gidx) or m.end(wsidx) > m.end(gidx):
wsidx = gidx
return (gidx, wsidx)
class Line:
def __init__(self, line, reg, tabwidth):
self.tabwidth = tabwidth
self.line = line
# All the separators
self.matches = list(reg.finditer(line))
# @newline initially contains the first column
if not self.matches:
# No separator found
self.newline = str(line)
else:
# Up to first separator
self.newline = line[0:self.matches[0].start(0)]
def matches_len(self):
return len(self.matches)
def new_len(self, extra=''):
return len((self.newline + extra).expandtabs(self.tabwidth))
def match(self, idx):
if idx >= self.matches_len():
return None
return self.matches[idx]
def append(self, idx, num, group, add_ws_group):
m = self.match(idx)
if m == None:
return
gidx, wsidx = _get_groups(m, group, add_ws_group)
# Append leading match
self.newline += self.line[m.start(0):m.start(gidx)]
# Now align by replacing wsidx with spaces
prefix = self.line[m.start(gidx):m.start(wsidx)]
suffix = self.line[m.end(wsidx):m.end(gidx)]
sp = ''
while True:
bridge = prefix + sp + suffix
if self.new_len(bridge) < num:
sp += ' '
else:
break
self.newline += bridge
# Then append the rest of the match
mnext = self.match(idx + 1)
if mnext == None:
endidx = None
else:
endidx = mnext.start(0)
self.newline += self.line[m.end(gidx):endidx]
def __str__(self):
return self.newline
def _find_max_align(lines, idx, group, add_ws_group):
num = 0
# We will align on 'group', by adding spaces to 'add_ws_group'
for line in lines:
m = line.match(idx)
if m != None:
gidx, wsidx = _get_groups(m, group, add_ws_group)
# until the start
extra = line.line[m.start(0):m.start(wsidx)] + line.line[m.end(wsidx):m.end(gidx)]
# Measure where to align it
l = line.new_len(extra)
else:
l = line.new_len()
if l > num:
num = l
return num
def _regex(view, reg, group, additional_ws, add_ws_group, flags=0):
buf = view.get_buffer()
# Get the selection of lines to align columns on
bounds = buf.get_selection_bounds()
if not bounds:
start = buf.get_iter_at_mark(buf.get_insert())
start.set_line_offset(0)
end = start.copy()
if not end.ends_line():
end.forward_to_line_end()
bounds = (start, end)
if not bounds[0].equal(bounds[1]) and bounds[1].starts_line():
bounds[1].backward_line()
if not bounds[1].ends_line():
bounds[1].forward_to_line_end()
# Get the regular expression from the user
if reg == None:
reg, words, modifier = (yield commander.commands.result.Prompt('Regex:'))
# Compile the regular expression
try:
reg = re.compile(reg, flags)
except Exception as e:
raise commander.commands.exceptions.Execute('Failed to compile regular expression: %s' % (e,))
# Query the user to provide a regex group number to align on
if group == None:
group, words, modifier = (yield commander.commands.result.Prompt('Group (1):'))
try:
group = int(group)
except:
group = 1
# Query the user for additional whitespace to insert for separating items
if additional_ws == None:
additional_ws, words, modifier = (yield commander.commands.result.Prompt('Additional whitespace (0):'))
try:
additional_ws = int(additional_ws)
except:
additional_ws = 0
# Query the user for the regex group number on which to add the
# whitespace
if add_ws_group == None:
add_ws_group, words, modifier = (yield commander.commands.result.Prompt('Whitespace group (1):'))
try:
add_ws_group = int(add_ws_group)
except:
add_ws_group = -1
# By default, add the whitespace on the group on which the columns are
# aligned
if add_ws_group < 0:
add_ws_group = group
start, end = bounds
if not start.starts_line():
start.set_line_offset(0)
if not end.ends_line():
end.forward_to_line_end()
lines = start.get_text(end).splitlines()
newlines = []
num = 0
tabwidth = view.get_tab_width()
# Construct Line objects for all the lines
newlines = [Line(line, reg, tabwidth) for line in lines]
# Calculate maximum number of matches (i.e. columns)
num = reduce(lambda x, y: max(x, y.matches_len()), newlines, 0)
for i in range(num):
al = _find_max_align(newlines, i, group, add_ws_group)
for line in newlines:
line.append(i, al + additional_ws, group, add_ws_group)
# Replace lines
aligned = str.join('\n', [x.newline for x in newlines])
buf.begin_user_action()
buf.delete(bounds[0], bounds[1])
m = buf.create_mark(None, bounds[0], True)
buf.insert(bounds[1], aligned)
buf.select_range(buf.get_iter_at_mark(m), bounds[1])
buf.delete_mark(m)
buf.end_user_action()
yield commander.commands.result.DONE
def __default__(view, reg='\s+', align_group=1, padding=1, padding_group=-1):
"""Align selected in columns using a regular expression: align.regex [<regex>=<i>\s+</i>] [<align-group>] [<padding>] [<padding-group>=<i><align-group></i>]
Align the selected text in columns separated by the specified regular expression.
The optional <align-group> argument specifies on which group in the regular expression
the text should be aligned and defaults to 1 (or 0 in the case that there is
no explicit group specified). The <align-group> will be <b>replaced</b>
with whitespace to align the columns. The optional <padding> argument can
be used to add additional whitespace to the column separation. The last
optional argument (<padding-group>) can be used to specify a separate
group (which must be contained in <align-group>) which to replace with
whitespace.
The regular expression will be matched in case-sensitive mode"""
yield _regex(view, reg, align_group, padding, padding_group)
def i(view, reg='\s+', align_group=1, padding=1, padding_group=-1):
"""Align selected in columns using a regular expression: align.regex [<regex>=<i>\s+</i>] [<align-group>] [<padding>] [<padding-group>=<i><align-group></i>]
Align the selected text in columns separated by the specified regular expression.
The optional <align-group> argument specifies on which group in the regular expression
the text should be aligned and defaults to 1 (or 0 in the case that there is
no explicit group specified). The <align-group> will be <b>replaced</b>
with whitespace to align the columns. The optional <padding> argument can
be used to add additional whitespace to the column separation. The last
optional argument (<padding-group>) can be used to specify a separate
group (which must be contained in <align-group>) which to replace with
whitespace.
The regular expression will be matched in case-insensitive mode"""
yield _regex(view, reg, align_group, padding, padding_group, re.IGNORECASE)
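# Illustrative example (default regex '\s+', padding 1): aligning the lines
#
#   foo 1 alpha
#   foobar 23 b
#
# produces
#
#   foo    1  alpha
#   foobar 23 b
#
# because each whitespace run is widened so every column starts at the same offset.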
# ex:ts=4:et
|
""" Unit Test of Workflow Modules
"""
import unittest
import itertools
import os
import copy
import shutil
from mock import MagicMock as Mock
from DIRAC import gLogger
class ModulesTestCase( unittest.TestCase ):
""" Base class for the Modules test cases
"""
def setUp( self ):
gLogger.setLevel( 'ERROR' )
# import sys
# sys.modules["DIRAC"] = DIRAC.ResourceStatusSystem.test.fake_Logger
# sys.modules["DIRAC.ResourceStatusSystem.Utilities.CS"] = DIRAC.ResourceStatusSystem.test.fake_Logger
self.jr_mock = Mock()
self.jr_mock.setApplicationStatus.return_value = {'OK': True, 'Value': ''}
self.jr_mock.generateRequest.return_value = {'OK': True, 'Value': 'pippo'}
self.jr_mock.setJobParameter.return_value = {'OK': True, 'Value': 'pippo'}
self.jr_mock.generateForwardDISET.return_value = {'OK': True, 'Value': 'pippo'}
# self.jr_mock.setJobApplicationStatus.return_value = {'OK': True, 'Value': 'pippo'}
self.fr_mock = Mock()
self.fr_mock.getFiles.return_value = {}
self.fr_mock.setFileStatus.return_value = {'OK': True, 'Value': ''}
self.fr_mock.commit.return_value = {'OK': True, 'Value': ''}
self.fr_mock.generateRequest.return_value = {'OK': True, 'Value': ''}
rc_mock = Mock()
rc_mock.update.return_value = {'OK': True, 'Value': ''}
rc_mock.setDISETRequest.return_value = {'OK': True, 'Value': ''}
rc_mock.isEmpty.return_value = {'OK': True, 'Value': ''}
rc_mock.toXML.return_value = {'OK': True, 'Value': ''}
rc_mock.getDigest.return_value = {'OK': True, 'Value': ''}
rc_mock.__len__.return_value = 1
self.rc_mock = rc_mock
ar_mock = Mock()
ar_mock.commit.return_value = {'OK': True, 'Value': ''}
self.rm_mock = Mock()
self.rm_mock.getReplicas.return_value = {'OK': True, 'Value':{'Successful':{'pippo':'metadataPippo'},
'Failed':None}}
self.rm_mock.getCatalogFileMetadata.return_value = {'OK': True, 'Value':{'Successful':{'pippo':'metadataPippo'},
'Failed':None}}
self.rm_mock.removeFile.return_value = {'OK': True, 'Value': {'Failed':False}}
self.rm_mock.putStorageDirectory.return_value = {'OK': True, 'Value': {'Failed':False}}
self.rm_mock.addCatalogFile.return_value = {'OK': True, 'Value': {'Failed':False}}
self.rm_mock.putAndRegister.return_value = {'OK': True, 'Value': {'Failed':False}}
self.rm_mock.getFile.return_value = {'OK': True, 'Value': {'Failed':False}}
self.jsu_mock = Mock()
self.jsu_mock.setJobApplicationStatus.return_value = {'OK': True, 'Value': ''}
self.jsu_mock = Mock()
self.jsu_mock.setJobApplicationStatus.return_value = {'OK': True, 'Value': ''}
request_mock = Mock()
request_mock.addSubRequest.return_value = {'OK': True, 'Value': ''}
request_mock.setSubRequestFiles.return_value = {'OK': True, 'Value': ''}
request_mock.getNumSubRequests.return_value = {'OK': True, 'Value': ''}
request_mock._getLastOrder.return_value = 1
self.ft_mock = Mock()
self.ft_mock.transferAndRegisterFile.return_value = {'OK': True, 'Value': {'uploadedSE':''}}
self.ft_mock.transferAndRegisterFileFailover.return_value = {'OK': True, 'Value': {}}
self.nc_mock = Mock()
self.nc_mock.sendMail.return_value = {'OK': True, 'Value': ''}
self.prod_id = 123
self.prod_job_id = 456
self.wms_job_id = 0
self.workflowStatus = {'OK':True}
self.stepStatus = {'OK':True}
self.wf_commons = [{'PRODUCTION_ID': str( self.prod_id ), 'JOB_ID': str( self.prod_job_id ), 'eventType': '123456789', 'jobType': 'merge',
'configName': 'aConfigName', 'configVersion': 'aConfigVersion', 'outputDataFileMask':'',
'BookkeepingLFNs':'aa', 'ProductionOutputData':'ProductionOutputData', 'numberOfEvents':'100',
'JobReport':self.jr_mock, 'Request':rc_mock, 'AccountingReport': ar_mock, 'FileReport':self.fr_mock,
'SystemConfig':'sys_config', 'runNumber':'Unknown', 'appSteps': ['someApp_1']},
{'PRODUCTION_ID': str( self.prod_id ), 'JOB_ID': str( self.prod_job_id ),
'configName': 'aConfigName', 'configVersion': 'aConfigVersion', 'outputDataFileMask':'', 'jobType': 'merge',
'BookkeepingLFNs':'aa', 'ProductionOutputData':'ProductionOutputData', 'numberOfEvents':'100',
'JobReport':self.jr_mock, 'Request':rc_mock, 'AccountingReport': ar_mock, 'FileReport':self.fr_mock,
'SystemConfig':'sys_config', 'LogFilePath':'someDir', 'runNumber':'Unknown',
'appSteps': ['someApp_1']},
{'PRODUCTION_ID': str( self.prod_id ), 'JOB_ID': str( self.prod_job_id ),
'configName': 'aConfigName', 'configVersion': 'aConfigVersion', 'outputDataFileMask':'', 'jobType': 'merge',
'BookkeepingLFNs':'aa', 'ProductionOutputData':'ProductionOutputData', 'numberOfEvents':'100',
'JobReport':self.jr_mock, 'Request':rc_mock, 'AccountingReport': ar_mock, 'FileReport':self.fr_mock,
'SystemConfig':'sys_config', 'LogFilePath':'someDir', 'LogTargetPath':'someOtherDir',
'runNumber':'Unknown', 'appSteps': ['someApp_1']},
{'PRODUCTION_ID': str( self.prod_id ), 'JOB_ID': str( self.prod_job_id ),
'configName': 'aConfigName', 'configVersion': 'aConfigVersion', 'outputDataFileMask':'', 'jobType': 'merge',
'BookkeepingLFNs':'aa', 'ProductionOutputData':'ProductionOutputData', 'numberOfEvents':'100',
'JobReport':self.jr_mock, 'Request':rc_mock, 'AccountingReport': ar_mock, 'FileReport':self.fr_mock,
'SystemConfig':'sys_config', 'LogFilePath':'someDir', 'LogTargetPath':'someOtherDir',
'runNumber':'Unknown', 'appSteps': ['someApp_1'] },
{'PRODUCTION_ID': str( self.prod_id ), 'JOB_ID': str( self.prod_job_id ),
'configName': 'aConfigName', 'configVersion': 'aConfigVersion', 'outputDataFileMask':'', 'jobType': 'reco',
'BookkeepingLFNs':'aa', 'ProductionOutputData':'ProductionOutputData',
'JobReport':self.jr_mock, 'Request':rc_mock, 'AccountingReport': ar_mock, 'FileReport':self.fr_mock,
'SystemConfig':'sys_config', 'runNumber':'Unknown', 'appSteps': ['someApp_1']},
{'PRODUCTION_ID': str( self.prod_id ), 'JOB_ID': str( self.prod_job_id ),
'configName': 'aConfigName', 'configVersion': 'aConfigVersion', 'outputDataFileMask':'', 'jobType': 'reco',
'BookkeepingLFNs':'aa', 'ProductionOutputData':'ProductionOutputData',
'JobReport':self.jr_mock, 'Request':rc_mock, 'AccountingReport': ar_mock, 'FileReport':self.fr_mock,
'SystemConfig':'sys_config', 'LogFilePath':'someDir', 'runNumber':'Unknown',
'appSteps': ['someApp_1']},
{'PRODUCTION_ID': str( self.prod_id ), 'JOB_ID': str( self.prod_job_id ),
'configName': 'aConfigName', 'configVersion': 'aConfigVersion', 'outputDataFileMask':'', 'jobType': 'reco',
'BookkeepingLFNs':'aa', 'ProductionOutputData':'ProductionOutputData',
'JobReport':self.jr_mock, 'Request':rc_mock, 'AccountingReport': ar_mock, 'FileReport':self.fr_mock,
'SystemConfig':'sys_config', 'LogFilePath':'someDir', 'LogTargetPath':'someOtherDir',
'runNumber':'Unknown', 'appSteps': ['someApp_1']},
{'PRODUCTION_ID': str( self.prod_id ), 'JOB_ID': str( self.prod_job_id ),
'configName': 'aConfigName', 'configVersion': 'aConfigVersion', 'outputDataFileMask':'', 'jobType': 'reco',
'BookkeepingLFNs':'aa', 'ProductionOutputData':'ProductionOutputData',
'JobReport':self.jr_mock, 'Request':rc_mock, 'AccountingReport': ar_mock, 'FileReport':self.fr_mock,
'SystemConfig':'sys_config', 'LogFilePath':'someDir', 'LogTargetPath':'someOtherDir',
'runNumber':'Unknown', 'appSteps': ['someApp_1']},
{'PRODUCTION_ID': str( self.prod_id ), 'JOB_ID': str( self.prod_job_id ),
'configName': 'aConfigName', 'configVersion': 'aConfigVersion', 'outputDataFileMask':'', 'jobType': 'reco',
'BookkeepingLFNs':'aa', 'ProductionOutputData':'ProductionOutputData',
'JobReport':self.jr_mock, 'Request':rc_mock, 'AccountingReport': ar_mock, 'FileReport':self.fr_mock,
'SystemConfig':'sys_config', 'LogFilePath':'someDir', 'LogTargetPath':'someOtherDir',
'runNumber':'Unknown', 'InputData': '', 'appSteps': ['someApp_1'] },
{'PRODUCTION_ID': str( self.prod_id ), 'JOB_ID': str( self.prod_job_id ),
'configName': 'aConfigName', 'configVersion': 'aConfigVersion', 'outputDataFileMask':'', 'jobType': 'reco',
'BookkeepingLFNs':'aa', 'ProductionOutputData':'ProductionOutputData',
'JobReport':self.jr_mock, 'Request':rc_mock, 'AccountingReport': ar_mock, 'FileReport':self.fr_mock,
'SystemConfig':'sys_config', 'LogFilePath':'someDir', 'LogTargetPath':'someOtherDir',
'runNumber':'Unknown', 'InputData': 'foo;bar', 'appSteps': ['someApp_1'] },
{'PRODUCTION_ID': str( self.prod_id ), 'JOB_ID': str( self.prod_job_id ),
'configName': 'aConfigName', 'configVersion': 'aConfigVersion', 'outputDataFileMask':'', 'jobType': 'reco',
'BookkeepingLFNs':'aa', 'ProductionOutputData':'ProductionOutputData',
'JobReport':self.jr_mock, 'Request':rc_mock, 'AccountingReport': ar_mock, 'FileReport':self.fr_mock,
'SystemConfig':'sys_config', 'LogFilePath':'someDir', 'LogTargetPath':'someOtherDir',
'runNumber':'Unknown', 'InputData': 'foo;bar', 'ParametricInputData':'' ,
'appSteps': ['someApp_1']},
{'PRODUCTION_ID': str( self.prod_id ), 'JOB_ID': str( self.prod_job_id ),
'configName': 'aConfigName', 'configVersion': 'aConfigVersion', 'outputDataFileMask':'', 'jobType': 'reco',
'BookkeepingLFNs':'aa', 'ProductionOutputData':'ProductionOutputData',
'JobReport':self.jr_mock, 'Request':rc_mock, 'AccountingReport': ar_mock, 'FileReport':self.fr_mock,
'SystemConfig':'sys_config', 'LogFilePath':'someDir', 'LogTargetPath':'someOtherDir',
'runNumber':'Unknown', 'InputData': 'foo;bar', 'ParametricInputData':'pid1;pid2;pid3',
'appSteps': ['someApp_1']},
]
self.step_commons = [{'applicationName':'someApp', 'applicationVersion':'v1r0', 'eventType': '123456789',
'applicationLog':'appLog', 'extraPackages':'', 'XMLSummary':'XMLSummaryFile',
'numberOfEvents':'100', 'BKStepID':'123', 'StepProcPass':'Sim123', 'outputFilePrefix':'pref_',
'STEP_INSTANCE_NAME':'someApp_1',
'listoutput':[{'outputDataName':str( self.prod_id ) + '_' + str( self.prod_job_id ) + '_', 'outputDataSE':'aaa',
'outputDataType':'bbb'}]},
{'applicationName':'someApp', 'applicationVersion':'v1r0', 'eventType': '123456789',
'applicationLog':'appLog', 'extraPackages':'', 'XMLSummary':'XMLSummaryFile',
'numberOfEvents':'100', 'BKStepID':'123', 'StepProcPass':'Sim123', 'outputFilePrefix':'pref_',
'optionsLine': '',
'STEP_INSTANCE_NAME':'someApp_1',
'listoutput':[{'outputDataName':str( self.prod_id ) + '_' + str( self.prod_job_id ) + '_', 'outputDataSE':'aaa',
'outputDataType':'bbb'}]},
{'applicationName':'someApp', 'applicationVersion':'v1r0', 'eventType': '123456789',
'applicationLog':'appLog', 'extraPackages':'', 'XMLSummary':'XMLSummaryFile',
'numberOfEvents':'100', 'BKStepID':'123', 'StepProcPass':'Sim123', 'outputFilePrefix':'pref_',
'extraOptionsLine': 'blaBla',
'STEP_INSTANCE_NAME':'someApp_1',
'listoutput':[{'outputDataName':str( self.prod_id ) + '_' + str( self.prod_job_id ) + '_', 'outputDataSE':'aaa',
'outputDataType':'bbb'}]}
]
self.step_number = '321'
self.step_id = '%s_%s_%s' % ( self.prod_id, self.prod_job_id, self.step_number )
from DIRAC.Workflow.Modules.ModuleBase import ModuleBase
self.mb = ModuleBase()
self.mb.rm = self.rm_mock
self.mb.request = self.rc_mock
self.mb.jobReport = self.jr_mock
self.mb.fileReport = self.fr_mock
self.mb.workflow_commons = self.wf_commons[0]
from DIRAC.Workflow.Modules.FailoverRequest import FailoverRequest
self.fr = FailoverRequest()
self.fr.request = self.rc_mock
self.fr.jobReport = self.jr_mock
self.fr.fileReport = self.fr_mock
from DIRAC.Workflow.Modules.Script import Script
self.script = Script()
self.script.request = self.rc_mock
self.script.jobReport = self.jr_mock
self.script.fileReport = self.fr_mock
def tearDown( self ):
for fileProd in ['appLog', 'foo.txt', 'aaa.Bhadron.dst', 'bbb.Calibration.dst', 'bar_2.py', 'foo_1.txt',
'ccc.charm.mdst', 'prova.txt', 'foo.txt', 'BAR.txt', 'FooBAR.ext.txt', 'applicationLog.txt',
'ErrorLogging_Step1_coredump.log', '123_00000456_request.xml', 'lfn1', 'lfn2',
'aaa.bhadron.dst', 'bbb.calibration.dst', 'ProductionOutputData', 'data.py',
'00000123_00000456.tar', 'someOtherDir', 'DISABLE_WATCHDOG_CPU_WALLCLOCK_CHECK',
]:
try:
os.remove( fileProd )
except OSError:
continue
for directory in ['./job', 'job']:
try:
shutil.rmtree( directory )
except:
continue
#############################################################################
# ModuleBase.py
#############################################################################
class ModuleBaseSuccess( ModulesTestCase ):
#################################################
def test__checkLocalExistance( self ):
self.assertRaises( OSError, self.mb._checkLocalExistance, ['aaa', 'bbb'] )
#################################################
def test__applyMask( self ):
candidateFiles = {'00012345_00012345_4.dst':
{'lfn': '/lhcb/MC/2010/DST/00012345/0001/00012345_00012345_4.dst',
'type': 'dst',
'workflowSE': 'Tier1_MC_M-DST'},
'00012345_00012345_2.digi': {'type': 'digi', 'workflowSE': 'Tier1-RDST'},
'00012345_00012345_3.digi': {'type': 'digi', 'workflowSE': 'Tier1-RDST'},
'00012345_00012345_5.AllStreams.dst':
{'lfn': '/lhcb/MC/2010/DST/00012345/0001/00012345_00012345_5.AllStreams.dst',
'type': 'allstreams.dst',
'workflowSE': 'Tier1_MC_M-DST'},
'00012345_00012345_1.sim': {'type': 'sim', 'workflowSE': 'Tier1-RDST'}}
fileMasks = ( ['dst'], 'dst', ['sim'], ['digi'], ['digi', 'sim'], 'allstreams.dst' )
stepMasks = ( '', '5', '', ['2'], ['1', '3'], '' )
results = (
{'00012345_00012345_4.dst':
{'lfn': '/lhcb/MC/2010/DST/00012345/0001/00012345_00012345_4.dst',
'type': 'dst',
'workflowSE': 'Tier1_MC_M-DST'}
},
{},
{'00012345_00012345_1.sim': {'type': 'sim', 'workflowSE': 'Tier1-RDST'}
},
{'00012345_00012345_2.digi': {'type': 'digi', 'workflowSE': 'Tier1-RDST'},
},
{'00012345_00012345_3.digi': {'type': 'digi', 'workflowSE': 'Tier1-RDST'},
'00012345_00012345_1.sim': {'type': 'sim', 'workflowSE': 'Tier1-RDST'}
},
{'00012345_00012345_5.AllStreams.dst':
{'lfn': '/lhcb/MC/2010/DST/00012345/0001/00012345_00012345_5.AllStreams.dst',
'type': 'allstreams.dst',
'workflowSE': 'Tier1_MC_M-DST'}
}
)
for fileMask, result, stepMask in itertools.izip( fileMasks, results, stepMasks ):
res = self.mb._applyMask( candidateFiles, fileMask, stepMask )
self.assertEqual( res, result )
#################################################
def test__checkSanity( self ):
candidateFiles = {'00012345_00012345_4.dst':
{'lfn': '/lhcb/MC/2010/DST/00012345/0001/00012345_00012345_4.dst',
'type': 'dst',
'workflowSE': 'Tier1_MC_M-DST'},
'00012345_00012345_2.digi': {'type': 'digi', 'workflowSE': 'Tier1-RDST'},
'00012345_00012345_3.digi': {'type': 'digi', 'workflowSE': 'Tier1-RDST'},
'00012345_00012345_5.AllStreams.dst':
{'lfn': '/lhcb/MC/2010/DST/00012345/0001/00012345_00012345_5.AllStreams.dst',
'type': 'DST',
'workflowSE': 'Tier1_MC_M-DST'},
'00012345_00012345_1.sim': {'type': 'sim', 'workflowSE': 'Tier1-RDST'}}
self.assertRaises( ValueError, self.mb._checkSanity, candidateFiles )
#################################################
def test_getCandidateFiles( self ):
# this needs to avoid the "checkLocalExistance"
open( 'foo_1.txt', 'w' ).close()
open( 'bar_2.py', 'w' ).close()
outputList = [{'outputDataType': 'txt', 'outputDataSE': 'Tier1-RDST', 'outputDataName': 'foo_1.txt'},
{'outputDataType': 'py', 'outputDataSE': 'Tier1-RDST', 'outputDataName': 'bar_2.py'}]
outputLFNs = ['/lhcb/MC/2010/DST/00012345/0001/foo_1.txt', '/lhcb/MC/2010/DST/00012345/0001/bar_2.py']
fileMask = 'txt'
stepMask = ''
result = {'foo_1.txt': {'lfn': '/lhcb/MC/2010/DST/00012345/0001/foo_1.txt',
'type': outputList[0]['outputDataType'],
'workflowSE': outputList[0]['outputDataSE']}}
res = self.mb.getCandidateFiles( outputList, outputLFNs, fileMask, stepMask )
self.assertEqual( res, result )
fileMask = ['txt', 'py']
stepMask = None
result = {'foo_1.txt': {'lfn': '/lhcb/MC/2010/DST/00012345/0001/foo_1.txt',
'type': outputList[0]['outputDataType'],
'workflowSE': outputList[0]['outputDataSE']},
'bar_2.py': {'lfn': '/lhcb/MC/2010/DST/00012345/0001/bar_2.py',
'type': outputList[1]['outputDataType'],
'workflowSE': outputList[1]['outputDataSE']},
}
res = self.mb.getCandidateFiles( outputList, outputLFNs, fileMask, stepMask )
self.assertEqual( res, result )
fileMask = ['aa']
stepMask = None
res = self.mb.getCandidateFiles( outputList, outputLFNs, fileMask, stepMask )
result = {}
self.assertEqual( res, result )
fileMask = ''
stepMask = '2'
result = {'bar_2.py': {'lfn': '/lhcb/MC/2010/DST/00012345/0001/bar_2.py',
'type': outputList[1]['outputDataType'],
'workflowSE': outputList[1]['outputDataSE']}}
res = self.mb.getCandidateFiles( outputList, outputLFNs, fileMask, stepMask )
self.assertEqual( res, result )
fileMask = ''
stepMask = 2
result = {'bar_2.py': {'lfn': '/lhcb/MC/2010/DST/00012345/0001/bar_2.py',
'type': outputList[1]['outputDataType'],
'workflowSE': outputList[1]['outputDataSE']}}
res = self.mb.getCandidateFiles( outputList, outputLFNs, fileMask, stepMask )
self.assertEqual( res, result )
fileMask = ''
stepMask = ['2', '3']
result = {'bar_2.py': {'lfn': '/lhcb/MC/2010/DST/00012345/0001/bar_2.py',
'type': outputList[1]['outputDataType'],
'workflowSE': outputList[1]['outputDataSE']}}
res = self.mb.getCandidateFiles( outputList, outputLFNs, fileMask, stepMask )
self.assertEqual( res, result )
fileMask = ''
stepMask = ['3']
result = {}
res = self.mb.getCandidateFiles( outputList, outputLFNs, fileMask, stepMask )
self.assertEqual( res, result )
def test__enableModule( self ):
self.mb.production_id = self.prod_id
self.mb.prod_job_id = self.prod_job_id
self.mb.jobID = self.wms_job_id
self.mb.workflowStatus = self.workflowStatus
self.mb.stepStatus = self.stepStatus
self.mb.workflow_commons = self.wf_commons[0] ##APS: this is needed
self.mb.step_commons = self.step_commons[0]
self.mb.step_number = self.step_number
self.mb.step_id = self.step_id
self.mb.execute()
self.assertFalse( self.mb._enableModule() )
self.mb.jobID = 1
self.mb.execute()
self.assertTrue( self.mb._enableModule() )
def test__determineStepInputData( self ):
self.mb.stepName = 'DaVinci_2'
inputData = 'previousStep'
self.mb.appSteps = ['Brunel_1', 'DaVinci_2']
self.mb.workflow_commons = {'outputList': [{'stepName': 'Brunel_1',
'outputDataType': 'brunelhist',
'outputBKType': 'BRUNELHIST',
'outputDataSE': 'CERN-HIST',
'outputDataName': 'Brunel_00012345_00006789_1_Hist.root'},
{'stepName': 'Brunel_1',
'outputDataType': 'sdst',
'outputBKType': 'SDST',
'outputDataSE': 'Tier1-BUFFER',
'outputDataName': '00012345_00006789_1.sdst'}
]
}
self.mb.inputDataType = 'SDST'
first = self.mb._determineStepInputData( inputData )
second = ['00012345_00006789_1.sdst']
self.assertEqual( first, second )
inputData = 'previousStep'
self.mb.appSteps = ['Brunel_1', 'DaVinci_2']
self.mb.workflow_commons['outputList'] = [{'stepName': 'Brunel_1',
'outputDataType': 'brunelhist',
'outputBKType': 'BRUNELHIST',
'outputDataSE': 'CERN-HIST',
'outputDataName': 'Brunel_00012345_00006789_1_Hist.root'},
{'stepName': 'Brunel_1',
'outputDataType': 'sdst',
'outputBKType': 'SDST',
'outputDataSE': 'Tier1-BUFFER',
'outputDataName': 'some.sdst'},
{'stepName': 'Brunel_1',
'outputDataType': 'sdst',
'outputBKType': 'SDST',
'outputDataSE': 'Tier1-BUFFER',
'outputDataName': '00012345_00006789_1.sdst'}
]
self.mb.inputDataType = 'SDST'
first = self.mb._determineStepInputData( inputData )
second = ['some.sdst', '00012345_00006789_1.sdst']
self.assertEqual( first, second )
inputData = 'LFN:123.raw'
first = self.mb._determineStepInputData( inputData )
second = ['123.raw']
self.assertEqual( first, second )
#############################################################################
# FailoverRequest.py
#############################################################################
class FailoverRequestSuccess( ModulesTestCase ):
#################################################
def test_execute( self ):
self.fr.jobType = 'merge'
self.fr.stepInputData = ['foo', 'bar']
self.fr.production_id = self.prod_id
self.fr.prod_job_id = self.prod_job_id
self.fr.jobID = self.wms_job_id
self.fr.workflowStatus = self.workflowStatus
self.fr.stepStatus = self.stepStatus
self.fr.workflow_commons = self.wf_commons
self.fr.step_commons = self.step_commons[0]
self.fr.step_number = self.step_number
self.fr.step_id = self.step_id
# no errors, no input data
for wf_commons in copy.deepcopy( self.wf_commons ):
for step_commons in self.step_commons:
self.fr.workflow_commons = wf_commons
self.fr.step_commons = step_commons
res = self.fr.execute()
self.assert_( res['OK'] )
#############################################################################
# Script.py
#############################################################################
class ScriptSuccess( ModulesTestCase ):
#################################################
def test_execute( self ):
self.script.jobType = 'merge'
self.script.stepInputData = ['foo', 'bar']
self.script.production_id = self.prod_id
self.script.prod_job_id = self.prod_job_id
self.script.jobID = self.wms_job_id
self.script.workflowStatus = self.workflowStatus
self.script.stepStatus = self.stepStatus
self.script.workflow_commons = self.wf_commons
self.script.step_commons = self.step_commons[0]
self.script.step_number = self.step_number
self.script.step_id = self.step_id
self.script.executable = 'ls'
self.script.applicationLog = 'applicationLog.txt'
# no errors, no input data
for wf_commons in copy.deepcopy( self.wf_commons ):
for step_commons in self.step_commons:
self.script.workflow_commons = wf_commons
self.script.step_commons = step_commons
self.script._setCommand()
res = self.script._executeCommand()
self.assertIsNone( res )
class ScriptFailure( ModulesTestCase ):
#################################################
def test_execute( self ):
self.script.jobType = 'merge'
self.script.stepInputData = ['foo', 'bar']
self.script.production_id = self.prod_id
self.script.prod_job_id = self.prod_job_id
self.script.jobID = self.wms_job_id
self.script.workflowStatus = self.workflowStatus
self.script.stepStatus = self.stepStatus
self.script.workflow_commons = self.wf_commons
self.script.step_commons = self.step_commons[0]
self.script.step_number = self.step_number
self.script.step_id = self.step_id
# no errors, no input data
for wf_commons in copy.deepcopy( self.wf_commons ):
for step_commons in self.step_commons:
self.script.workflow_commons = wf_commons
self.script.step_commons = step_commons
res = self.script.execute()
self.assertFalse( res['OK'] )
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase( ModulesTestCase )
suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( ModuleBaseSuccess ) )
suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( FailoverRequestSuccess ) )
testResult = unittest.TextTestRunner( verbosity = 2 ).run( suite )
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
|
#encoding: utf8
import pdb
import numpy as np
import matplotlib.pyplot as plt
from data_loader import load_data
from cv_common import one_std_error_rule, gen_CV_samples_by_K_folds
'''
When features are linearly correlated (non-zero covariance), one coefficient can grow
very large while others turn negative or shrink toward zero, which inflates the variance
of the estimate. The L1 penalty shrinks the coefficients, reduces the influence of
correlated features, and can set some coefficients exactly to zero.

Model:  f(x) = seta * x
Cost:   J(X; seta) = 1/(2n) * (f(X) - Y)**2 + lambda * ||seta||_1

1. Coordinate descent
   psj = dJ/d(seta_j)
       = 1/n * sum_i[(f(X_i) - Y_i) * X_ij] + r_l1
       = 1/n * sum_i[(seta * X_i - Y_i) * X_ij] + r_l1
       = 1/n * sum_i[sum_{k != j}(seta_k * X_ik) * X_ij + seta_j * X_ij**2 - Y_i * X_ij] + r_l1
   Let:
       p_j = 1/n * sum_i[(Y_i - seta * X_i) * X_ij + seta_j * X_ij**2]
       z_j = 1/n * sum_i(X_ij**2)
   so that psj = -p_j + seta_j * z_j + r_l1, where the subgradient of the L1 term is
       r_l1 =  lambda              if seta_j > 0
               [-lambda, lambda]   if seta_j = 0
              -lambda              if seta_j < 0
   Setting psj = 0 gives the soft-thresholding update
       seta_j = (p_j - lambda) / z_j   if p_j >  lambda
              = (p_j + lambda) / z_j   if p_j < -lambda
              = 0                      otherwise

2. Least angle regression (LARS): not implemented yet.

The degrees of freedom (i.e. the amount of shrinkage) are selected with K-fold cross-validation.

Notes:
1. Without a bias/intercept term, Y must be centered.
2. numpy's svd returns V transposed; transpose it again before use.
3. When computing the standard error of an estimate (a mean), remember to divide by sqrt(n).
4. When standardizing the data, prefer using the full data set and divide by the standard
   deviation, i.e. xi = (xi - mean_hat) / sigma_hat. This example only centers the data and
   does not divide by the standard deviation (dividing is arguably better), whereas the
   best-subset-selection example does fully standardize under CV. Full standardization will
   be tried here later as well.
'''
def lasso_cd(X, Y, lamb):
it_count = 5000
    epsilon = 1e-6
n,m = X.shape
seta = np.ones(m)
mse = 1e10
mse_new = 1e10
for it_i in xrange(it_count):
for j in xrange(m):
Xj2Xj = np.dot(X[:,j], X[:,j])
p_j = 1./n * (np.dot(Y-np.dot(X, seta), X[:,j]) + seta[j]*Xj2Xj)
z_j = 1./n * Xj2Xj
if p_j > lamb:
seta[j] = (p_j - lamb)/z_j
elif p_j < -lamb:
seta[j] = (p_j + lamb)/z_j
else:
seta[j] = 0.
err1 = np.dot(X, seta) - Y
mse_new = np.dot(err1, err1) / n
        if np.abs(mse_new - mse) < epsilon:
break
mse = mse_new
return seta, mse_new
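# Minimal usage sketch of lasso_cd on synthetic data (illustrative only):
#
#   rng = np.random.RandomState(0)
#   X = rng.randn(50, 5)
#   true_beta = np.array([1.5, 0., 0., -2., 0.])
#   Y = X.dot(true_beta) + 0.1 * rng.randn(50)
#   beta_hat, mse = lasso_cd(X, Y, lamb=0.1)   # small coefficients are driven to zero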
def lasso_leasq_cd_CV():
    '''
    Note: in real training, standardization should be fitted on train_X_CV only; test_X_CV must
    not be folded in, so the test fold stays independent of the training data. For convenience,
    this experiment standardizes them together.
    '''
train_X, train_Y, test_X, test_Y, dt_stand_fun = load_data(type=1, need_bias=0, y_standard=1)
K = 10
lamb_lst = np.logspace(-3, 0, 100)
train_mid_rst = []
cv_samples = gen_CV_samples_by_K_folds(train_X, train_Y, K)
for lid in xrange(len(lamb_lst)):
lamb = lamb_lst[lid]
test_mses = np.zeros(K)
ki_rst = []
for ki in range(K):
X_CV, Y_CV, X_t_CV, Y_t_CV = cv_samples[ki]
            # fit the lasso on this fold's training data
seta, train_mse = lasso_cd(X_CV, Y_CV, lamb)
y_hat_err = np.dot(X_t_CV, seta) - Y_t_CV
test_mse = np.dot(y_hat_err, y_hat_err) / len(Y_t_CV)
            df = len(np.where(np.abs(seta) > 1e-5)[0])  # effective degrees of freedom: number of non-zero coefficients
ki_rst.append((lamb, seta, df, train_mse, test_mse))
train_mid_rst.append(ki_rst)
    # Compute the CV training error and the variance of the test error for each lambda
dfs = np.zeros(len(lamb_lst))
mse_means = np.zeros(len(lamb_lst))
mse_mean_stds = np.zeros(len(lamb_lst))
    for lid in range(len(lamb_lst)):
        # Mean and standard error of the errors over the K CV folds
        test_msees = np.array(map(lambda i: train_mid_rst[lid][i][4], range(0, K)))
        train_msees = np.array(map(lambda i: train_mid_rst[lid][i][3], range(0, K)))
        dfs[lid] = np.mean(map(lambda i: train_mid_rst[lid][i][2], range(0, K)))
        mse_means[lid] = test_msees.mean()
        # NOTE: this is the standard error of the estimated mean 1/K * sum(X_i), hence the division by sqrt(K)
        mse_mean_stds[lid] = test_msees.std()/np.sqrt(K)
        print "lasso CD for lambda: %.4f, CV train mse: %.4f, test mse: %.4f, std: %.4f" % \
                (lamb_lst[lid], train_msees.mean(), mse_means[lid], mse_mean_stds[lid])
    # One-standard-error rule: choose the most regularized lambda whose CV error
    # is within one standard error of the minimum CV error.
best_lamb_id, minid = one_std_error_rule(mse_means, mse_mean_stds)
best_lamb = lamb_lst[best_lamb_id]
print "Best lambid: %d, lambda: %.4f, degree of free: %.4f" % (best_lamb_id, best_lamb, dfs[best_lamb_id])
one_std_val = mse_means[minid] + mse_mean_stds[minid]
plt.plot((dfs[0],dfs[-1]), (one_std_val, one_std_val), 'r-')
plt.errorbar(dfs, mse_means, yerr=mse_mean_stds, fmt='-o')
plt.savefig('images/lasso_mse_errorbar.png', format='png')
    # Refit on the full training set with the lambda chosen by K-fold CV and evaluate on the test set
    '''
    # Alternative: pick the lambda with the minimum CV error (no one-standard-error rule)
    best_lamb_id = np.argmin(mse_means)
    best_lamb = lamb_lst[best_lamb_id]
    '''
seta, train_mse = lasso_cd(train_X, train_Y, best_lamb)
y_hat_err = np.dot(test_X, seta) - test_Y
test_mse = np.dot(y_hat_err, y_hat_err) / len(test_Y)
print "Test error: train mse: %.4f, test mse: %.4f" % (train_mse, test_mse)
print "seta: %s" % seta
if __name__ == '__main__':
print "lasso leasq by corr descent:"
lasso_leasq_cd_CV()
print ""
|
#!/usr/bin/env python
import codecs
from setuptools import setup, find_packages
url='http://github.com/mila/django-noticebox/tree/master'
try:
long_description = codecs.open('README.rst', "r", "utf-8").read()
except IOError:
long_description = "See %s" % url
setup(
name='django-noticebox',
version=__import__("noticebox").__version__,
description='Django-noticebox is a reusable Django application which '
'provides functionality for sending notices to site users. '
'The notices can be displayed when user signs in, '
'sent by email or both.',
long_description=long_description,
author='Miloslav Pojman',
author_email='[email protected]',
url=url,
packages=find_packages(),
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
],
include_package_data=True,
zip_safe=False,
)
|
# Copyright 2012 by Wibowo Arindrarto. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Abstract base classes for the SearchIO object model."""
import sys
# Add path to Bio
sys.path.append('../../..')
from Bio._utils import getattr_str, trim_str
__docformat__ = "restructuredtext en"
class _BaseSearchObject(object):
"""Abstract class for SearchIO objects."""
_NON_STICKY_ATTRS = ()
def _transfer_attrs(self, obj):
"""Transfer instance attributes to the given object.
This method is used to transfer attributes set externally (for example
using `setattr`) to a new object created from this one (for example
from slicing).
The reason this method is necessary is because different parsers will
set different attributes for each QueryResult, Hit, HSP, or HSPFragment
objects, depending on the attributes they found in the search output
file. Ideally, we want these attributes to 'stick' with any new instance
object created from the original one.
"""
# list of attribute names we don't want to transfer
for attr in self.__dict__:
if attr not in self._NON_STICKY_ATTRS:
setattr(obj, attr, self.__dict__[attr])
class _BaseHSP(_BaseSearchObject):
"""Abstract base class for HSP objects."""
def _str_hsp_header(self):
"""Prints the alignment header info."""
lines = []
# set query id line
qid_line = trim_str(' Query: %s %s' %
(self.query_id, self.query_description), 80, '...')
# set hit id line
hid_line = trim_str(' Hit: %s %s' %
(self.hit_id, self.hit_description), 80, '...')
lines.append(qid_line)
lines.append(hid_line)
# coordinates
query_start = getattr_str(self, 'query_start')
query_end = getattr_str(self, 'query_end')
hit_start = getattr_str(self, 'hit_start')
hit_end = getattr_str(self, 'hit_end')
# strands
try:
qstrand = self.query_strand
hstrand = self.hit_strand
except ValueError:
qstrand = self.query_strand_all[0]
hstrand = self.hit_strand_all[0]
lines.append('Query range: [%s:%s] (%r)' % (query_start, query_end,
qstrand))
lines.append(' Hit range: [%s:%s] (%r)' % (hit_start, hit_end,
hstrand))
return '\n'.join(lines)
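# A hedged, illustrative sketch (not part of Biopython): shows how
# _transfer_attrs copies externally-set ("sticky") attributes to a new object
# while skipping names listed in _NON_STICKY_ATTRS. The class and attribute
# names below are made up for the example.
def _sticky_attrs_example():
    class _DemoHSP(_BaseHSP):
        _NON_STICKY_ATTRS = ('_items',)
    old = _DemoHSP()
    old.evalue = 1e-10       # set externally, expected to stick
    old._items = [1, 2, 3]   # listed as non-sticky, expected not to transfer
    new = _DemoHSP()
    old._transfer_attrs(new)
    assert new.evalue == 1e-10
    assert not hasattr(new, '_items')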
|
# coding: utf-8
# license: GPLv3
"""Модуль визуализации.
Нигде, кроме этого модуля, не используются экранные координаты объектов.
Функции, создающие гaрафические объекты и перемещающие их на экране, принимают физические координаты
"""
header_font = "Arial-16"
"""Шрифт в заголовке"""
window_width = 800
"""Ширина окна"""
window_height = 800
"""Высота окна"""
scale_factor = None
"""Масштабирование экранных координат по отношению к физическим.
Тип: float
Мера: количество пикселей на один метр."""
def calculate_scale_factor(max_distance):
"""Вычисляет значение глобальной переменной **scale_factor** по данной характерной длине"""
global scale_factor
scale_factor = 0.4*min(window_height, window_width)/max_distance
print('Scale factor:', scale_factor)
def scale_x(x):
"""Возвращает экранную **x** координату по **x** координате модели.
Принимает вещественное число, возвращает целое число.
В случае выхода **x** координаты за пределы экрана возвращает
координату, лежащую за пределами холста.
Параметры:
**x** — x-координата модели.
"""
return int(x*scale_factor) + window_width//2
def scale_y(y):
"""Возвращает экранную **y** координату по **y** координате модели.
Принимает вещественное число, возвращает целое число.
В случае выхода **y** координаты за пределы экрана возвращает
координату, лежащую за пределами холста.
Направление оси развёрнуто, чтобы у модели ось **y** смотрела вверх.
Параметры:
**y** — y-координата модели.
"""
return int(y*scale_factor) + window_width//2 # FIXME: not done yet
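# A hedged, illustrative sketch (not part of the original module): with the
# scale factor computed for a characteristic length of 100 m, the model origin
# maps to the centre of the window.
def _scale_example():
    calculate_scale_factor(100)
    assert scale_x(0) == window_width // 2
    assert scale_y(0) == window_height // 2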
def create_star_image(space, star):
"""Создаёт отображаемый объект звезды.
Параметры:
**space** — холст для рисования.
**star** — объект звезды.
"""
x = scale_x(star.x)
y = scale_y(star.y)
r = star.R
star.image = space.create_oval([x - r, y - r], [x + r, y + r], fill=star.color)
def create_planet_image(space, planet):
"""Создаёт отображаемый объект планеты.
Параметры:
**space** — холст для рисования.
**planet** — объект планеты.
"""
x = scale_x(planet.x)
y = scale_y(planet.y)
r = planet.R
    planet.image = space.create_oval([x - r, y - r], [x + r, y + r], fill=planet.color)  # FIXME: do this the same way as for the star
def update_system_name(space, system_name):
"""Создаёт на холсте текст с названием системы небесных тел.
Если текст уже был, обновляет его содержание.
Параметры:
**space** — холст для рисования.
**system_name** — название системы тел.
"""
space.create_text(30, 80, tag="header", text=system_name, font=header_font)
def update_object_position(space, body):
"""Перемещает отображаемый объект на холсте.
Параметры:
**space** — холст для рисования.
**body** — тело, которое нужно переместить.
"""
x = scale_x(body.x)
y = scale_y(body.y)
r = body.R
    if x + r < 0 or x - r > window_width or y + r < 0 or y - r > window_height:
        space.coords(body.image, window_width + r, window_height + r,
                     window_width + 2*r, window_height + 2*r)  # place it outside the window
    else:
        space.coords(body.image, x - r, y - r, x + r, y + r)
if __name__ == "__main__":
print("This module is not for direct call!")
|
from tuned import exports
import tuned.logs
import tuned.exceptions
from tuned.exceptions import TunedException
import threading
import tuned.consts as consts
from tuned.utils.commands import commands
__all__ = ["Controller"]
log = tuned.logs.get()
class TimerStore(object):
def __init__(self):
self._timers = dict()
self._timers_lock = threading.Lock()
def store_timer(self, token, timer):
with self._timers_lock:
self._timers[token] = timer
def drop_timer(self, token):
with self._timers_lock:
try:
timer = self._timers[token]
timer.cancel()
del self._timers[token]
except:
pass
def cancel_all(self):
with self._timers_lock:
for timer in self._timers.values():
timer.cancel()
self._timers.clear()
class Controller(tuned.exports.interfaces.ExportableInterface):
"""
Controller's purpose is to keep the program running, start/stop the tuning,
and export the controller interface (currently only over D-Bus).
"""
def __init__(self, daemon, global_config):
super(Controller, self).__init__()
self._daemon = daemon
self._global_config = global_config
self._terminate = threading.Event()
self._cmd = commands()
self._timer_store = TimerStore()
def run(self):
"""
Controller main loop. The call is blocking.
"""
log.info("starting controller")
res = self.start()
daemon = self._global_config.get_bool(consts.CFG_DAEMON, consts.CFG_DEF_DAEMON)
if not res and daemon:
exports.start()
if daemon:
self._terminate.clear()
# we have to pass some timeout, otherwise signals will not work
while not self._cmd.wait(self._terminate, 10):
pass
log.info("terminating controller")
self.stop()
def terminate(self):
self._terminate.set()
@exports.signal("sbs")
def profile_changed(self, profile_name, result, errstr):
pass
# exports decorator checks the authorization (currently through polkit), caller is None if
    # no authorization was performed (i.e. the call should be processed as authorized), string
# identifying caller (with DBus it's the caller bus name) if authorized and empty
# string if not authorized, caller must be the last argument
def _log_capture_abort(self, token):
tuned.logs.log_capture_finish(token)
self._timer_store.drop_timer(token)
@exports.export("ii", "s")
def log_capture_start(self, log_level, timeout, caller = None):
if caller == "":
return ""
token = tuned.logs.log_capture_start(log_level)
if token is None:
return ""
if timeout > 0:
timer = threading.Timer(timeout,
self._log_capture_abort, args = [token])
self._timer_store.store_timer(token, timer)
timer.start()
return "" if token is None else token
@exports.export("s", "s")
def log_capture_finish(self, token, caller = None):
if caller == "":
return ""
res = tuned.logs.log_capture_finish(token)
self._timer_store.drop_timer(token)
return "" if res is None else res
@exports.export("", "b")
def start(self, caller = None):
if caller == "":
return False
if self._global_config.get_bool(consts.CFG_DAEMON, consts.CFG_DEF_DAEMON):
if self._daemon.is_running():
return True
elif not self._daemon.is_enabled():
return False
return self._daemon.start()
@exports.export("", "b")
def stop(self, caller = None):
if caller == "":
return False
if not self._daemon.is_running():
res = True
else:
res = self._daemon.stop()
self._timer_store.cancel_all()
return res
@exports.export("", "b")
def reload(self, caller = None):
if caller == "":
return False
if self._daemon.is_running():
stop_ok = self.stop()
if not stop_ok:
return False
try:
self._daemon.reload_profile_config()
except TunedException as e:
log.error("Failed to reload TuneD: %s" % e)
return False
return self.start()
def _switch_profile(self, profile_name, manual):
was_running = self._daemon.is_running()
msg = "OK"
success = True
reapply = False
try:
if was_running:
self._daemon.stop(profile_switch = True)
self._daemon.set_profile(profile_name, manual)
except tuned.exceptions.TunedException as e:
success = False
msg = str(e)
if was_running and self._daemon.profile.name == profile_name:
log.error("Failed to reapply profile '%s'. Did it change on disk and break?" % profile_name)
reapply = True
else:
log.error("Failed to apply profile '%s'" % profile_name)
finally:
if was_running:
if reapply:
log.warn("Applying previously applied (possibly out-dated) profile '%s'." % profile_name)
elif not success:
log.info("Applying previously applied profile.")
self._daemon.start()
return (success, msg)
@exports.export("s", "(bs)")
def switch_profile(self, profile_name, caller = None):
if caller == "":
return (False, "Unauthorized")
return self._switch_profile(profile_name, True)
@exports.export("", "(bs)")
def auto_profile(self, caller = None):
if caller == "":
return (False, "Unauthorized")
profile_name = self.recommend_profile()
return self._switch_profile(profile_name, False)
@exports.export("", "s")
def active_profile(self, caller = None):
if caller == "":
return ""
if self._daemon.profile is not None:
return self._daemon.profile.name
else:
return ""
@exports.export("", "(ss)")
def profile_mode(self, caller = None):
if caller == "":
return "unknown", "Unauthorized"
manual = self._daemon.manual
if manual is None:
# This means no profile is applied. Check the preset value.
try:
profile, manual = self._cmd.get_active_profile()
if manual is None:
manual = profile is not None
except TunedException as e:
mode = "unknown"
error = str(e)
return mode, error
mode = consts.ACTIVE_PROFILE_MANUAL if manual else consts.ACTIVE_PROFILE_AUTO
return mode, ""
@exports.export("", "s")
def post_loaded_profile(self, caller = None):
if caller == "":
return ""
return self._daemon.post_loaded_profile or ""
@exports.export("", "b")
def disable(self, caller = None):
if caller == "":
return False
if self._daemon.is_running():
self._daemon.stop()
if self._daemon.is_enabled():
self._daemon.set_all_profiles(None, True, None,
save_instantly=True)
return True
@exports.export("", "b")
def is_running(self, caller = None):
if caller == "":
return False
return self._daemon.is_running()
@exports.export("", "as")
def profiles(self, caller = None):
if caller == "":
return []
return self._daemon.profile_loader.profile_locator.get_known_names()
@exports.export("", "a(ss)")
def profiles2(self, caller = None):
if caller == "":
return []
return self._daemon.profile_loader.profile_locator.get_known_names_summary()
@exports.export("s", "(bsss)")
def profile_info(self, profile_name, caller = None):
if caller == "":
            return (False, "", "", "")
if profile_name is None or profile_name == "":
profile_name = self.active_profile()
return tuple(self._daemon.profile_loader.profile_locator.get_profile_attrs(profile_name, [consts.PROFILE_ATTR_SUMMARY, consts.PROFILE_ATTR_DESCRIPTION], [""]))
@exports.export("", "s")
def recommend_profile(self, caller = None):
if caller == "":
return ""
return self._daemon.profile_recommender.recommend()
@exports.export("", "b")
def verify_profile(self, caller = None):
if caller == "":
return False
return self._daemon.verify_profile(ignore_missing = False)
@exports.export("", "b")
def verify_profile_ignore_missing(self, caller = None):
if caller == "":
return False
return self._daemon.verify_profile(ignore_missing = True)
@exports.export("", "a{sa{ss}}")
def get_all_plugins(self, caller = None):
"""Return dictionary with accesible plugins
Return:
dictionary -- {plugin_name: {parameter_name: default_value}}
"""
if caller == "":
return False
plugins = {}
for plugin_class in self._daemon.get_all_plugins():
plugin_name = plugin_class.__module__.split(".")[-1].split("_", 1)[1]
conf_options = plugin_class._get_config_options()
plugins[plugin_name] = {}
for key, val in conf_options.items():
plugins[plugin_name][key] = str(val)
return plugins
@exports.export("s","s")
def get_plugin_documentation(self, plugin_name, caller = None):
"""Return docstring of plugin's class"""
if caller == "":
return False
return self._daemon.get_plugin_documentation(str(plugin_name))
@exports.export("s","a{ss}")
def get_plugin_hints(self, plugin_name, caller = None):
"""Return dictionary with plugin's parameters and their hints
Parameters:
plugin_name -- name of plugin
Return:
dictionary -- {parameter_name: hint}
"""
if caller == "":
return False
return self._daemon.get_plugin_hints(str(plugin_name))
|
from routines import cspline_transform, cspline_sample4d, slice_time
from transform import Affine, apply_affine, BRAIN_RADIUS_MM
import numpy as np
from scipy import optimize
DEFAULT_SPEEDUP = 4
DEFAULT_OPTIMIZER = 'powell'
DEFAULT_WITHIN_LOOPS = 2
DEFAULT_BETWEEN_LOOPS = 5
def grid_coords(xyz, affine, from_world, to_world):
Tv = np.dot(from_world, np.dot(affine, to_world))
XYZ = apply_affine(Tv, xyz)
return XYZ[0,:], XYZ[1,:], XYZ[2,:]
class Image4d(object):
"""
Class to represent a sequence of 3d scans acquired on a slice-by-slice basis.
"""
def __init__(self, array, to_world, tr, tr_slices=None, start=0.0,
slice_order='ascending', interleaved=False, slice_axis=2):
"""
Configure fMRI acquisition time parameters.
tr : inter-scan repetition time, i.e. the time elapsed between two consecutive scans
tr_slices : inter-slice repetition time, same as tr for slices
        start : starting acquisition time relative to the implicit time origin
slice_order : string or array
"""
self.array = array
self.to_world = to_world
nslices = array.shape[slice_axis]
# Default slice repetition time (no silence)
if tr_slices == None:
tr_slices = tr/float(nslices)
# Set slice order
if isinstance(slice_order, str):
if not interleaved:
aux = range(nslices)
else:
p = nslices/2
aux = []
for i in range(p):
aux.extend([i,p+i])
if nslices%2:
aux.append(nslices-1)
if slice_order == 'descending':
aux.reverse()
slice_order = aux
# Set timing values
self.nslices = nslices
self.tr = float(tr)
self.tr_slices = float(tr_slices)
self.start = float(start)
self.slice_order = np.asarray(slice_order)
self.interleaved = bool(interleaved)
## assume that the world referential is 'scanner' as defined
## by the nifti norm
self.reversed_slices = to_world[slice_axis][slice_axis]<0
def z_to_slice(self, z):
"""
Account for the fact that slices may be stored in reverse
order wrt the scanner coordinate system convention (slice 0 ==
bottom of the head)
"""
if self.reversed_slices:
return self.nslices - 1 - z
else:
return z
def to_time(self, z, t):
"""
t = to_time(zv, tv)
zv, tv are grid coordinates; t is an actual time value.
"""
return(self.start + self.tr*t + slice_time(self.z_to_slice(z), self.tr_slices, self.slice_order))
def from_time(self, z, t):
"""
tv = from_time(zv, t)
zv, tv are grid coordinates; t is an actual time value.
"""
return((t - self.start - slice_time(self.z_to_slice(z), self.tr_slices, self.slice_order))/self.tr)
def get_data(self):
return self.array
def get_affine(self):
return self.to_world
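# A hedged, illustrative sketch (not part of the original module): to_time and
# from_time are exact inverses of each other for any slice, so converting a
# grid time to an acquisition time and back recovers the original value. The
# array shape and timing values below are made up.
def _slice_timing_example():
    vol = np.zeros((2, 2, 3, 4))
    img = Image4d(vol, to_world=np.eye(4), tr=3.0, slice_order='ascending')
    t_acq = img.to_time(1, 0)            # acquisition time of slice 1 in scan 0
    assert abs(img.from_time(1, t_acq)) < 1e-9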
class Realign4d(object):
def __init__(self,
im4d,
speedup=DEFAULT_SPEEDUP,
optimizer=DEFAULT_OPTIMIZER,
transforms=None):
self.optimizer = optimizer
dims = im4d.array.shape
self.dims = dims
self.nscans = dims[3]
# Define mask
speedup = max(1, int(speedup))
xyz = np.mgrid[0:dims[0]:speedup, 0:dims[1]:speedup, 0:dims[2]:speedup]
self.xyz = xyz.reshape(3, np.prod(xyz.shape[1::]))
masksize = self.xyz.shape[1]
self.data = np.zeros([masksize, self.nscans], dtype='double')
# Initialize space/time transformation parameters
self.to_world = im4d.to_world
self.from_world = np.linalg.inv(self.to_world)
if transforms == None:
self.transforms = [Affine('rigid', radius=BRAIN_RADIUS_MM) for scan in range(self.nscans)]
else:
self.transforms = transforms
self.from_time = im4d.from_time
self.timestamps = im4d.tr*np.array(range(self.nscans))
# Compute the 4d cubic spline transform
self.cbspline = cspline_transform(im4d.array)
def resample_inmask(self, t):
X, Y, Z = grid_coords(self.xyz, self.transforms[t], self.from_world, self.to_world)
T = self.from_time(Z, self.timestamps[t])
cspline_sample4d(self.data[:,t], self.cbspline, X, Y, Z, T)
def resample_all_inmask(self):
for t in range(self.nscans):
print('Resampling scan %d/%d' % (t+1, self.nscans))
self.resample_inmask(t)
def init_motion_detection(self, t):
"""
The idea is to compute the global variance using the following
decomposition:
V = (n-1)/n V1 + (n-1)/n^2 (x1-m1)^2
= alpha + beta d2,
with alpha=(n-1)/n V1, beta = (n-1)/n^2, d2 = (x1-m1)^2.
Only the second term is variable when one image moves while
all other images are fixed.
"""
self.resample_inmask(t)
fixed = range(self.nscans)
fixed.remove(t)
aux = self.data[:, fixed]
self.m1 = aux.mean(1)
self.d2 = np.zeros(np.shape(self.m1))
self.alpha = ((self.nscans-1.0)/self.nscans)*aux.var(1).mean()
self.beta = (self.nscans-1.0)/self.nscans**2
def msid(self, t):
"""
Mean square intensity difference
"""
self.resample_inmask(t)
self.d2[:] = self.data[:,t]
self.d2 -= self.m1
self.d2 **= 2
return self.d2.mean()
def variance(self, t):
return self.alpha + self.beta*self.msid(t)
def safe_variance(self, t):
"""
No need to invoke self.init_motion_detection.
"""
self.resample_inmask(t)
self.m = self.data.mean(1)
self.m2 = (self.data**2).mean(1)
self.m **= 2
self.m2 -= self.m
return self.m2.mean()
def correct_motion(self):
optimizer = self.optimizer
def callback(pc):
self.transforms[t].from_param(pc)
print(self.transforms[t])
if optimizer=='simplex':
fmin = optimize.fmin
elif optimizer=='powell':
fmin = optimize.fmin_powell
elif optimizer=='conjugate_gradient':
fmin = optimize.fmin_cg
else:
raise ValueError('Unrecognized optimizer')
# Resample data according to the current space/time transformation
self.resample_all_inmask()
# Optimize motion parameters
for t in range(self.nscans):
print('Correcting motion of scan %d/%d...' % (t+1, self.nscans))
def loss(pc):
self.transforms[t].from_param(pc)
return self.msid(t)
self.init_motion_detection(t)
pc0 = self.transforms[t].to_param()
pc = fmin(loss, pc0, callback=callback)
self.transforms[t].from_param(pc)
def resample(self):
print('Gridding...')
dims = self.dims
XYZ = np.mgrid[0:dims[0], 0:dims[1], 0:dims[2]]
XYZ = XYZ.reshape(3, np.prod(XYZ.shape[1::]))
res = np.zeros(dims)
for t in range(self.nscans):
print('Fully resampling scan %d/%d' % (t+1, self.nscans))
X, Y, Z = grid_coords(XYZ, self.transforms[t], self.from_world, self.to_world)
T = self.from_time(Z, self.timestamps[t])
cspline_sample4d(res[:,:,:,t], self.cbspline, X, Y, Z, T)
return res
def _resample4d(im4d, transforms=None):
"""
corr_im4d_array = _resample4d(im4d, transforms=None)
"""
r = Realign4d(im4d, transforms=transforms)
return r.resample()
def _realign4d(im4d,
loops=DEFAULT_WITHIN_LOOPS,
speedup=DEFAULT_SPEEDUP,
optimizer=DEFAULT_OPTIMIZER):
"""
transforms = _realign4d(im4d, loops=2, speedup=4, optimizer='powell')
Parameters
----------
im4d : Image4d instance
"""
r = Realign4d(im4d, speedup=speedup, optimizer=optimizer)
for loop in range(loops):
r.correct_motion()
return r.transforms
def realign4d(runs,
within_loops=DEFAULT_WITHIN_LOOPS,
between_loops=DEFAULT_BETWEEN_LOOPS,
speedup=DEFAULT_SPEEDUP,
optimizer=DEFAULT_OPTIMIZER,
align_runs=True):
"""
    transforms = realign4d(runs, within_loops=2, between_loops=5, speedup=4, optimizer='powell')
Parameters
----------
runs : list of Image4d objects
Returns
-------
transforms : list
nested list of rigid transformations
"""
# Single-session case
if not isinstance(runs, list) and not isinstance(runs, tuple):
runs = [runs]
nruns = len(runs)
# Correct motion and slice timing in each sequence separately
transfo_runs = [_realign4d(run, loops=within_loops, speedup=speedup, optimizer=optimizer) for run in runs]
if nruns==1:
return transfo_runs[0]
if align_runs==False:
return transfo_runs
# Correct between-session motion using the mean image of each corrected run
corr_runs = [_resample4d(runs[i], transforms=transfo_runs[i]) for i in range(nruns)]
aux = np.rollaxis(np.asarray([corr_run.mean(3) for corr_run in corr_runs]), 0, 4)
## Fake time series with zero inter-slice time
## FIXME: check that all runs have the same to-world transform
mean_img = Image4d(aux, to_world=runs[0].to_world, tr=1.0, tr_slices=0.0)
transfo_mean = _realign4d(mean_img, loops=between_loops, speedup=speedup, optimizer=optimizer)
corr_mean = _resample4d(mean_img, transforms=transfo_mean)
# Compose transformations for each run
for i in range(nruns):
run_to_world = transfo_mean[i]
transforms = [run_to_world*to_run for to_run in transfo_runs[i]]
transfo_runs[i] = transforms
return transfo_runs
|
'''
Created on 4 Jul 2016
@author: wnm24546
'''
import glob, os, re, sys
import h5py
import numpy as np
############################################
## EDIT LINES BELOW HERE ###################
############################################
#Give the directory path the files are and... (all values between ' or ")
working_directory = '/scratch/tmp/pdf_data'
#...either the full name of the files...
filenames_list=[]
#filenames_list = ['112004_KNJ-KA-218-01-PtNi-120s.hdf5', '112031_KNJ-KA-218-01-PtNi-120s.hdf5']
#...or the base name of the files and their numbers
file_name_template = 'Ce-BDC(250C-1hr)_aq-6s_2min'
file_numbers = []#222643, 222702, 222761, 222820, 222879,
#222938, 222997, 223056, 223115, 223174]
############################################
## NOTHING SHOULD NEED EDITING BELOW HERE ##
############################################
def main(files, template=None):
#Get numpy datasets from each of the files and put them into a list
dataSets = []
for name in files:
# if template == -1:
fName = os.path.join(wDir, name)
# else:
# fName = os.path.join(wDir, str(name)+template)
dataSets.append(get_data_from_file(fName))
#Merge dataSets into one big dataset with the same shape (0,2048,2048)
sumDataSet = np.zeros(dataSets[0].shape, dtype=np.int32)
for dataSet in dataSets:
sumDataSet = np.add(dataSet, sumDataSet)
#Create an average dataset by dividing the sumdataset by the number of files
avsDataSet = sumDataSet/len(files)
#Output the summed data and the averaged data to two HDF files with different names
outputFiles = {'summed' : sumDataSet, 'averaged' : avsDataSet}
for key in outputFiles:
if template == None:
output_file_name = key+".hdf5"
else:
output_file_name = key+"_"+template+".hdf5"
output_path = os.path.join(wDir, 'processing')
if not os.path.exists(output_path):
os.makedirs(output_path)
print "Writing "+key.title()+" dataset file..."
with h5py.File(os.path.join(output_path, output_file_name), 'w') as out_hdf:
out_hdf.attrs['creator']="mergeHDFs.py"
out_hdf.attrs['author']="Diamond Light Source Ltd."
out_hdf.attrs['comment']=key.title()+" dataset from "+str(len(files))+" HDF files (full names given in input_files attribute)."
out_hdf.attrs['input_files']=", ".join(files)
entry = out_hdf.create_group('entry')
instrument = entry.create_group('instrument')
detector = instrument.create_group('detector')
data = detector.create_dataset('data', data=outputFiles[key])
data.attrs['dim0']="frame number n"
data.attrs['dim1']="NDArray dim1"
data.attrs['dim2']="NDArray dim0"
data.attrs['interpretation']="image"
data.attrs['signal']=1
out_hdf.close()
def get_data_from_file(filename):
print "Reading "+filename+"..."
with h5py.File(filename, 'r') as dataFile:
return dataFile['/entry/instrument/detector/data'][()]
def usage_message():
print ("\nmergeHDFs can be configured in the script, or will take either one or two \n"
"arguments. To configure in the script, set the working_directory,\n"
"file_name_template and file_numbers fields for your needs.\n"
"The three arguments (separated by spaces) the script accepts are:\n"
"\t1) working directory - full path\n"
"\t2) the filename_str name template - .hdf5 is automatically appended\n"
"\t3) filename_str numbers - comma separated list of numbers in the filenames\n"
)
return
#Set the working directory
if len(sys.argv) >= 2:
wDir = sys.argv[1]
elif working_directory != None:
wDir = working_directory
else:
print "ERROR: No working directory given!"
usage_message()
exit(1)
#Check the working directory exists
if not os.path.isdir(wDir):
print "ERROR: Given working directory does not exist/is not directory."
exit(1)
#If we don't have a file_list already, try to make one
if (not filenames_list) | (filenames_list == None): #Empty list or None
#Set the template
if len(sys.argv) >= 3:
template = sys.argv[2]
elif file_name_template != None:
template = file_name_template
else:
print "ERROR: file_name_template not given!"
usage_message()
exit(1)
#Set the filename_str numbers
if len(sys.argv) == 4:
numbers = sys.argv[3].split(",")
    elif file_numbers: #If there are file numbers
numbers = file_numbers
else:
os.chdir(wDir)
numbers=[]
for filename_str in glob.glob("*"+str(template)+"*"):
if ("dark" not in filename_str) & ("pristine" not in filename_str):
numbers.append(re.findall('\d+',filename_str)[0]) #Assumes number we want is the first one in the filename
if not numbers:
print "ERROR: file_numbers not given & could not be found!"
usage_message()
exit(1)
#Make a file_list from the template & numbers
file_list = []
numbers.sort()
for number in numbers:
file_list.append(str(number)+"_"+str(template)+".hdf5")
else:
#We've got a list of all filenames already
file_list = filenames_list
#Check
for filename in file_list:
try:
assert os.path.exists(os.path.join(wDir, filename))
except:
print "ERROR: The file "+str(filename)+" does not exist in "+str(wDir)
exit(1)
if (template == "") | (template == None):
output_template = None
else:
output_template = template.replace("(", "_")
output_template = output_template.replace(")", "_")
output_template = output_template.replace(".", "p")
output_template = str(min(numbers))+"-"+str(max(numbers))+"_"+output_template
if __name__=="__main__":
main(file_list, output_template)
print "\n"
|
import time
import random
import hashlib
from social.utils import setting_name
from social.store import OpenIdStore
class BaseTemplateStrategy(object):
def __init__(self, strategy):
self.strategy = strategy
def render(self, tpl=None, html=None, context=None):
if not tpl and not html:
raise ValueError('Missing template or html parameters')
context = context or {}
if tpl:
return self.render_template(tpl, context)
else:
return self.render_string(html, context)
def render_template(self, tpl, context):
raise NotImplementedError('Implement in subclass')
def render_string(self, html, context):
raise NotImplementedError('Implement in subclass')
class BaseStrategy(object):
ALLOWED_CHARS = 'abcdefghijklmnopqrstuvwxyz' \
'ABCDEFGHIJKLMNOPQRSTUVWXYZ' \
'0123456789'
def __init__(self, backend=None, storage=None, request=None, tpl=None,
backends=None, *args, **kwargs):
tpl = tpl or BaseTemplateStrategy
if not isinstance(tpl, BaseTemplateStrategy):
tpl = tpl(self)
self.tpl = tpl
self.request = request
self.storage = storage
self.backends = backends
if backend:
self.backend_name = backend.name
self.backend = backend(strategy=self, *args, **kwargs)
else:
self.backend_name = None
self.backend = backend
def setting(self, name, default=None):
names = (setting_name(self.backend_name, name),
setting_name(name),
name)
for name in names:
try:
return self.get_setting(name)
except (AttributeError, KeyError):
pass
return default
def start(self):
# Clean any partial pipeline info before starting the process
self.clean_partial_pipeline()
if self.backend.uses_redirect():
return self.redirect(self.backend.auth_url())
else:
return self.html(self.backend.auth_html())
def complete(self, *args, **kwargs):
return self.backend.auth_complete(*args, **kwargs)
def continue_pipeline(self, *args, **kwargs):
return self.backend.continue_pipeline(*args, **kwargs)
def disconnect(self, user, association_id=None):
self.storage.user.disconnect(name=self.backend.name, user=user,
association_id=association_id)
def authenticate(self, *args, **kwargs):
kwargs['strategy'] = self
kwargs['storage'] = self.storage
kwargs['backend'] = self.backend
return self.backend.authenticate(*args, **kwargs)
def create_user(self, *args, **kwargs):
return self.storage.user.create_user(*args, **kwargs)
def get_user(self, *args, **kwargs):
return self.storage.user.get_user(*args, **kwargs)
def session_setdefault(self, name, value):
self.session_set(name, value)
return self.session_get(name)
def to_session(self, next, backend, *args, **kwargs):
return {
'next': next,
'backend': backend.name,
'args': args,
'kwargs': kwargs
}
def from_session(self, session):
return session['next'], session['backend'], \
session['args'], session['kwargs']
def clean_partial_pipeline(self):
self.session_pop('partial_pipeline')
def openid_store(self):
return OpenIdStore(self)
def get_pipeline(self):
return self.setting('PIPELINE', (
'social.pipeline.social_auth.social_user',
'social.pipeline.user.get_username',
'social.pipeline.user.create_user',
'social.pipeline.social_auth.associate_user',
'social.pipeline.social_auth.load_extra_data',
'social.pipeline.user.user_details'
))
def random_string(self, length=12, chars=ALLOWED_CHARS):
# Implementation borrowed from django 1.4
try:
random.SystemRandom()
except NotImplementedError:
key = self.setting('SECRET_KEY', '')
seed = '%s%s%s' % (random.getstate(), time.time(), key)
random.seed(hashlib.sha256(seed.encode()).digest())
return ''.join([random.choice(chars) for i in range(length)])
def is_integrity_error(self, exception):
return self.storage.is_integrity_error(exception)
# Implement the following methods on strategies sub-classes
def redirect(self, url):
"""Return a response redirect to the given URL"""
raise NotImplementedError('Implement in subclass')
def get_setting(self, name):
"""Return value for given setting name"""
raise NotImplementedError('Implement in subclass')
def html(self, content):
"""Return HTTP response with given content"""
raise NotImplementedError('Implement in subclass')
def render_html(self, tpl=None, html=None, context=None):
"""Render given template or raw html with given context"""
return self.tpl.render(tpl, html, context)
def request_data(self, merge=True):
"""Return current request data (POST or GET)"""
raise NotImplementedError('Implement in subclass')
def request_host(self):
"""Return current host value"""
raise NotImplementedError('Implement in subclass')
def session_get(self, name, default=None):
"""Return session value for given key"""
raise NotImplementedError('Implement in subclass')
def session_set(self, name, value):
"""Set session value for given key"""
raise NotImplementedError('Implement in subclass')
def session_pop(self, name):
"""Pop session value for given key"""
raise NotImplementedError('Implement in subclass')
def build_absolute_uri(self, path=None):
"""Build absolute URI with given (optional) path"""
raise NotImplementedError('Implement in subclass')
def is_response(self, value):
raise NotImplementedError('Implement in subclass')
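# A hedged, illustrative sketch (not part of python-social-auth): a minimal
# in-memory strategy showing which abstract methods a concrete subclass is
# expected to provide. The class name and the dict-backed session are made up.
class _DictSessionStrategy(BaseStrategy):
    def __init__(self, *args, **kwargs):
        self._session = {}
        self._settings = kwargs.pop('settings', {})
        super(_DictSessionStrategy, self).__init__(*args, **kwargs)

    def get_setting(self, name):
        return self._settings[name]

    def session_get(self, name, default=None):
        return self._session.get(name, default)

    def session_set(self, name, value):
        self._session[name] = value

    def session_pop(self, name):
        return self._session.pop(name, None)


def _dict_strategy_example():
    strategy = _DictSessionStrategy()
    strategy.session_set('next', '/home')
    assert strategy.session_get('next') == '/home'
    assert strategy.session_pop('next') == '/home'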
|
import sys
newDelimiter="###GREPSILON###"
def usage():
print "USAGE:"
print sys.argv[0],"<delim>","<match>","--new-delimiter <new delimiter>","<filename>"
print " "
print "This script looks for <match> in blocks surrounded by beginning of file, <delim> and EOF"
print "Tip: this script generates delimiters for ease of use. ("+newDelimiter
def grep(delim, str, file):
buf=""
found=False
for line in file:
        if delim in line:
            if found:
                yield buf
            buf=""
            found=False
else:
if str in line:
found=True
buf+=line
if found:
yield buf
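# A hedged, illustrative sketch (not part of the original script): grep() also
# works on any iterable of lines, e.g. an in-memory list, which makes the
# block-splitting behaviour easy to see.
def grep_example():
    lines = ["block one\n", "hello\n", "---\n",
             "block two\n", "---\n",
             "hello again\n"]
    # only the first and the last block contain "hello", so two blocks are yielded
    return list(grep("---", "hello", lines))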
if len(sys.argv)>3:
file=None
if len(sys.argv) == 6:
if sys.argv[3]=="--new-delimiter":
newDelimiter=sys.argv[4]
try:
file = open(sys.argv[-1],"r")
except:
print "Error opening",sys.argv[-1]
exit()
for block in grep(sys.argv[1],sys.argv[2],file):
print block,newDelimiter
else:
usage()
|
import hashlib
import hmac
import io
import os
import re
from binascii import b2a_base64, a2b_base64
from .. import ecdsa
from ..serialize.bitcoin_streamer import stream_bc_string
from ..ecdsa import ellipticcurve, numbertheory
from ..networks import address_prefix_for_netcode, network_name_for_netcode
from ..encoding import public_pair_to_bitcoin_address, to_bytes_32, from_bytes_32, double_sha256
from ..key import Key
# According to brainwallet, this is "inputs.io" format, but it seems practical
# and is deployed in the wild. Core bitcoin doesn't offer a message wrapper like this.
signature_template = '''\
-----BEGIN {net_name} SIGNED MESSAGE-----
{msg}
-----BEGIN SIGNATURE-----
{addr}
{sig}
-----END {net_name} SIGNED MESSAGE-----'''
def parse_signed_message(msg_in):
"""
Take an "armoured" message and split into the message body, signing address
and the base64 signature. Should work on all altcoin networks, and should
accept both Inputs.IO and Multibit formats but not Armory.
    Looks like RFC2440 <https://www.ietf.org/rfc/rfc2440.txt> was an "inspiration"
for this, so in case of confusion it's a reference, but I've never found
a real spec for this. Should be a BIP really.
"""
# Convert to Unix line feeds from DOS style, iff we find them, but
# restore to same at the end. The RFC implies we should be using
# DOS \r\n in the message, but that does not always happen in today's
# world of MacOS and Linux devs. A mix of types will not work here.
dos_nl = ('\r\n' in msg_in)
if dos_nl:
msg_in = msg_in.replace('\r\n', '\n')
try:
# trim any junk in front
_, body = msg_in.split('SIGNED MESSAGE-----\n', 1)
except:
raise ValueError("expecting text SIGNED MESSSAGE somewhere")
try:
# - sometimes middle sep is BEGIN BITCOIN SIGNATURE, other times just BEGIN SIGNATURE
# - choose the last instance, in case someone signs a signed message
parts = re.split('\n-----BEGIN [A-Z ]*SIGNATURE-----\n', body)
msg, hdr = ''.join(parts[:-1]), parts[-1]
except:
raise ValueError("expected BEGIN SIGNATURE line", body)
# after message, expect something like an email/http headers, so split into lines
hdr = list(filter(None, [i.strip() for i in hdr.split('\n')]))
if '-----END' not in hdr[-1]:
raise ValueError("expecting END on last line")
sig = hdr[-2]
addr = None
for l in hdr:
l = l.strip()
if not l:
continue
if l.startswith('-----END'):
break
if ':' in l:
label, value = [i.strip() for i in l.split(':', 1)]
if label.lower() == 'address':
addr = l.split(':')[1].strip()
break
continue
addr = l
break
if not addr or addr == sig:
raise ValueError("Could not find address")
if dos_nl:
msg = msg.replace('\n', '\r\n')
return msg, addr, sig
def sign_message(key, message=None, verbose=False, use_uncompressed=None, msg_hash=None):
"""
Return a signature, encoded in Base64, which can be verified by anyone using the
public key.
"""
secret_exponent = key.secret_exponent()
if not secret_exponent:
raise TypeError("Private key is required to sign a message")
addr = key.address()
netcode = key.netcode()
mhash = hash_for_signing(message, netcode) if message else msg_hash
# Use a deterministic K so our signatures are deterministic.
try:
r, s, y_odd = _my_sign(ecdsa.generator_secp256k1, secret_exponent, mhash)
except RuntimeError:
# .. except if extremely unlucky
k = from_bytes_32(os.urandom(32))
r, s, y_odd = _my_sign(ecdsa.generator_secp256k1, secret_exponent, mhash, _k=k)
is_compressed = not key._use_uncompressed(use_uncompressed)
assert y_odd in (0, 1)
# See http://bitcoin.stackexchange.com/questions/14263
# for discussion of the proprietary format used for the signature
#
# Also from key.cpp:
#
# The header byte: 0x1B = first key with even y, 0x1C = first key with odd y,
# 0x1D = second key with even y, 0x1E = second key with odd y,
# add 0x04 for compressed keys.
first = 27 + y_odd + (4 if is_compressed else 0)
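    # e.g. a compressed key with even y gives 27 + 0 + 4 = 31 (0x1F)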
sig = b2a_base64(bytearray([first]) + to_bytes_32(r) + to_bytes_32(s)).strip()
if not isinstance(sig, str):
# python3 b2a wrongness
sig = str(sig, 'ascii')
if not verbose or message is None:
return sig
return signature_template.format(
msg=message, sig=sig, addr=addr,
net_name=network_name_for_netcode(netcode).upper())
def verify_message(key_or_address, signature, message=None, msg_hash=None, netcode=None):
"""
Take a signature, encoded in Base64, and verify it against a
key object (which implies the public key),
or a specific base58-encoded pubkey hash.
"""
if isinstance(key_or_address, Key):
# they gave us a private key or a public key already loaded.
key = key_or_address
else:
key = Key.from_text(key_or_address)
netcode = netcode or key.netcode()
try:
# Decode base64 and a bitmask in first byte.
is_compressed, recid, r, s = _decode_signature(signature)
except ValueError:
return False
# Calculate hash of message used in signature
mhash = hash_for_signing(message, netcode) if message is not None else msg_hash
# Calculate the specific public key used to sign this message.
pair = _extract_public_pair(ecdsa.generator_secp256k1, recid, r, s, mhash)
# Check signing public pair is the one expected for the signature. It must be an
# exact match for this key's public pair... or else we are looking at a validly
# signed message, but signed by some other key.
#
pp = key.public_pair()
if pp:
# expect an exact match for public pair.
return pp == pair
else:
# Key() constructed from a hash of pubkey doesn't know the exact public pair, so
# must compare hashed addresses instead.
addr = key.address()
prefix = address_prefix_for_netcode(netcode)
ta = public_pair_to_bitcoin_address(pair, compressed=is_compressed, address_prefix=prefix)
return ta == addr
def msg_magic_for_netcode(netcode):
"""
We need the constant "strMessageMagic" in C++ source code, from file "main.cpp"
It is not shown as part of the signed message, but it is prefixed to the message
as part of calculating the hash of the message (for signature). It's also what
prevents a message signature from ever being a valid signature for a transaction.
Each altcoin finds and changes this string... But just simple substitution.
"""
name = network_name_for_netcode(netcode)
if netcode in ('BLK', 'BC'):
name = "BlackCoin" # NOTE: we need this particular HumpCase
# testnet, the first altcoin, didn't change header
if netcode == 'XTN':
name = "Bitcoin"
return '%s Signed Message:\n' % name
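# e.g. for netcode 'BTC' the magic prefix is "Bitcoin Signed Message:\n"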
def _decode_signature(signature):
"""
Decode the internal fields of the base64-encoded signature.
"""
if signature[0] not in ('G', 'H', 'I'):
# Because we know the first char is in range(27, 35), we know
# valid first character is in this set.
raise TypeError("Expected base64 value as signature", signature)
# base 64 decode
sig = a2b_base64(signature)
if len(sig) != 65:
raise ValueError("Wrong length, expected 65")
# split into the parts.
    first = ord(sig[0:1])           # py3 accommodation
r = from_bytes_32(sig[1:33])
s = from_bytes_32(sig[33:33+32])
# first byte encodes a bits we need to know about the point used in signature
if not (27 <= first < 35):
raise ValueError("First byte out of range")
    # NOTE: The first byte encodes the "recovery id", or "recid", which is a 3-bit value
# which selects compressed/not-compressed and one of 4 possible public pairs.
#
first -= 27
is_compressed = bool(first & 0x4)
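    # e.g. first byte 31 (0x1F): 31 - 27 = 4 -> compressed, recid = 0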
return is_compressed, (first & 0x3), r, s
def _extract_public_pair(generator, recid, r, s, value):
"""
Using the already-decoded parameters of the bitcoin signature,
return the specific public key pair used to sign this message.
Caller must verify this pubkey is what was expected.
"""
assert 0 <= recid < 4, recid
G = generator
n = G.order()
curve = G.curve()
order = G.order()
p = curve.p()
x = r + (n * (recid // 2))
alpha = (pow(x, 3, p) + curve.a() * x + curve.b()) % p
beta = numbertheory.modular_sqrt(alpha, p)
inv_r = numbertheory.inverse_mod(r, order)
y = beta if ((beta - recid) % 2 == 0) else (p - beta)
minus_e = -value % order
R = ellipticcurve.Point(curve, x, y, order)
Q = inv_r * (s * R + minus_e * G)
public_pair = (Q.x(), Q.y())
# check that this is the RIGHT public key? No. Leave that for the caller.
return public_pair
def hash_for_signing(msg, netcode='BTC'):
"""
Return a hash of msg, according to odd bitcoin method: double SHA256 over a bitcoin
encoded stream of two strings: a fixed magic prefix and the actual message.
"""
magic = msg_magic_for_netcode(netcode)
fd = io.BytesIO()
stream_bc_string(fd, bytearray(magic, 'ascii'))
stream_bc_string(fd, bytearray(msg, 'utf-8'))
# return as a number, since it's an input to signing algos like that anyway
return from_bytes_32(double_sha256(fd.getvalue()))
def deterministic_make_k(generator_order, secret_exponent, val,
hash_f=hashlib.sha256, trust_no_one=True):
"""
Generate K value BUT NOT according to https://tools.ietf.org/html/rfc6979
ecsda.deterministic_generate_k() was more general than it needs to be,
    and I felt the hand of the NSA in the wholly unexplained constants, so I simplified and
changed the salt.
"""
n = generator_order
assert hash_f().digest_size == 32
# code below has been specialized for SHA256 / bitcoin usage
assert n.bit_length() == 256
hash_size = 32
if trust_no_one:
v = b"Edward Snowden rocks the world!!"
k = b"Qwest CEO Joseph Nacchio is free"
else:
v = b'\x01' * hash_size
k = b'\x00' * hash_size
priv = to_bytes_32(secret_exponent)
if val > n:
val -= n
h1 = to_bytes_32(val)
k = hmac.new(k, v + b'\x00' + priv + h1, hash_f).digest()
v = hmac.new(k, v, hash_f).digest()
k = hmac.new(k, v + b'\x01' + priv + h1, hash_f).digest()
v = hmac.new(k, v, hash_f).digest()
while 1:
t = hmac.new(k, v, hash_f).digest()
k1 = from_bytes_32(t)
if k1 >= 1 and k1 < n:
return k1
k = hmac.new(k, v + b'\x00', hash_f).digest()
v = hmac.new(k, v, hash_f).digest()
def _my_sign(generator, secret_exponent, val, _k=None):
"""
Return a signature for the provided hash (val), using the provided
random nonce, _k or generate a deterministic K as needed.
May raise RuntimeError, in which case retrying with a new
random value k is in order.
"""
G = generator
n = G.order()
k = _k or deterministic_make_k(n, secret_exponent, val)
p1 = k * G
r = p1.x()
if r == 0:
raise RuntimeError("amazingly unlucky random number r")
s = (numbertheory.inverse_mod(k, n) *
(val + (secret_exponent * r) % n)) % n
if s == 0:
raise RuntimeError("amazingly unlucky random number s")
return (r, s, p1.y() % 2)
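def _sign_verify_example():
    """
    A hedged, illustrative sketch (not part of this module): round-trip a
    message signature with a throwaway key. Assumes pycoin's Key accepts a
    secret_exponent keyword, as it is used elsewhere in the package.
    """
    key = Key(secret_exponent=12345)
    sig = sign_message(key, message="hello world")
    assert verify_message(key, sig, message="hello world")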
# EOF
|
# -*- coding: utf-8 -*-
from openerp import fields, models, api
class product_catalog_report(models.Model):
_inherit = 'product.product_catalog_report'
category_type = fields.Selection(
[('public_category', 'Public Category'),
('accounting_category', 'Accounting Category')],
'Category Type',
required=True,
default='accounting_category',
)
public_category_ids = fields.Many2many(
'product.public.category',
'product_catalog_report_categories_public',
'product_catalog_report_id',
'category_id',
'Product Categories Public',
)
@api.multi
def prepare_report(self):
self = super(product_catalog_report, self).prepare_report()
if self.category_type == 'public_category':
categories = self.public_category_ids
if self.include_sub_categories and categories:
categories = self.env['product.public.category'].search(
[('id', 'child_of', categories.ids)])
else:
categories = self.category_ids
if self.include_sub_categories and categories:
categories = self.env['product.category'].search(
[('id', 'child_of', categories.ids)])
return self.with_context(
category_ids=categories.ids,
category_type=self.category_type)
|
"""Module containing a preprocessor that removes the outputs from code cells"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
try:
from queue import Empty # Py 3
except ImportError:
from Queue import Empty # Py 2
from IPython.utils.traitlets import List, Unicode
from IPython.nbformat.v4 import output_from_msg
from .base import Preprocessor
from IPython.utils.traitlets import Integer
class ExecutePreprocessor(Preprocessor):
"""
Executes all the cells in a notebook
"""
timeout = Integer(30, config=True,
help="The time to wait (in seconds) for output from executions."
)
extra_arguments = List(Unicode)
def preprocess(self, nb, resources):
from IPython.kernel import run_kernel
kernel_name = nb.metadata.get('kernelspec', {}).get('name', 'python')
self.log.info("Executing notebook with kernel: %s" % kernel_name)
with run_kernel(kernel_name=kernel_name,
extra_arguments=self.extra_arguments,
stderr=open(os.devnull, 'w')) as kc:
self.kc = kc
nb, resources = super(
ExecutePreprocessor, self).preprocess(nb, resources)
return nb, resources
def preprocess_cell(self, cell, resources, cell_index):
"""
Apply a transformation on each code cell. See base.py for details.
"""
if cell.cell_type != 'code':
return cell, resources
try:
outputs = self.run_cell(
self.kc.shell_channel, self.kc.iopub_channel, cell)
except Exception as e:
self.log.error("failed to run cell: " + repr(e))
self.log.error(str(cell.source))
raise
cell.outputs = outputs
return cell, resources
def run_cell(self, shell, iopub, cell):
msg_id = shell.execute(cell.source)
self.log.debug("Executing cell:\n%s", cell.source)
# wait for finish, with timeout
while True:
try:
msg = shell.get_msg(timeout=self.timeout)
except Empty:
self.log.error("Timeout waiting for execute reply")
raise
if msg['parent_header'].get('msg_id') == msg_id:
break
else:
# not our reply
continue
outs = []
while True:
try:
msg = iopub.get_msg(timeout=self.timeout)
except Empty:
self.log.warn("Timeout waiting for IOPub output")
break
if msg['parent_header'].get('msg_id') != msg_id:
# not an output from our execution
continue
msg_type = msg['msg_type']
self.log.debug("output: %s", msg_type)
content = msg['content']
# set the prompt number for the input and the output
if 'execution_count' in content:
cell['execution_count'] = content['execution_count']
if msg_type == 'status':
if content['execution_state'] == 'idle':
break
else:
continue
elif msg_type == 'execute_input':
continue
elif msg_type == 'clear_output':
outs = []
continue
try:
out = output_from_msg(msg)
except ValueError:
self.log.error("unhandled iopub msg: " + msg_type)
else:
outs.append(out)
return outs
|
from flask import Flask, render_template, request, redirect
from sql import select
# Create Flask app
app = Flask(__name__)
# API Blueprint
from api import api
app.register_blueprint(api, url_prefix="/api")
# Load Index page
@app.route("/")
def index():
return render_template("index.html")
# --------------- BILLS --------------- #
# Bills page
@app.route("/bills")
def bills():
bills = select("bills")
return render_template("bills.html", bills=bills)
# Add Bill page
@app.route("/bills/add")
def bills_add():
return render_template("bills_add.html")
# Edit Bill page
@app.route("/bills/edit")
def bills_edit():
return render_template("bills_edit.html")
# --------------- SPENDING --------------- #
# Spending page
@app.route("/spending")
def spending():
spending = select("spending")
return render_template("spending.html", spending=spending)
# Add Spending page
@app.route("/spending/add")
def spending_add():
accounts = select("accounts")
return render_template("spending_add.html", accounts=accounts)
# Edit Spending page
@app.route("/spending/edit")
def spending_edit():
return render_template("spending_edit.html")
# --------------- ACCOUNTS --------------- #
# Accounts page
@app.route("/accounts")
def accounts():
accounts = select("accounts")
return render_template("accounts.html", accounts=accounts)
# Add Account page
@app.route("/accounts/add")
def accounts_add():
return render_template("accounts_add.html")
# Edit Account page
@app.route("/accounts/edit")
def accounts_edit():
return render_template("accounts_edit.html")
# Run Flask app on load
if __name__ == "__main__":
app.run(debug=True, host="0.0.0.0")
|
import sc, random, contextlib, wave, os, math
import shlex, subprocess, signal
import NRTOSCParser3
import numpy as np
import scipy.signal
import scipy.stats
# generator class for random 8-bit values
#
# Pass in initval to seed the generator with a fixed starting value;
# otherwise a random value in [0, 128] is chosen.
#
# call next() to actually make a new random selection
#
class RandomGenerator_8Bit(object):
def __init__(self, initval=-1):
if initval >= 0:
self.val = initval
else:
self.val = random.randint(0,128)
    def next(self, scale=1.0):
        self.val = random.randint(0,128)
        return self.val
def __call__(self): return self.next()
# helper function
def midi2hz(m): return pow(2.0, (m/12.0))
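# note: despite the name, this returns the frequency ratio 2**(m/12) rather than
# an absolute Hz value, e.g. midi2hz(12) == 2.0 (one octave up doubles the ratio)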
# slot assignments for sigmaSynth
ALPHA = 0
C_DELAY = 1
BETA = 2
D_MULT = 3
GAMMA = 4
MS_BINS = 5
class GenomicExplorer:
def __init__(self, anchor, sfilenames, size=20, kdepth=10): #, start_state=[1.0, 0.0, 1.0, 1.0, 1.0, 0.0]
self.anchor = anchor
self.sfpaths = [(anchor + '/snd/' + sfilename) for sfilename in sfilenames]
self.filenames = sfilenames
self.sfinfos = []
for path in self.sfpaths:
with contextlib.closing(wave.open(path,'r')) as f:
frames = f.getnframes()
rate = f.getframerate()
dur = frames/float(rate)
self.sfinfos += [{'rate':rate,'dur':dur}]
self.mutation_prob = 0.05
#self.xover_prob = 0.05
self.depth = kdepth
# 'alpha', 'c_delay', 'beta', 'd_mult', 'gamma', 'ms_bins'
self.parser = NRTOSCParser3.NRTOSCParser3(anchor=self.anchor)
self.rawtable, self.rawmaps, self.dists = dict(), dict(), dict()
print self.sfpaths
print self.sfinfos
self.init_population(size=size)
def init_population(self, size):
self.population = []
for n in range(size):
#start = [random.randrange(500, 1000)*0.001, random.randrange(0,50)*0.001, random.randrange(500, 1000)*0.001, random.randrange(100,1000)*0.01, random.randrange(500, 1000)*0.001, random.randrange(0,5000)*0.01]
self.population += [Genome()] #random seed
# self.population += [Genome(starter)]
self.population[0] = Genome(values=[0,0,0,0,0,0])
self.analyze_individual(0)
self.activate_raw_data(0)
self.compare_all_individuals(aflag=True)
def mutate_pop(self):
for indiv in range(1, len(self.population)):
if random.random() < self.mutation_prob:
print "indiv: ", indiv
self.population[ indiv ].mutate()
self.do_update_cascade(indiv)
def do_update_cascade(self, index, clearedits=False):
if clearedits is True:
self.population[ index ].edits = 0
else:
self.population[ index ].edits += 1
self.analyze_individual( index )
self.activate_raw_data( index )
self.compare_individual_chi_squared( index )
# self.compare_individual( index )
def mate(self, a, b, kill_index):
# cut = random.randint(0,5)
offspring = None
if random.random() < 0.5:
offspring = self.population[a].values[:]
else:
offspring = self.population[b].values[:]
# basic gene selection from 2 parents
for i in range(6):
if random.random() < 0.5:
offspring[i] = self.population[a].values[i]
else:
offspring[i] = self.population[b].values[i]
self.population[kill_index] = Genome(offspring)
self.do_update_cascade(kill_index, True)
def sort_by_distances(self, depth):
sorted_dists = [[k, self.dists[k], self.population[k].age, self.population[k].edits] for k in sorted(self.dists.keys())]
sorted_dists = sorted(sorted_dists[1:], key = lambda row: row[1]) # + (maxedits - row[3])))
return sorted_dists[:depth], sorted_dists[(-1*depth):]
def reproduce(self, depth=25):
kills, duplicates = self.sort_by_distances(depth)
print 'depth: ', depth
# depth # of times: choose 2 random parents to mate and overwrite replacement in unfit individual's slot
for n in range(depth):
print 'num. duplicates: ', len(duplicates)
aidx = duplicates[ random.randint(0, depth-1) ][0]
bidx = duplicates[ random.randint(0, depth-1) ][0]
kidx = kills[ random.randint(0, depth-1) ][0]
self.mate(aidx, bidx, kidx)
def age_pop(self):
for i in range(len(self.population)): self.population[i].age += 1
def iterate(self, iters=1):
sc.quit()
for iter in range(iters):
self.age_pop()
self.mutate_pop()
# self.crossover()
if (iter%20)==0:
print self.population[0].age
self.reproduce(self.depth)
def print_all_individuals(self):
print '== pop ==========================='
for g in self.population: print g
def start_sc(self):
try:
sc.start(verbose=1, spew=1, startscsynth=1)
except OSError: # in case we've already started the synth
print 'QUIT!'
sc.quit()
        print 'sfpaths: ', self.sfpaths
for i, sfpath in enumerate(self.sfpaths):
bnum = sc.loadSnd(sfpath, wait=False)
print 'bnum: ', bnum
            self.sfinfos[i]['bnum'] = bnum
return 1
# |outbus=20, srcbufNum, start=0.0, dur=1.0, transp=1.0, c_delay=0.0, c_decay=0.0, d_mult=1.0, d_amp=0.7, ms_bins=0, alpha=1, beta=1, gamma=1|
def play_genome(self, index):
vals = self.population[index].realvalues
if vals[C_DELAY] < 1.0:
cdelay = 0.0
else:
cdelay = vals[C_DELAY]
decay = 0.9
tr = self.population[index].tratio
if index == 0:
slot = 0
else:
slot = 1
        print '===================\n', self.sfinfos[slot]['dur']
sc.Synth('sigmaSynth',
args=[
                'srcbufNum', self.sfinfos[slot]['bnum'],
                'start', 0,
                'dur', self.sfinfos[slot]['dur']*1000,
'transp', tr,
'c_delay', cdelay,
'c_decay', decay,
'd_mult', vals[D_MULT],
'ms_bins', vals[MS_BINS],
'alpha', vals[ALPHA],
'beta', vals[BETA],
'gamma', vals[GAMMA]])
def analyze_individual(self, index):
# oscpath = os.path.join(self.anchor, 'snd', 'osc', `index`, (os.path.splitext(self.filename)[0] + '_sigmaAnalyzer.osc'))
# mdpath = os.path.join(self.anchor, 'snd', 'md', `index`, self.filename)
vals = self.population[index].realvalues
if vals[C_DELAY] < 1.0:
cdelay = 0.0
else:
cdelay = vals[C_DELAY]
decay = 0.9
tr = self.population[index].tratio
if index == 0:
slot = 0
else:
slot = 1
oscpath, mdpath = self.parser.createNRTScore(self.sfpaths[slot],
index=index,
tratio=tr,
srate=self.sfinfos[slot]['rate'],
duration=self.sfinfos[slot]['dur'],
params=[
'c_delay', cdelay,
'c_decay', decay,
'd_mult', vals[D_MULT],
'ms_bins', vals[MS_BINS],
'alpha', vals[ALPHA],
'beta', vals[BETA],
'gamma', vals[GAMMA]])
cmd = 'scsynth -N ' + oscpath + ' _ _ 44100 WAVE float32 -o 1'
# print cmd
args = shlex.split(cmd)
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) #, shell=True, close_fds=True)
# print 'PID: ', p.pid
rc = p.wait()
# print 'RC: ', rc
if rc == 1:
num_frames = int(math.ceil(self.sfinfos[slot]['dur'] / 0.04 / tr))
# print 'num frames: ', num_frames
self.rawtable[index] = (mdpath, num_frames)
# print self.rawtable
def render_individual(self, index):
vals = self.population[index].realvalues
if vals[C_DELAY] < 1.0:
cdelay = 0.0
else:
cdelay = vals[C_DELAY]
decay = 0.9
tr = self.population[index].tratio
if index == 0:
slot = 0
else:
slot = 1
oscpath, mdpath = self.parser.createNRTScore(self.sfpaths[slot],
index=index,
tratio=tr,
srate=self.sfinfos[slot]['rate'],
duration=self.sfinfos[slot]['dur'],
params=[
'c_delay', cdelay,
'c_decay', decay,
'd_mult', vals[D_MULT],
'ms_bins', vals[MS_BINS],
'alpha', vals[ALPHA],
'beta', vals[BETA],
'gamma', vals[GAMMA]])
cmd = 'scsynth -N ' + oscpath + ' _ ' + os.path.join(self.anchor, 'snd', 'out', (str(index) + '.aiff')) + ' 44100 AIFF int16 -o 1'
args = shlex.split(cmd)
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) #, shell=True, close_fds=True)
rc = p.wait()
if rc == 1:
print 'SUCCESS: ', os.path.join(self.anchor, 'snd', 'out', (str(index) + '.aiff'))
rc = 0
else:
return None
cmd = 'sox -b 16 ' + os.path.join(self.anchor, 'snd', 'out', (str(index) + '.aiff')) + ' ' + os.path.join(self.anchor, 'snd', 'out', (str(index) + '.wav')) # + '; rm ' + os.path.join(self.anchor, 'snd', 'out', (str(index) + '.aiff'))
print cmd
args = shlex.split(cmd)
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) #, shell=True, close_fds=True)
rc = p.wait()
print rc
if rc == 1: print 'DOUBLE SUCCESS!!'
def activate_raw_data(self, index):
mdpath = self.rawtable[index][0]
num_frames = self.rawtable[index][1]
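# Map the analysis file read-only as float32 rows of 25 features per frame;
# the 272-byte offset presumably skips a fixed file header (assumption).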
self.rawmaps[index] = np.memmap(mdpath, dtype=np.float32, mode='r', offset=272, shape=(num_frames, 25))
"""
COMPARE_ALL_INDIVIDUALS:
... to individual in slot 0!
"""
def compare_all_individuals(self, aflag=False):
for i in range(1, len(self.population)):
if aflag:
self.analyze_individual(i)
self.activate_raw_data(i)
# self.compare_individual_chi_squared(i)
self.compare_individual(i)
print self.dists
return self.dists
"""
COMPARE_INDIVIDUAL:
... to individual in the slot that is stipulated by the arg zeroindex!
- by convention, we should usually put what we are comparing to in slot 0
"""
def compare_individual(self, index, zeroindex=0):
i_length = self.rawmaps[index].shape[0]
zr0_length = self.rawmaps[zeroindex].shape[0]
print i_length, ' | ', zr0_length
# i1_length = self.rawmaps[index-1].shape[0] ## <--- NEIGHBOR comparison
# print i_length, ' | ', i1_length, ' | ', zr0_length
# based on length comparison, resample the mutated individuals so that they are same length as the zeroth individual (that does not mutate)
# if indiv. is longer, resample indiv., take abs. diff., sum, div. by length
if zr0_length < i_length:
zero_dist = float(np.sum(np.abs(scipy.signal.signaltools.resample(self.rawmaps[index], zr0_length, window='hanning') - self.rawmaps[0]))) / float(zr0_length)
# print self.dists[index]
# if zeroth indiv. is longer, resample zeroth indiv., take abs. diff., sum, div. by length, then do same comparison with "neighbor"
elif i_length < zr0_length:
zero_dist = float(np.sum(np.abs(self.rawmaps[index] - scipy.signal.signaltools.resample(self.rawmaps[0], i_length, window='hanning')))) / float(i_length)
else:
# otherwise, take abs. diff., sum, div. by length, then do same comparison with "neighbor"
# print 'ZERO'
zero_dist = float(np.sum(np.abs(self.rawmaps[index][:,1:] - self.rawmaps[0][:,1:]))) / float(zr0_length)
### CHECK THIS DISTANCE CALCULATION!!!!!!
power_dist = float(np.sqrt(np.sum(np.abs(self.rawmaps[index][:,0] - self.rawmaps[0][:,0])))) / float(zr0_length)
print (zero_dist, (power_dist * 10.0))
zero_dist += (power_dist * 10.0)
# if i1_length < i_length:
# neighbor_dist = float(np.sum(np.abs(scipy.signal.signaltools.resample(self.rawmaps[index-1], i_length, window='hanning') - self.rawmaps[index]))) / float(i_length)
# elif i_length < i1_length:
# neighbor_dist = float(np.sum(np.abs(self.rawmaps[index-1] - scipy.signal.signaltools.resample(self.rawmaps[index], i1_length, window='hanning')))) / float(i1_length)
# else:
# print 'ZERO-NEIGHBOR'
# neighbor_dist = float(np.sum(np.abs(self.rawmaps[index-1] - scipy.signal.signaltools.resample(self.rawmaps[index], i1_length, window='hanning')))) / float(i1_length)
# self.dists[index] = zero_dist + neighbor_dist
self.dists[index] = zero_dist
def compare_individual_chi_squared(self, index):
i_length = self.rawmaps[index].shape[0]
i1_length = self.rawmaps[index-1].shape[0]
zr0_length = self.rawmaps[0].shape[0]
# print i_length, '|', zr0_length
# based on length comparison, resample the mutated individuals so that they are same length as the zeroth individual (that does not mutate)
# if indiv. is longer, resample indiv., take abs. diff., sum, div. by length
if zr0_length < i_length:
zero_dist = scipy.stats.mstats.chisquare(scipy.signal.signaltools.resample(self.rawmaps[index], zr0_length, window='hanning'), self.rawmaps[0])
# print self.dists[index]
# if zeroth indiv. is longer, resample zeroth indiv., take abs. diff., sum, div. by length, then do same comparison with "neighbor"
elif i_length < zr0_length:
zero_dist = scipy.stats.mstats.chisquare(self.rawmaps[index], scipy.signal.signaltools.resample(self.rawmaps[0], i_length, window='hanning'))
else:
# otherwise, take abs. diff., sum, div. by length, then do same comparison with "neighbor"
print 'CHI-ZERO'
zero_dist = scipy.stats.mstats.chisquare(self.rawmaps[index], self.rawmaps[0])
if i1_length < i_length:
neighbor_dist = scipy.stats.mstats.chisquare(scipy.signal.signaltools.resample(self.rawmaps[index-1], i_length, window='hanning') - self.rawmaps[index])
elif i_length < i1_length:
neighbor_dist = scipy.stats.mstats.chisquare(self.rawmaps[index-1], scipy.signal.signaltools.resample(self.rawmaps[index], i1_length, window='hanning'))
else:
print 'CHI-NEIGHBOR'
neighbor_dist = scipy.stats.mstats.chisquare(self.rawmaps[index-1], scipy.signal.signaltools.resample(self.rawmaps[index], i1_length, window='hanning'))
nsum = np.sum(np.abs(neighbor_dist[0].data[:24]))
zsum = np.sum(np.abs(zero_dist[0].data[:24]))
nasum = neighbor_dist[0].data[24]
zasum = zero_dist[0].data[24]
self.dists[index] = nsum + zsum - (24.0 * nasum) - (24.0 * zasum)
class Genome:
def __init__(self, values=None, slotranges=[[1.0,0.5],[0.0,0.05],[1.0, 0.5],[1.0,10.],[1.0,0.5],[0.0,50.]]):
# """
# 'alpha', 'c_delay', 'beta', 'd_mult', 'gamma', 'ms_bins'
# [[1.0,0.5],[0.0,0.05],[1.0,0.5],[1.0,10.],[1.0,0.5],[0.0,50.]]
# """
self.tratio = 1.0 # CHECK THIS... WHY IS IT HERE/in Hertz!!! ???
self.boundaries = slotranges
self.generators = [RandomGenerator_8Bit(-1) for n in range(6)] ### CONSTANT WARNING
#StaticGenerator_8Bit(VAL) ???
if values is None:
print 'values is None, generators are seeded randomly!'
self.values = [gen.val for gen in self.generators]
else:
self.values = values
self.bitlength = len(self.values) * 8
self.binarystring = vals_to_binarystring(self.values)
# print self.values
# print type(self.values[0])
self.realvalues = [lininterp(val,self.boundaries[i]) for i,val in enumerate(self.values)]
self.age = 0
self.edits = 0
def __repr__(self):
print tuple(self.values)
print ((self.age, self.edits) + tuple(self.values) + tuple(self.binarystring))
return "%9i/%9i || %.6f|%.6f|%.6f|%.6f|%.6f|%.6f" % ((self.age, self.edits) + tuple(self.realvalues)) # + tuple(self.binarystring)
def mutate(self):
pos = random.randint(0,(self.bitlength-1))
# flip bit
print 'bit flipped to: ', abs(1 - int(self.binarystring[pos],2))
self.binarystring = substitute_char_in_string(self.binarystring, pos, abs(1 - int(self.binarystring[pos],2)))
# recalc binary string
self.values = binarystring_to_vals(self.binarystring)
print "values: ", self.values
self.realvalues = [lininterp(val,self.boundaries[i]) for i,val in enumerate(self.values)]
# def xover_sub(self, pos, incomingSeq, headortail=0):
# if headortail == 0:
# print '<<>> ', self.binarystring
# print '<<>> ', pos
# print '<<>> ', incomingSeq
# self.binarystring = incomingSeq[:pos] + self.binarystring[pos:]
# else:
# print '<<>> ', self.binarystring
# print '<<>> ', pos
# print '<<>> ', incomingSeq
# self.binarystring = self.binarystring[:pos] + incomingSeq[:(len(self.binarystring)-pos)]
# # recalc binary string
# print '==== ', self.binarystring
# self.values = binarystring_to_vals(self.binarystring)
# print "values: ", self.values
# self.realvalues = [lininterp(val,self.boundaries[i]) for i,val in enumerate(self.values)]
def lininterp(val,bounds=[0.,1.]):
return (((val/128.0)*(bounds[1]-bounds[0]))+bounds[0])
def substitute_char_in_string(s, p, c):
l = list(s)
l[p] = str(c)
return "".join(l)
# def substitute_string_head(s, p, snew):
# s1 = snew[:]
# print '++++ ', s1
# s2 = s[p:]
# print '++++ ', s2
# return (s1 + s2)[:len(s)]
#
# def substitute_string_tail(s, p, snew):
# s1 = s[:p]
# print '==== ', s1
# print len(s)
# print p
# s2 = snew[:(len(s)-p)]
# print '==== ', s2
# return (s1 + s2)[:len(s)]
def vals_to_binarystring(vals = [0, 0, 0, 0, 0]):
return ''.join((("{0:08b}".format(val)) for val in vals))
# never a '0bXXX' string!
def binarystring_to_vals(binstring):
mystring = binstring[:]
length = len(mystring) / 8 # ignore the last digits if it doesn't chunk into 8-item substrings
res = []
# print mystring[(n*8):((n+1)*8)]
return [int(mystring[(n*8):((n+1)*8)], 2) for n in range(length)]
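# Illustrative sketch (added, not part of the original module): a minimal round-trip check of
# the 8-bit genome helpers defined above, assuming values are ints in [0, 255]. The function
# name is hypothetical and exists only for demonstration.
def _demo_genome_helpers():
    vals = [3, 130, 0]
    bits = vals_to_binarystring(vals)             # '000000111000001000000000'
    assert binarystring_to_vals(bits) == vals     # decoding recovers the original values
    # lininterp maps an 8-bit value into a slot range; 64 lands halfway between the bounds
    assert lininterp(64, [0.0, 50.0]) == 25.0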
# if __name__=='__main__':
# genex = GenomicExplorer('/Users/kfl/dev/python/sc-0.3.1/genomic', 'test.wav')
# genex.analyze_genome(1)
|
from datetime import date
from django.conf import settings
from kitsune.kbadge.tests import BadgeFactory
from kitsune.questions.badges import QUESTIONS_BADGES
from kitsune.questions.tests import AnswerFactory
from kitsune.sumo.tests import TestCase
from kitsune.users.tests import UserFactory
class TestQuestionsBadges(TestCase):
def test_answer_badge(self):
"""Verify the Support Forum Badge is awarded properly."""
# Create the user and badge.
year = date.today().year
u = UserFactory()
badge_template = QUESTIONS_BADGES['answer-badge']
b = BadgeFactory(
slug=badge_template['slug'].format(year=year),
title=badge_template['title'].format(year=year),
description=badge_template['description'].format(year=year))
# Create one fewer answer than required to earn the badge
AnswerFactory.create_batch(settings.BADGE_LIMIT_SUPPORT_FORUM - 1, creator=u)
# User should NOT have the badge yet.
assert not b.is_awarded_to(u)
# Create 1 more answer.
AnswerFactory(creator=u)
# User should have the badge now.
assert b.is_awarded_to(u)
|
import operator
import os
import pprint
import random
import signal
import time
import uuid
import logging
import pytest
import psutil
from collections import defaultdict, namedtuple
from multiprocessing import Process, Queue
from queue import Empty, Full
from cassandra import ConsistencyLevel, WriteTimeout
from cassandra.query import SimpleStatement
from dtest import RUN_STATIC_UPGRADE_MATRIX, Tester
from tools.misc import generate_ssl_stores, new_node
from .upgrade_base import switch_jdks
from .upgrade_manifest import (build_upgrade_pairs, current_2_0_x,
current_2_1_x, current_2_2_x, current_3_0_x,
indev_2_2_x, indev_3_x)
logger = logging.getLogger(__name__)
def data_writer(tester, to_verify_queue, verification_done_queue, rewrite_probability=0):
"""
Process for writing/rewriting data continuously.
Pushes to a queue to be consumed by data_checker.
Pulls from a queue of already-verified rows written by data_checker that it can overwrite.
Intended to be run using multiprocessing.
"""
# 'tester' is a cloned object so we shouldn't be inappropriately sharing anything with another process
session = tester.patient_cql_connection(tester.node1, keyspace="upgrade", protocol_version=tester.protocol_version)
prepared = session.prepare("UPDATE cf SET v=? WHERE k=?")
prepared.consistency_level = ConsistencyLevel.QUORUM
def handle_sigterm(signum, frame):
# need to close queue gracefully if possible, or the data_checker process
# can't seem to empty the queue and test failures result.
to_verify_queue.close()
exit(0)
signal.signal(signal.SIGTERM, handle_sigterm)
while True:
try:
key = None
if (rewrite_probability > 0) and (random.randint(0, 100) <= rewrite_probability):
try:
key = verification_done_queue.get_nowait()
except Empty:
# we wanted a re-write but the re-writable queue was empty. oh well.
pass
key = key or uuid.uuid4()
val = uuid.uuid4()
session.execute(prepared, (val, key))
to_verify_queue.put_nowait((key, val,))
except Exception:
logger.debug("Error in data writer process!")
to_verify_queue.close()
raise
def data_checker(tester, to_verify_queue, verification_done_queue):
"""
Process for checking data continuously.
Pulls from a queue written to by data_writer to know what to verify.
Pushes to a queue to tell data_writer what's been verified and could be a candidate for re-writing.
Intended to be run using multiprocessing.
"""
# 'tester' is a cloned object so we shouldn't be inappropriately sharing anything with another process
session = tester.patient_cql_connection(tester.node1, keyspace="upgrade", protocol_version=tester.protocol_version)
prepared = session.prepare("SELECT v FROM cf WHERE k=?")
prepared.consistency_level = ConsistencyLevel.QUORUM
def handle_sigterm(signum, frame):
# need to close queue gracefully if possible, or the data_checker process
# can't seem to empty the queue and test failures result.
verification_done_queue.close()
exit(0)
signal.signal(signal.SIGTERM, handle_sigterm)
while True:
try:
# here we could block, but if the writer process terminates early with an empty queue
# we would end up blocking indefinitely
(key, expected_val) = to_verify_queue.get_nowait()
actual_val = session.execute(prepared, (key,))[0][0]
except Empty:
time.sleep(0.1) # let's not eat CPU if the queue is empty
continue
except Exception:
logger.debug("Error in data verifier process!")
verification_done_queue.close()
raise
else:
try:
verification_done_queue.put_nowait(key)
except Full:
# the rewritable queue is full, not a big deal. drop this one.
# we keep the rewritable queue held to a modest max size
# and allow dropping some rewritables because we don't want to
# rewrite rows in the same sequence as originally written
pass
tester.assertEqual(expected_val, actual_val, "Data did not match expected value!")
def counter_incrementer(tester, to_verify_queue, verification_done_queue, rewrite_probability=0):
"""
Process for incrementing counters continuously.
Pushes to a queue to be consumed by counter_checker.
Pulls from a queue of already-verified rows written by data_checker that it can increment again.
Intended to be run using multiprocessing.
"""
# 'tester' is a cloned object so we shouldn't be inappropriately sharing anything with another process
session = tester.patient_cql_connection(tester.node1, keyspace="upgrade", protocol_version=tester.protocol_version)
prepared = session.prepare("UPDATE countertable SET c = c + 1 WHERE k1=?")
prepared.consistency_level = ConsistencyLevel.QUORUM
def handle_sigterm(signum, frame):
# need to close queue gracefully if possible, or the data_checker process
# can't seem to empty the queue and test failures result.
to_verify_queue.close()
exit(0)
signal.signal(signal.SIGTERM, handle_sigterm)
while True:
try:
key = None
count = 0 # this will get set to actual last known count if we do a re-write
if (rewrite_probability > 0) and (random.randint(0, 100) <= rewrite_probability):
try:
key, count = verification_done_queue.get_nowait()
except Empty:
# we wanted a re-write but the re-writable queue was empty. oh well.
pass
key = key or uuid.uuid4()
session.execute(prepared, (key,))
to_verify_queue.put_nowait((key, count + 1,))
except Exception:
logger.debug("Error in counter incrementer process!")
to_verify_queue.close()
raise
def counter_checker(tester, to_verify_queue, verification_done_queue):
"""
Process for checking counters continuously.
Pulls from a queue written to by counter_incrementer to know what to verify.
Pushes to a queue to tell counter_incrementer what's been verified and could be a candidate for incrementing again.
Intended to be run using multiprocessing.
"""
# 'tester' is a cloned object so we shouldn't be inappropriately sharing anything with another process
session = tester.patient_cql_connection(tester.node1, keyspace="upgrade", protocol_version=tester.protocol_version)
prepared = session.prepare("SELECT c FROM countertable WHERE k1=?")
prepared.consistency_level = ConsistencyLevel.QUORUM
def handle_sigterm(signum, frame):
# need to close queue gracefully if possible, or the data_checker process
# can't seem to empty the queue and test failures result.
verification_done_queue.close()
exit(0)
signal.signal(signal.SIGTERM, handle_sigterm)
while True:
try:
# here we could block, but if the writer process terminates early with an empty queue
# we would end up blocking indefinitely
(key, expected_count) = to_verify_queue.get_nowait()
actual_count = session.execute(prepared, (key,))[0][0]
except Empty:
time.sleep(0.1) # let's not eat CPU if the queue is empty
continue
except Exception:
logger.debug("Error in counter verifier process!")
verification_done_queue.close()
raise
else:
tester.assertEqual(expected_count, actual_count, "Data did not match expected value!")
try:
verification_done_queue.put_nowait((key, actual_count))
except Full:
# the rewritable queue is full, not a big deal. drop this one.
# we keep the rewritable queue held to a modest max size
# and allow dropping some rewritables because we don't want to
# rewrite rows in the same sequence as originally written
pass
@pytest.mark.upgrade_test
@pytest.mark.resource_intensive
class TestUpgrade(Tester):
"""
Upgrades a 3-node Murmur3Partitioner cluster through versions specified in test_version_metas.
"""
test_version_metas = None # set on init to know which versions to use
subprocs = None # holds any subprocesses, for status checking and cleanup
extra_config = None # holds a non-mutable structure that can be cast as dict()
@pytest.fixture(autouse=True)
def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
fixture_dtest_setup.ignore_log_patterns = (
# This one occurs if we do a non-rolling upgrade, the node
# it's trying to send the migration to hasn't started yet,
# and when it does, it gets replayed and everything is fine.
r'Can\'t send migration request: node.*is down',
r'RejectedExecutionException.*ThreadPoolExecutor has shut down',
# Occurs due to test/ccm writing topo on down nodes
r'Cannot update data center or rack from.*for live host',
# Normal occurrence. See CASSANDRA-12026. Likely won't be needed after C* 4.0.
r'Unknown column cdc during deserialization',
)
def setUp(self):
logger.debug("Upgrade test beginning, setting CASSANDRA_VERSION to {}, and jdk to {}. (Prior values will be restored after test)."
.format(self.test_version_metas[0].version, self.test_version_metas[0].java_version))
os.environ['CASSANDRA_VERSION'] = self.test_version_metas[0].version
switch_jdks(self.test_version_metas[0].java_version)
super(TestUpgrade, self).setUp()
logger.debug("Versions to test (%s): %s" % (type(self), str([v.version for v in self.test_version_metas])))
def init_config(self):
Tester.init_config(self)
if self.extra_config is not None:
logger.debug("Setting extra configuration options:\n{}".format(
pprint.pformat(dict(self.extra_config), indent=4))
)
self.cluster.set_configuration_options(
values=dict(self.extra_config)
)
def test_parallel_upgrade(self):
"""
Test upgrading cluster all at once (requires cluster downtime).
"""
self.upgrade_scenario()
def test_rolling_upgrade(self):
"""
Test rolling upgrade of the cluster, so we have mixed versions part way through.
"""
self.upgrade_scenario(rolling=True)
def test_parallel_upgrade_with_internode_ssl(self):
"""
Test upgrading cluster all at once (requires cluster downtime), with internode ssl.
"""
self.upgrade_scenario(internode_ssl=True)
def test_rolling_upgrade_with_internode_ssl(self):
"""
Rolling upgrade test using internode ssl.
"""
self.upgrade_scenario(rolling=True, internode_ssl=True)
def upgrade_scenario(self, populate=True, create_schema=True, rolling=False, after_upgrade_call=(), internode_ssl=False):
# Record the rows we write as we go:
self.row_values = set()
cluster = self.cluster
if cluster.version() >= '3.0':
cluster.set_configuration_options({'enable_user_defined_functions': 'true',
'enable_scripted_user_defined_functions': 'true'})
elif cluster.version() >= '2.2':
cluster.set_configuration_options({'enable_user_defined_functions': 'true'})
if internode_ssl:
logger.debug("***using internode ssl***")
generate_ssl_stores(self.fixture_dtest_setup.test_path)
self.cluster.enable_internode_ssl(self.fixture_dtest_setup.test_path)
if populate:
# Start with 3 node cluster
logger.debug('Creating cluster (%s)' % self.test_version_metas[0].version)
cluster.populate(3)
[node.start(use_jna=True, wait_for_binary_proto=True) for node in cluster.nodelist()]
else:
logger.debug("Skipping cluster creation (should already be built)")
# add nodes to self for convenience
for i, node in enumerate(cluster.nodelist(), 1):
node_name = 'node' + str(i)
setattr(self, node_name, node)
if create_schema:
if rolling:
self._create_schema_for_rolling()
else:
self._create_schema()
else:
logger.debug("Skipping schema creation (should already be built)")
time.sleep(5) # sigh...
self._log_current_ver(self.test_version_metas[0])
if rolling:
# start up processes to write and verify data
write_proc, verify_proc, verification_queue = self._start_continuous_write_and_verify(wait_for_rowcount=5000)
# upgrade through versions
for version_meta in self.test_version_metas[1:]:
for num, node in enumerate(self.cluster.nodelist()):
# sleep (sigh) because driver needs extra time to keep up with topo and make quorum possible
# this is ok, because a real world upgrade would proceed much slower than this programmatic one
# additionally this should provide more time for timeouts and other issues to crop up as well, which we could
# possibly "speed past" in an overly fast upgrade test
time.sleep(60)
self.upgrade_to_version(version_meta, partial=True, nodes=(node,), internode_ssl=internode_ssl)
self._check_on_subprocs(self.fixture_dtest_setup.subprocs)
logger.debug('Successfully upgraded %d of %d nodes to %s' %
(num + 1, len(self.cluster.nodelist()), version_meta.version))
self.cluster.set_install_dir(version=version_meta.version)
# Stop write processes
write_proc.terminate()
# wait for the verification queue to empty (and check all rows) before continuing
self._wait_until_queue_condition('writes pending verification', verification_queue, operator.le, 0, max_wait_s=1200)
self._check_on_subprocs([verify_proc]) # make sure the verification processes are running still
self._terminate_subprocs()
# not a rolling upgrade, do everything in parallel:
else:
# upgrade through versions
for version_meta in self.test_version_metas[1:]:
self._write_values()
self._increment_counters()
self.upgrade_to_version(version_meta, internode_ssl=internode_ssl)
self.cluster.set_install_dir(version=version_meta.version)
self._check_values()
self._check_counters()
self._check_select_count()
# run custom post-upgrade callables
for call in after_upgrade_call:
call()
logger.debug('All nodes successfully upgraded to %s' % version_meta.version)
self._log_current_ver(version_meta)
cluster.stop()
def tearDown(self):
# just to be super sure we get cleaned up
self._terminate_subprocs()
super(TestUpgrade, self).tearDown()
def _check_on_subprocs(self, subprocs):
"""
Check on given subprocesses.
If any are not alive, we'll go ahead and terminate any remaining alive subprocesses since this test is going to fail.
"""
subproc_statuses = [s.is_alive() for s in subprocs]
if not all(subproc_statuses):
message = "A subprocess has terminated early. Subprocess statuses: "
for s in subprocs:
message += "{name} (is_alive: {aliveness}), ".format(name=s.name, aliveness=s.is_alive())
message += "attempting to terminate remaining subprocesses now."
self._terminate_subprocs()
raise RuntimeError(message)
def _terminate_subprocs(self):
for s in self.fixture_dtest_setup.subprocs:
if s.is_alive():
try:
psutil.Process(s.pid).kill() # with fire damnit
except Exception:
logger.debug("Error terminating subprocess. There could be a lingering process.")
pass
def upgrade_to_version(self, version_meta, partial=False, nodes=None, internode_ssl=False):
"""
Upgrade Nodes - if *partial* is True, only upgrade those nodes
that are specified by *nodes*, otherwise ignore *nodes* specified
and upgrade all nodes.
"""
logger.debug('Upgrading {nodes} to {version}'.format(nodes=[n.name for n in nodes] if nodes is not None else 'all nodes', version=version_meta.version))
switch_jdks(version_meta.java_version)
logger.debug("JAVA_HOME: " + os.environ.get('JAVA_HOME'))
if not partial:
nodes = self.cluster.nodelist()
for node in nodes:
logger.debug('Shutting down node: ' + node.name)
node.drain()
node.watch_log_for("DRAINED")
node.stop(wait_other_notice=False)
for node in nodes:
node.set_install_dir(version=version_meta.version)
logger.debug("Set new cassandra dir for %s: %s" % (node.name, node.get_install_dir()))
if internode_ssl and version_meta.version >= '4.0':
node.set_configuration_options({'server_encryption_options': {'enabled': True, 'enable_legacy_ssl_storage_port': True}})
# hacky? yes. We could probably extend ccm to allow this publicly.
# the topology file needs to be written before any nodes are started
# otherwise they won't be grouped into dc's properly for multi-dc tests
self.cluster._Cluster__update_topology_files()
# Restart nodes on new version
for node in nodes:
logger.debug('Starting %s on new version (%s)' % (node.name, version_meta.version))
# Setup log4j / logback again (necessary moving from 2.0 -> 2.1):
node.set_log_level("INFO")
node.start(wait_other_notice=240, wait_for_binary_proto=True)
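# upgradesstables rewrites sstables into the new version's on-disk format;
# '-a' also rewrites sstables that are already on the current version.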
node.nodetool('upgradesstables -a')
def _log_current_ver(self, current_version_meta):
"""
Logs where we currently are in the upgrade path, surrounding the current branch/tag, like ***sometag***
"""
vers = [m.version for m in self.test_version_metas]
curr_index = vers.index(current_version_meta.version)
logger.debug(
"Current upgrade path: {}".format(
vers[:curr_index] + ['***' + current_version_meta.version + '***'] + vers[curr_index + 1:]))
def _create_schema_for_rolling(self):
"""
Slightly different schema variant for testing rolling upgrades with quorum reads/writes.
"""
session = self.patient_cql_connection(self.node2, protocol_version=self.protocol_version)
session.execute("CREATE KEYSPACE upgrade WITH replication = {'class':'SimpleStrategy', 'replication_factor':3};")
session.execute('use upgrade')
session.execute('CREATE TABLE cf ( k uuid PRIMARY KEY, v uuid )')
session.execute('CREATE INDEX vals ON cf (v)')
session.execute("""
CREATE TABLE countertable (
k1 uuid,
c counter,
PRIMARY KEY (k1)
);""")
def _create_schema(self):
session = self.patient_cql_connection(self.node2, protocol_version=self.protocol_version)
session.execute("CREATE KEYSPACE upgrade WITH replication = {'class':'SimpleStrategy', 'replication_factor':2};")
session.execute('use upgrade')
session.execute('CREATE TABLE cf ( k int PRIMARY KEY, v text )')
session.execute('CREATE INDEX vals ON cf (v)')
session.execute("""
CREATE TABLE countertable (
k1 text,
k2 int,
c counter,
PRIMARY KEY (k1, k2)
);""")
def _write_values(self, num=100):
session = self.patient_cql_connection(self.node2, protocol_version=self.protocol_version)
session.execute("use upgrade")
for i in range(num):
x = len(self.row_values) + 1
session.execute("UPDATE cf SET v='%d' WHERE k=%d" % (x, x))
self.row_values.add(x)
def _check_values(self, consistency_level=ConsistencyLevel.ALL):
for node in self.cluster.nodelist():
session = self.patient_cql_connection(node, protocol_version=self.protocol_version)
session.execute("use upgrade")
for x in self.row_values:
query = SimpleStatement("SELECT k,v FROM cf WHERE k=%d" % x, consistency_level=consistency_level)
result = session.execute(query)
k, v = result[0]
assert x == k
assert str(x) == v
def _wait_until_queue_condition(self, label, queue, opfunc, required_len, max_wait_s=600):
"""
Waits up to max_wait_s for queue size to return True when evaluated against a condition function from the operator module.
Label is just a string identifier for easier debugging.
On Mac OS X we may not be able to check the queue size, in which case this will not block.
If time runs out, raises RuntimeError.
"""
wait_end_time = time.time() + max_wait_s
while time.time() < wait_end_time:
try:
qsize = queue.qsize()
except NotImplementedError:
logger.debug("Queue size may not be checkable on Mac OS X. Test will continue without waiting.")
break
if opfunc(qsize, required_len):
logger.debug("{} queue size ({}) is '{}' to {}. Continuing.".format(label, qsize, opfunc.__name__, required_len))
break
if divmod(round(time.time()), 30)[1] == 0:
logger.debug("{} queue size is at {}, target is to reach '{}' {}".format(label, qsize, opfunc.__name__, required_len))
time.sleep(0.1)
continue
else:
raise RuntimeError("Ran out of time waiting for queue size ({}) to be '{}' to {}. Aborting.".format(qsize, opfunc.__name__, required_len))
def _start_continuous_write_and_verify(self, wait_for_rowcount=0, max_wait_s=600):
"""
Starts a writer process, a verifier process, a queue to track writes,
and a queue to track successful verifications (which are rewrite candidates).
wait_for_rowcount provides a number of rows to write before unblocking and continuing.
Returns the writer process, verifier process, and the to_verify_queue.
"""
# queue of writes to be verified
to_verify_queue = Queue()
# queue of verified writes, which are update candidates
verification_done_queue = Queue(maxsize=500)
writer = Process(target=data_writer, args=(self, to_verify_queue, verification_done_queue, 25))
# daemon subprocesses are killed automagically when the parent process exits
writer.daemon = True
self.fixture_dtest_setup.subprocs.append(writer)
writer.start()
if wait_for_rowcount > 0:
self._wait_until_queue_condition('rows written (but not verified)', to_verify_queue, operator.ge, wait_for_rowcount, max_wait_s=max_wait_s)
verifier = Process(target=data_checker, args=(self, to_verify_queue, verification_done_queue))
# daemon subprocesses are killed automagically when the parent process exits
verifier.daemon = True
self.fixture_dtest_setup.subprocs.append(verifier)
verifier.start()
return writer, verifier, to_verify_queue
def _start_continuous_counter_increment_and_verify(self, wait_for_rowcount=0, max_wait_s=600):
"""
Starts a counter incrementer process, a verifier process, a queue to track writes,
and a queue to track successful verifications (which are re-increment candidates).
Returns the writer process, verifier process, and the to_verify_queue.
"""
# queue of writes to be verified
to_verify_queue = Queue()
# queue of verified writes, which are update candidates
verification_done_queue = Queue(maxsize=500)
incrementer = Process(target=counter_incrementer, args=(self, to_verify_queue, verification_done_queue, 25))
# daemon subprocesses are killed automagically when the parent process exits
incrementer.daemon = True
self.fixture_dtest_setup.subprocs.append(incrementer)
incrementer.start()
if wait_for_rowcount > 0:
self._wait_until_queue_condition('counters incremented (but not verified)', to_verify_queue, operator.ge, wait_for_rowcount, max_wait_s=max_wait_s)
count_verifier = Process(target=counter_checker, args=(self, to_verify_queue, verification_done_queue))
# daemon subprocesses are killed automagically when the parent process exits
count_verifier.daemon = True
self.fixture_dtest_setup.subprocs.append(count_verifier)
count_verifier.start()
return incrementer, count_verifier, to_verify_queue
def _increment_counters(self, opcount=25000):
logger.debug("performing {opcount} counter increments".format(opcount=opcount))
session = self.patient_cql_connection(self.node2, protocol_version=self.protocol_version)
session.execute("use upgrade;")
update_counter_query = ("UPDATE countertable SET c = c + 1 WHERE k1='{key1}' and k2={key2}")
self.expected_counts = {}
for i in range(10):
self.expected_counts[uuid.uuid4()] = defaultdict(int)
fail_count = 0
for i in range(opcount):
key1 = random.choice(list(self.expected_counts.keys()))
key2 = random.randint(1, 10)
try:
query = SimpleStatement(update_counter_query.format(key1=key1, key2=key2), consistency_level=ConsistencyLevel.ALL)
session.execute(query)
except WriteTimeout:
fail_count += 1
else:
self.expected_counts[key1][key2] += 1
if fail_count > 100:
break
assert fail_count < 100, "Too many counter increment failures"
def _check_counters(self):
logger.debug("Checking counter values...")
session = self.patient_cql_connection(self.node2, protocol_version=self.protocol_version)
session.execute("use upgrade;")
for key1 in list(self.expected_counts.keys()):
for key2 in list(self.expected_counts[key1].keys()):
expected_value = self.expected_counts[key1][key2]
query = SimpleStatement("SELECT c from countertable where k1='{key1}' and k2={key2};".format(key1=key1, key2=key2),
consistency_level=ConsistencyLevel.ONE)
results = session.execute(query)
if results is not None:
actual_value = results[0][0]
else:
# counter wasn't found
actual_value = None
assert actual_value == expected_value
def _check_select_count(self, consistency_level=ConsistencyLevel.ALL):
logger.debug("Checking SELECT COUNT(*)")
session = self.patient_cql_connection(self.node2, protocol_version=self.protocol_version)
session.execute("use upgrade;")
expected_num_rows = len(self.row_values)
countquery = SimpleStatement("SELECT COUNT(*) FROM cf;", consistency_level=consistency_level)
result = session.execute(countquery)
if result is not None:
actual_num_rows = result[0][0]
assert actual_num_rows == expected_num_rows, "SELECT COUNT(*) returned %s when expecting %s" % (actual_num_rows, expected_num_rows)
else:
self.fail("Count query did not return")
class BootstrapMixin(object):
"""
Can be mixed into UpgradeTester or a subclass thereof to add bootstrap tests.
Using this class is not currently feasible on lengthy upgrade paths, as each
version bump adds a node and this will eventually exhaust resources.
"""
def _bootstrap_new_node(self):
# Check we can bootstrap a new node on the upgraded cluster:
logger.debug("Adding a node to the cluster")
nnode = new_node(self.cluster, remote_debug_port=str(2000 + len(self.cluster.nodes)))
nnode.start(use_jna=True, wait_other_notice=240, wait_for_binary_proto=True)
self._write_values()
self._increment_counters()
self._check_values()
self._check_counters()
def _bootstrap_new_node_multidc(self):
# Check we can bootstrap a new node on the upgraded cluster:
logger.debug("Adding a node to the cluster")
nnode = new_node(self.cluster, remote_debug_port=str(2000 + len(self.cluster.nodes)), data_center='dc2')
nnode.start(use_jna=True, wait_other_notice=240, wait_for_binary_proto=True)
self._write_values()
self._increment_counters()
self._check_values()
self._check_counters()
def test_bootstrap(self):
# try and add a new node
self.upgrade_scenario(after_upgrade_call=(self._bootstrap_new_node,))
def test_bootstrap_multidc(self):
# try and add a new node
# multi dc, 2 nodes in each dc
cluster = self.cluster
if cluster.version() >= '3.0':
cluster.set_configuration_options({'enable_user_defined_functions': 'true',
'enable_scripted_user_defined_functions': 'true'})
elif cluster.version() >= '2.2':
cluster.set_configuration_options({'enable_user_defined_functions': 'true'})
cluster.populate([2, 2])
[node.start(use_jna=True, wait_for_binary_proto=True) for node in self.cluster.nodelist()]
self._multidc_schema_create()
self.upgrade_scenario(populate=False, create_schema=False, after_upgrade_call=(self._bootstrap_new_node_multidc,))
def _multidc_schema_create(self):
session = self.patient_cql_connection(self.cluster.nodelist()[0], protocol_version=self.protocol_version)
if self.cluster.version() >= '1.2':
# DDL for C* 1.2+
session.execute("CREATE KEYSPACE upgrade WITH replication = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':2};")
else:
# DDL for C* 1.1
session.execute("""CREATE KEYSPACE upgrade WITH strategy_class = 'NetworkTopologyStrategy'
AND strategy_options:'dc1':1
AND strategy_options:'dc2':2;
""")
session.execute('use upgrade')
session.execute('CREATE TABLE cf ( k int PRIMARY KEY , v text )')
session.execute('CREATE INDEX vals ON cf (v)')
session.execute("""
CREATE TABLE countertable (
k1 text,
k2 int,
c counter,
PRIMARY KEY (k1, k2)
);""")
def create_upgrade_class(clsname, version_metas, protocol_version,
bootstrap_test=False, extra_config=None):
"""
Dynamically creates a test subclass for testing the given versions.
'clsname' is the name of the new class.
'protocol_version' is an int.
'bootstrap_test' is a boolean, if True bootstrap testing will be included. Default False.
'version_metas' is a list of version metas ccm will recognize, to be upgraded in order.
'extra_config' is a tuple of config options that can (eventually) be cast as a dict,
e.g. (('partitioner', 'org.apache.cassandra.dht.Murmur3Partitioner'),)
"""
if extra_config is None:
extra_config = (('partitioner', 'org.apache.cassandra.dht.Murmur3Partitioner'),)
if bootstrap_test:
parent_classes = (TestUpgrade, BootstrapMixin)
else:
parent_classes = (TestUpgrade,)
# short names for debug output
parent_class_names = [cls.__name__ for cls in parent_classes]
print("Creating test class {} ".format(clsname))
print(" for C* versions:\n{} ".format(pprint.pformat(version_metas)))
print(" using protocol: v{}, and parent classes: {}".format(protocol_version, parent_class_names))
print(" to run these tests alone, use `nosetests {}.py:{}`".format(__name__, clsname))
upgrade_applies_to_env = RUN_STATIC_UPGRADE_MATRIX or version_metas[-1].matches_current_env_version_family
# Attach the skip as 'pytestmark' so the generated class is actually skipped when the
# upgrade path does not apply to the current environment (a bare mark has no effect).
class_attrs = {'test_version_metas': version_metas, '__test__': True, 'protocol_version': protocol_version, 'extra_config': extra_config}
if not upgrade_applies_to_env:
class_attrs['pytestmark'] = [pytest.mark.skip(reason='test not applicable to env.')]
newcls = type(
clsname,
parent_classes,
class_attrs
)
if clsname in globals():
raise RuntimeError("Class by name already exists!")
globals()[clsname] = newcls
return newcls
MultiUpgrade = namedtuple('MultiUpgrade', ('name', 'version_metas', 'protocol_version', 'extra_config'))
MULTI_UPGRADES = (
# Proto v1 upgrades (v1 supported on 2.0, 2.1, 2.2)
MultiUpgrade(name='ProtoV1Upgrade_AllVersions_EndsAt_indev_2_2_x',
version_metas=[current_2_0_x, current_2_1_x, indev_2_2_x], protocol_version=1, extra_config=None),
MultiUpgrade(name='ProtoV1Upgrade_AllVersions_RandomPartitioner_EndsAt_indev_2_2_x',
version_metas=[current_2_0_x, current_2_1_x, indev_2_2_x], protocol_version=1,
extra_config=(
('partitioner', 'org.apache.cassandra.dht.RandomPartitioner'),
)),
# Proto v2 upgrades (v2 is supported on 2.0, 2.1, 2.2)
MultiUpgrade(name='ProtoV2Upgrade_AllVersions_EndsAt_indev_2_2_x',
version_metas=[current_2_0_x, current_2_1_x, indev_2_2_x], protocol_version=2, extra_config=None),
MultiUpgrade(name='ProtoV2Upgrade_AllVersions_RandomPartitioner_EndsAt_indev_2_2_x',
version_metas=[current_2_0_x, current_2_1_x, indev_2_2_x], protocol_version=2,
extra_config=(
('partitioner', 'org.apache.cassandra.dht.RandomPartitioner'),
)),
# Proto v3 upgrades (v3 is supported on 2.1, 2.2, 3.0, 3.1, trunk)
MultiUpgrade(name='ProtoV3Upgrade_AllVersions_EndsAt_Trunk_HEAD',
version_metas=[current_2_1_x, current_2_2_x, current_3_0_x, indev_3_x], protocol_version=3, extra_config=None),
MultiUpgrade(name='ProtoV3Upgrade_AllVersions_RandomPartitioner_EndsAt_Trunk_HEAD',
version_metas=[current_2_1_x, current_2_2_x, current_3_0_x, indev_3_x], protocol_version=3,
extra_config=(
('partitioner', 'org.apache.cassandra.dht.RandomPartitioner'),
)),
# Proto v4 upgrades (v4 is supported on 2.2, 3.0, 3.1, trunk)
MultiUpgrade(name='ProtoV4Upgrade_AllVersions_EndsAt_Trunk_HEAD',
version_metas=[current_2_2_x, current_3_0_x, indev_3_x], protocol_version=4, extra_config=None),
MultiUpgrade(name='ProtoV4Upgrade_AllVersions_RandomPartitioner_EndsAt_Trunk_HEAD',
version_metas=[current_2_2_x, current_3_0_x, indev_3_x], protocol_version=4,
extra_config=(
('partitioner', 'org.apache.cassandra.dht.RandomPartitioner'),
)),
)
for upgrade in MULTI_UPGRADES:
# if any version_metas are None, this means they are versions not to be tested currently
if all(upgrade.version_metas):
metas = upgrade.version_metas
if not RUN_STATIC_UPGRADE_MATRIX:
if metas[-1].matches_current_env_version_family:
# looks like this test should actually run in the current env, so let's set the final version to match the env exactly
oldmeta = metas[-1]
newmeta = oldmeta.clone_with_local_env_version()
logger.debug("{} appears applicable to current env. Overriding final test version from {} to {}".format(upgrade.name, oldmeta.version, newmeta.version))
metas[-1] = newmeta
create_upgrade_class(upgrade.name, [m for m in metas], protocol_version=upgrade.protocol_version, extra_config=upgrade.extra_config)
for pair in build_upgrade_pairs():
create_upgrade_class(
'Test' + pair.name,
[pair.starting_meta, pair.upgrade_meta],
protocol_version=pair.starting_meta.max_proto_v,
bootstrap_test=True
)
|
from pybrain.rl.agents.logging import LoggingAgent
from pybrain.rl.agents.learning import LearningAgent
from scipy import where
from random import choice
class BejeweledAgent(LearningAgent):
def getAction(self):
# get best action for every state observation
# overlay all action values for every state observation, pick best
LoggingAgent.getAction(self)
# for each color, get best action, then pick highest-value action
# among those actions
actions = []
values = []
# TODO: why are same values printed many times in a row here?
#print '========== in agent =========='
#print 'states:', [[i] for i in self.lastobs.flatten()]
for state in self.lastobs:
#print 'state:', state
actions.append(self.module.activate(state))
values.append(self.module.lastMaxActionValue)
#self.module.printState(state)
#print ' best:', actions[-1], 'value:', values[-1]
actionIdx = where(values == max(values))[0]
ch = choice(actionIdx)
self.lastaction = actions[ch]
self.bestState = self.lastobs[ch]
#print 'assigning reward to state', self.bestState
#print 'chosen action:', self.lastaction, 'value:', max(values)
# add a chance to pick a random other action
if self.learning:
self.lastaction = self.learner.explore(self.lastobs, self.lastaction)
#print 'after explorer:', self.lastaction
#print '============= end ============'
return self.lastaction
def giveReward(self, r):
"""Step 3: store observation, action and reward in the history dataset. """
# step 3: assume that state and action have been set
assert self.lastobs is not None
assert self.lastaction is not None
assert self.lastreward is None
self.lastreward = r
# store state, action and reward in dataset if logging is enabled
if self.logging:
# TODO: assigning reward to only best estimate for now
#for state in self.lastobs:
# TODO: assign reward to state correctly? NO because we're in
# the learner -- learning will be slower though, because of
# false positives for every obs
self.history.addSample(self.bestState, self.lastaction, self.lastreward)
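# Illustrative sketch (added, not part of the original agent): the tie-broken argmax that
# getAction performs with where()/choice(), written as a standalone helper. The function name
# is hypothetical; it assumes 'values' is a sequence of numeric scores and 'actions' is a
# parallel sequence of candidate actions.
def _pick_best_action(actions, values):
    best = max(values)
    # indices of all actions whose value ties for the maximum...
    best_indices = [i for i, v in enumerate(values) if v == best]
    # ...then break ties uniformly at random, mirroring choice(actionIdx) in getAction
    return actions[choice(best_indices)]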
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import numpy as np
import os
import sys
import time
import unittest
import ray
import ray.test.test_utils
class ActorAPI(unittest.TestCase):
def tearDown(self):
ray.worker.cleanup()
def testKeywordArgs(self):
ray.init(num_workers=0, driver_mode=ray.SILENT_MODE)
@ray.remote
class Actor(object):
def __init__(self, arg0, arg1=1, arg2="a"):
self.arg0 = arg0
self.arg1 = arg1
self.arg2 = arg2
def get_values(self, arg0, arg1=2, arg2="b"):
return self.arg0 + arg0, self.arg1 + arg1, self.arg2 + arg2
actor = Actor.remote(0)
self.assertEqual(ray.get(actor.get_values.remote(1)), (1, 3, "ab"))
actor = Actor.remote(1, 2)
self.assertEqual(ray.get(actor.get_values.remote(2, 3)), (3, 5, "ab"))
actor = Actor.remote(1, 2, "c")
self.assertEqual(ray.get(actor.get_values.remote(2, 3, "d")),
(3, 5, "cd"))
actor = Actor.remote(1, arg2="c")
self.assertEqual(ray.get(actor.get_values.remote(0, arg2="d")),
(1, 3, "cd"))
self.assertEqual(ray.get(actor.get_values.remote(0, arg2="d", arg1=0)),
(1, 1, "cd"))
actor = Actor.remote(1, arg2="c", arg1=2)
self.assertEqual(ray.get(actor.get_values.remote(0, arg2="d")),
(1, 4, "cd"))
self.assertEqual(ray.get(actor.get_values.remote(0, arg2="d", arg1=0)),
(1, 2, "cd"))
# Make sure we get an exception if the constructor is called
# incorrectly.
with self.assertRaises(Exception):
actor = Actor.remote()
with self.assertRaises(Exception):
actor = Actor.remote(0, 1, 2, arg3=3)
# Make sure we get an exception if the method is called incorrectly.
actor = Actor.remote(1)
with self.assertRaises(Exception):
ray.get(actor.get_values.remote())
def testVariableNumberOfArgs(self):
ray.init(num_workers=0)
@ray.remote
class Actor(object):
def __init__(self, arg0, arg1=1, *args):
self.arg0 = arg0
self.arg1 = arg1
self.args = args
def get_values(self, arg0, arg1=2, *args):
return self.arg0 + arg0, self.arg1 + arg1, self.args, args
actor = Actor.remote(0)
self.assertEqual(ray.get(actor.get_values.remote(1)), (1, 3, (), ()))
actor = Actor.remote(1, 2)
self.assertEqual(ray.get(actor.get_values.remote(2, 3)),
(3, 5, (), ()))
actor = Actor.remote(1, 2, "c")
self.assertEqual(ray.get(actor.get_values.remote(2, 3, "d")),
(3, 5, ("c",), ("d",)))
actor = Actor.remote(1, 2, "a", "b", "c", "d")
self.assertEqual(ray.get(actor.get_values.remote(2, 3, 1, 2, 3, 4)),
(3, 5, ("a", "b", "c", "d"), (1, 2, 3, 4)))
@ray.remote
class Actor(object):
def __init__(self, *args):
self.args = args
def get_values(self, *args):
return self.args, args
a = Actor.remote()
self.assertEqual(ray.get(a.get_values.remote()), ((), ()))
a = Actor.remote(1)
self.assertEqual(ray.get(a.get_values.remote(2)), ((1,), (2,)))
a = Actor.remote(1, 2)
self.assertEqual(ray.get(a.get_values.remote(3, 4)), ((1, 2), (3, 4)))
def testNoArgs(self):
ray.init(num_workers=0)
@ray.remote
class Actor(object):
def __init__(self):
pass
def get_values(self):
pass
actor = Actor.remote()
self.assertEqual(ray.get(actor.get_values.remote()), None)
def testNoConstructor(self):
# If no __init__ method is provided, that should not be a problem.
ray.init(num_workers=0)
@ray.remote
class Actor(object):
def get_values(self):
pass
actor = Actor.remote()
self.assertEqual(ray.get(actor.get_values.remote()), None)
def testCustomClasses(self):
ray.init(num_workers=0)
class Foo(object):
def __init__(self, x):
self.x = x
@ray.remote
class Actor(object):
def __init__(self, f2):
self.f1 = Foo(1)
self.f2 = f2
def get_values1(self):
return self.f1, self.f2
def get_values2(self, f3):
return self.f1, self.f2, f3
actor = Actor.remote(Foo(2))
results1 = ray.get(actor.get_values1.remote())
self.assertEqual(results1[0].x, 1)
self.assertEqual(results1[1].x, 2)
results2 = ray.get(actor.get_values2.remote(Foo(3)))
self.assertEqual(results2[0].x, 1)
self.assertEqual(results2[1].x, 2)
self.assertEqual(results2[2].x, 3)
def testCachingActors(self):
# Test defining actors before ray.init() has been called.
@ray.remote
class Foo(object):
def __init__(self):
pass
def get_val(self):
return 3
# Check that we can't actually create actors before ray.init() has been
# called.
with self.assertRaises(Exception):
f = Foo.remote()
ray.init(num_workers=0)
f = Foo.remote()
self.assertEqual(ray.get(f.get_val.remote()), 3)
def testDecoratorArgs(self):
ray.init(num_workers=0, driver_mode=ray.SILENT_MODE)
# This is an invalid way of using the actor decorator.
with self.assertRaises(Exception):
@ray.remote()
class Actor(object):
def __init__(self):
pass
# This is an invalid way of using the actor decorator.
with self.assertRaises(Exception):
@ray.remote(invalid_kwarg=0) # noqa: F811
class Actor(object):
def __init__(self):
pass
# This is an invalid way of using the actor decorator.
with self.assertRaises(Exception):
@ray.remote(num_cpus=0, invalid_kwarg=0) # noqa: F811
class Actor(object):
def __init__(self):
pass
# This is a valid way of using the decorator.
@ray.remote(num_cpus=1) # noqa: F811
class Actor(object):
def __init__(self):
pass
# This is a valid way of using the decorator.
@ray.remote(num_gpus=1) # noqa: F811
class Actor(object):
def __init__(self):
pass
# This is a valid way of using the decorator.
@ray.remote(num_cpus=1, num_gpus=1) # noqa: F811
class Actor(object):
def __init__(self):
pass
def testRandomIDGeneration(self):
ray.init(num_workers=0)
@ray.remote
class Foo(object):
def __init__(self):
pass
# Make sure that seeding numpy does not interfere with the generation
# of actor IDs.
np.random.seed(1234)
random.seed(1234)
f1 = Foo.remote()
np.random.seed(1234)
random.seed(1234)
f2 = Foo.remote()
self.assertNotEqual(f1._ray_actor_id.id(), f2._ray_actor_id.id())
def testActorClassName(self):
ray.init(num_workers=0)
@ray.remote
class Foo(object):
def __init__(self):
pass
Foo.remote()
r = ray.worker.global_worker.redis_client
actor_keys = r.keys("ActorClass*")
self.assertEqual(len(actor_keys), 1)
actor_class_info = r.hgetall(actor_keys[0])
self.assertEqual(actor_class_info[b"class_name"], b"Foo")
self.assertEqual(actor_class_info[b"module"], b"__main__")
class ActorMethods(unittest.TestCase):
def tearDown(self):
ray.worker.cleanup()
def testDefineActor(self):
ray.init()
@ray.remote
class Test(object):
def __init__(self, x):
self.x = x
def f(self, y):
return self.x + y
t = Test.remote(2)
self.assertEqual(ray.get(t.f.remote(1)), 3)
# Make sure that calling an actor method directly raises an exception.
with self.assertRaises(Exception):
t.f(1)
def testActorDeletion(self):
ray.init(num_workers=0)
# Make sure that when an actor handle goes out of scope, the actor
# destructor is called.
@ray.remote
class Actor(object):
def getpid(self):
return os.getpid()
a = Actor.remote()
pid = ray.get(a.getpid.remote())
a = None
ray.test.test_utils.wait_for_pid_to_exit(pid)
actors = [Actor.remote() for _ in range(10)]
pids = ray.get([a.getpid.remote() for a in actors])
a = None
actors = None
[ray.test.test_utils.wait_for_pid_to_exit(pid) for pid in pids]
@ray.remote
class Actor(object):
def method(self):
return 1
# Make sure that if we create an actor and call a method on it
# immediately, the actor doesn't get killed before the method is
# called.
self.assertEqual(ray.get(Actor.remote().method.remote()), 1)
def testActorDeletionWithGPUs(self):
ray.init(num_workers=0, num_gpus=1)
# When an actor that uses a GPU exits, make sure that the GPU resources
# are released.
@ray.remote(num_gpus=1)
class Actor(object):
def getpid(self):
return os.getpid()
for _ in range(5):
# If we can successfully create an actor, that means that enough
# GPU resources are available.
a = Actor.remote()
pid = ray.get(a.getpid.remote())
# Make sure that we can't create another actor.
with self.assertRaises(Exception):
Actor.remote()
# Let the actor go out of scope, and wait for it to exit.
a = None
ray.test.test_utils.wait_for_pid_to_exit(pid)
def testActorState(self):
ray.init()
@ray.remote
class Counter(object):
def __init__(self):
self.value = 0
def increase(self):
self.value += 1
def value(self):
return self.value
c1 = Counter.remote()
c1.increase.remote()
self.assertEqual(ray.get(c1.value.remote()), 1)
c2 = Counter.remote()
c2.increase.remote()
c2.increase.remote()
self.assertEqual(ray.get(c2.value.remote()), 2)
def testMultipleActors(self):
# Create a bunch of actors and call a bunch of methods on all of them.
ray.init(num_workers=0)
@ray.remote
class Counter(object):
def __init__(self, value):
self.value = value
def increase(self):
self.value += 1
return self.value
def reset(self):
self.value = 0
num_actors = 20
num_increases = 50
# Create multiple actors.
actors = [Counter.remote(i) for i in range(num_actors)]
results = []
# Call each actor's method a bunch of times.
for i in range(num_actors):
results += [actors[i].increase.remote()
for _ in range(num_increases)]
result_values = ray.get(results)
for i in range(num_actors):
self.assertEqual(
result_values[(num_increases * i):(num_increases * (i + 1))],
list(range(i + 1, num_increases + i + 1)))
# Reset the actor values.
[actor.reset.remote() for actor in actors]
# Interweave the method calls on the different actors.
results = []
for j in range(num_increases):
results += [actor.increase.remote() for actor in actors]
result_values = ray.get(results)
for j in range(num_increases):
self.assertEqual(
result_values[(num_actors * j):(num_actors * (j + 1))],
num_actors * [j + 1])
class ActorNesting(unittest.TestCase):
def tearDown(self):
ray.worker.cleanup()
def testRemoteFunctionWithinActor(self):
# Make sure we can use remote functions within actors.
ray.init(num_cpus=100)
# Create some values to close over.
val1 = 1
val2 = 2
@ray.remote
def f(x):
return val1 + x
@ray.remote
def g(x):
return ray.get(f.remote(x))
@ray.remote
class Actor(object):
def __init__(self, x):
self.x = x
self.y = val2
self.object_ids = [f.remote(i) for i in range(5)]
self.values2 = ray.get([f.remote(i) for i in range(5)])
def get_values(self):
return self.x, self.y, self.object_ids, self.values2
def f(self):
return [f.remote(i) for i in range(5)]
def g(self):
return ray.get([g.remote(i) for i in range(5)])
def h(self, object_ids):
return ray.get(object_ids)
actor = Actor.remote(1)
values = ray.get(actor.get_values.remote())
self.assertEqual(values[0], 1)
self.assertEqual(values[1], val2)
self.assertEqual(ray.get(values[2]), list(range(1, 6)))
self.assertEqual(values[3], list(range(1, 6)))
self.assertEqual(ray.get(ray.get(actor.f.remote())), list(range(1, 6)))
self.assertEqual(ray.get(actor.g.remote()), list(range(1, 6)))
self.assertEqual(
ray.get(actor.h.remote([f.remote(i) for i in range(5)])),
list(range(1, 6)))
def testDefineActorWithinActor(self):
# Make sure we can define actors within other actors.
ray.init(num_cpus=10)
@ray.remote
class Actor1(object):
def __init__(self, x):
self.x = x
def new_actor(self, z):
@ray.remote
class Actor2(object):
def __init__(self, x):
self.x = x
def get_value(self):
return self.x
self.actor2 = Actor2.remote(z)
def get_values(self, z):
self.new_actor(z)
return self.x, ray.get(self.actor2.get_value.remote())
actor1 = Actor1.remote(3)
self.assertEqual(ray.get(actor1.get_values.remote(5)), (3, 5))
def testUseActorWithinActor(self):
# Make sure we can use actors within actors.
ray.init(num_cpus=10)
@ray.remote
class Actor1(object):
def __init__(self, x):
self.x = x
def get_val(self):
return self.x
@ray.remote
class Actor2(object):
def __init__(self, x, y):
self.x = x
self.actor1 = Actor1.remote(y)
def get_values(self, z):
return self.x, ray.get(self.actor1.get_val.remote())
actor2 = Actor2.remote(3, 4)
self.assertEqual(ray.get(actor2.get_values.remote(5)), (3, 4))
def testDefineActorWithinRemoteFunction(self):
# Make sure we can define actors within remote functions.
ray.init(num_cpus=10)
@ray.remote
def f(x, n):
@ray.remote
class Actor1(object):
def __init__(self, x):
self.x = x
def get_value(self):
return self.x
actor = Actor1.remote(x)
return ray.get([actor.get_value.remote() for _ in range(n)])
self.assertEqual(ray.get(f.remote(3, 1)), [3])
self.assertEqual(ray.get([f.remote(i, 20) for i in range(10)]),
[20 * [i] for i in range(10)])
def testUseActorWithinRemoteFunction(self):
# Make sure we can create and use actors within remote functions.
ray.init(num_cpus=10)
@ray.remote
class Actor1(object):
def __init__(self, x):
self.x = x
def get_values(self):
return self.x
@ray.remote
def f(x):
actor = Actor1.remote(x)
return ray.get(actor.get_values.remote())
self.assertEqual(ray.get(f.remote(3)), 3)
def testActorImportCounter(self):
# This is mostly a test of the export counters to make sure that when
# an actor is imported, all of the necessary remote functions have been
# imported.
ray.init(num_cpus=10)
# Export a bunch of remote functions.
num_remote_functions = 50
for i in range(num_remote_functions):
@ray.remote
def f():
return i
@ray.remote
def g():
@ray.remote
class Actor(object):
def __init__(self):
# This should use the last version of f.
self.x = ray.get(f.remote())
def get_val(self):
return self.x
actor = Actor.remote()
return ray.get(actor.get_val.remote())
self.assertEqual(ray.get(g.remote()), num_remote_functions - 1)
class ActorInheritance(unittest.TestCase):
def tearDown(self):
ray.worker.cleanup()
def testInheritActorFromClass(self):
# Make sure we can define an actor by inheriting from a regular class.
# Note that actors cannot inherit from other actors.
ray.init()
class Foo(object):
def __init__(self, x):
self.x = x
def f(self):
return self.x
def g(self, y):
return self.x + y
@ray.remote
class Actor(Foo):
def __init__(self, x):
Foo.__init__(self, x)
def get_value(self):
return self.f()
actor = Actor.remote(1)
self.assertEqual(ray.get(actor.get_value.remote()), 1)
self.assertEqual(ray.get(actor.g.remote(5)), 6)
class ActorSchedulingProperties(unittest.TestCase):
def tearDown(self):
ray.worker.cleanup()
def testRemoteFunctionsNotScheduledOnActors(self):
# Make sure that regular remote functions are not scheduled on actors.
ray.init(num_workers=0)
@ray.remote
class Actor(object):
def __init__(self):
pass
def get_id(self):
return ray.worker.global_worker.worker_id
a = Actor.remote()
actor_id = ray.get(a.get_id.remote())
@ray.remote
def f():
return ray.worker.global_worker.worker_id
resulting_ids = ray.get([f.remote() for _ in range(100)])
self.assertNotIn(actor_id, resulting_ids)
class ActorsOnMultipleNodes(unittest.TestCase):
def tearDown(self):
ray.worker.cleanup()
def testActorsOnNodesWithNoCPUs(self):
ray.init(num_cpus=0)
@ray.remote
class Foo(object):
def __init__(self):
pass
with self.assertRaises(Exception):
Foo.remote()
def testActorLoadBalancing(self):
num_local_schedulers = 3
ray.worker._init(start_ray_local=True, num_workers=0,
num_local_schedulers=num_local_schedulers)
@ray.remote
class Actor1(object):
def __init__(self):
pass
def get_location(self):
return ray.worker.global_worker.plasma_client.store_socket_name
# Create a bunch of actors.
num_actors = 30
num_attempts = 20
minimum_count = 5
# Make sure that actors are spread between the local schedulers.
attempts = 0
while attempts < num_attempts:
actors = [Actor1.remote() for _ in range(num_actors)]
locations = ray.get([actor.get_location.remote()
for actor in actors])
names = set(locations)
counts = [locations.count(name) for name in names]
print("Counts are {}.".format(counts))
if (len(names) == num_local_schedulers and
all([count >= minimum_count for count in counts])):
break
attempts += 1
self.assertLess(attempts, num_attempts)
# Make sure we can get the results of a bunch of tasks.
results = []
for _ in range(1000):
index = np.random.randint(num_actors)
results.append(actors[index].get_location.remote())
ray.get(results)
class ActorsWithGPUs(unittest.TestCase):
def tearDown(self):
ray.worker.cleanup()
def testActorGPUs(self):
num_local_schedulers = 3
num_gpus_per_scheduler = 4
ray.worker._init(
start_ray_local=True, num_workers=0,
num_local_schedulers=num_local_schedulers,
num_gpus=(num_local_schedulers * [num_gpus_per_scheduler]))
@ray.remote(num_gpus=1)
class Actor1(object):
def __init__(self):
self.gpu_ids = ray.get_gpu_ids()
def get_location_and_ids(self):
assert ray.get_gpu_ids() == self.gpu_ids
return (
ray.worker.global_worker.plasma_client.store_socket_name,
tuple(self.gpu_ids))
# Create one actor per GPU.
actors = [Actor1.remote() for _
in range(num_local_schedulers * num_gpus_per_scheduler)]
# Make sure that no two actors are assigned to the same GPU.
locations_and_ids = ray.get([actor.get_location_and_ids.remote()
for actor in actors])
node_names = set([location for location, gpu_id in locations_and_ids])
self.assertEqual(len(node_names), num_local_schedulers)
location_actor_combinations = []
for node_name in node_names:
for gpu_id in range(num_gpus_per_scheduler):
location_actor_combinations.append((node_name, (gpu_id,)))
self.assertEqual(set(locations_and_ids),
set(location_actor_combinations))
# Creating a new actor should fail because all of the GPUs are being
# used.
with self.assertRaises(Exception):
Actor1.remote()
def testActorMultipleGPUs(self):
num_local_schedulers = 3
num_gpus_per_scheduler = 5
ray.worker._init(
start_ray_local=True, num_workers=0,
num_local_schedulers=num_local_schedulers,
num_gpus=(num_local_schedulers * [num_gpus_per_scheduler]))
@ray.remote(num_gpus=2)
class Actor1(object):
def __init__(self):
self.gpu_ids = ray.get_gpu_ids()
def get_location_and_ids(self):
return (
ray.worker.global_worker.plasma_client.store_socket_name,
tuple(self.gpu_ids))
# Create some actors.
actors1 = [Actor1.remote() for _ in range(num_local_schedulers * 2)]
# Make sure that no two actors are assigned to the same GPU.
locations_and_ids = ray.get([actor.get_location_and_ids.remote()
for actor in actors1])
node_names = set([location for location, gpu_id in locations_and_ids])
self.assertEqual(len(node_names), num_local_schedulers)
# Keep track of which GPU IDs are being used for each location.
gpus_in_use = {node_name: [] for node_name in node_names}
for location, gpu_ids in locations_and_ids:
gpus_in_use[location].extend(gpu_ids)
for node_name in node_names:
self.assertEqual(len(set(gpus_in_use[node_name])), 4)
# Creating a new actor should fail because all of the GPUs are being
# used.
with self.assertRaises(Exception):
Actor1.remote()
# We should be able to create more actors that use only a single GPU.
@ray.remote(num_gpus=1)
class Actor2(object):
def __init__(self):
self.gpu_ids = ray.get_gpu_ids()
def get_location_and_ids(self):
return (
ray.worker.global_worker.plasma_client.store_socket_name,
tuple(self.gpu_ids))
# Create some actors.
actors2 = [Actor2.remote() for _ in range(num_local_schedulers)]
# Make sure that no two actors are assigned to the same GPU.
locations_and_ids = ray.get([actor.get_location_and_ids.remote()
for actor in actors2])
self.assertEqual(node_names,
set([location for location, gpu_id
in locations_and_ids]))
for location, gpu_ids in locations_and_ids:
gpus_in_use[location].extend(gpu_ids)
for node_name in node_names:
self.assertEqual(len(gpus_in_use[node_name]), 5)
self.assertEqual(set(gpus_in_use[node_name]), set(range(5)))
# Creating a new actor should fail because all of the GPUs are being
# used.
with self.assertRaises(Exception):
Actor2.remote()
def testActorDifferentNumbersOfGPUs(self):
# Test that we can create actors on two nodes that have different
# numbers of GPUs.
ray.worker._init(start_ray_local=True, num_workers=0,
num_local_schedulers=3, num_gpus=[0, 5, 10])
@ray.remote(num_gpus=1)
class Actor1(object):
def __init__(self):
self.gpu_ids = ray.get_gpu_ids()
def get_location_and_ids(self):
return (
ray.worker.global_worker.plasma_client.store_socket_name,
tuple(self.gpu_ids))
# Create some actors.
actors = [Actor1.remote() for _ in range(0 + 5 + 10)]
# Make sure that no two actors are assigned to the same GPU.
locations_and_ids = ray.get([actor.get_location_and_ids.remote()
for actor in actors])
node_names = set([location for location, gpu_id in locations_and_ids])
self.assertEqual(len(node_names), 2)
for node_name in node_names:
node_gpu_ids = [gpu_id for location, gpu_id in locations_and_ids
if location == node_name]
self.assertIn(len(node_gpu_ids), [5, 10])
self.assertEqual(set(node_gpu_ids),
set([(i,) for i in range(len(node_gpu_ids))]))
# Creating a new actor should fail because all of the GPUs are being
# used.
with self.assertRaises(Exception):
Actor1.remote()
def testActorMultipleGPUsFromMultipleTasks(self):
num_local_schedulers = 10
num_gpus_per_scheduler = 10
ray.worker._init(
start_ray_local=True, num_workers=0,
num_local_schedulers=num_local_schedulers, redirect_output=True,
num_gpus=(num_local_schedulers * [num_gpus_per_scheduler]))
@ray.remote
def create_actors(n):
@ray.remote(num_gpus=1)
class Actor(object):
def __init__(self):
self.gpu_ids = ray.get_gpu_ids()
def get_location_and_ids(self):
return ((ray.worker.global_worker.plasma_client
.store_socket_name),
tuple(self.gpu_ids))
# Create n actors.
for _ in range(n):
Actor.remote()
ray.get([create_actors.remote(num_gpus_per_scheduler)
for _ in range(num_local_schedulers)])
@ray.remote(num_gpus=1)
class Actor(object):
def __init__(self):
self.gpu_ids = ray.get_gpu_ids()
def get_location_and_ids(self):
return (
ray.worker.global_worker.plasma_client.store_socket_name,
tuple(self.gpu_ids))
# All the GPUs should be used up now.
with self.assertRaises(Exception):
Actor.remote()
@unittest.skipIf(sys.version_info < (3, 0), "This test requires Python 3.")
def testActorsAndTasksWithGPUs(self):
num_local_schedulers = 3
num_gpus_per_scheduler = 6
ray.worker._init(
start_ray_local=True, num_workers=0,
num_local_schedulers=num_local_schedulers,
num_cpus=num_gpus_per_scheduler,
num_gpus=(num_local_schedulers * [num_gpus_per_scheduler]))
def check_intervals_non_overlapping(list_of_intervals):
for i in range(len(list_of_intervals)):
for j in range(i):
first_interval = list_of_intervals[i]
second_interval = list_of_intervals[j]
# Check that list_of_intervals[i] and list_of_intervals[j]
# don't overlap.
self.assertLess(first_interval[0], first_interval[1])
self.assertLess(second_interval[0], second_interval[1])
intervals_nonoverlapping = (
first_interval[1] <= second_interval[0] or
second_interval[1] <= first_interval[0])
assert intervals_nonoverlapping, (
"Intervals {} and {} are overlapping."
.format(first_interval, second_interval))
@ray.remote(num_gpus=1)
def f1():
t1 = time.monotonic()
time.sleep(0.1)
t2 = time.monotonic()
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert gpu_ids[0] in range(num_gpus_per_scheduler)
return (ray.worker.global_worker.plasma_client.store_socket_name,
tuple(gpu_ids), [t1, t2])
@ray.remote(num_gpus=2)
def f2():
t1 = time.monotonic()
time.sleep(0.1)
t2 = time.monotonic()
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 2
assert gpu_ids[0] in range(num_gpus_per_scheduler)
assert gpu_ids[1] in range(num_gpus_per_scheduler)
return (ray.worker.global_worker.plasma_client.store_socket_name,
tuple(gpu_ids), [t1, t2])
@ray.remote(num_gpus=1)
class Actor1(object):
def __init__(self):
self.gpu_ids = ray.get_gpu_ids()
assert len(self.gpu_ids) == 1
assert self.gpu_ids[0] in range(num_gpus_per_scheduler)
def get_location_and_ids(self):
assert ray.get_gpu_ids() == self.gpu_ids
return (
ray.worker.global_worker.plasma_client.store_socket_name,
tuple(self.gpu_ids))
def locations_to_intervals_for_many_tasks():
# Launch a bunch of GPU tasks.
locations_ids_and_intervals = ray.get(
[f1.remote() for _
in range(5 * num_local_schedulers * num_gpus_per_scheduler)] +
[f2.remote() for _
in range(5 * num_local_schedulers * num_gpus_per_scheduler)] +
[f1.remote() for _
in range(5 * num_local_schedulers * num_gpus_per_scheduler)])
locations_to_intervals = collections.defaultdict(lambda: [])
for location, gpu_ids, interval in locations_ids_and_intervals:
for gpu_id in gpu_ids:
locations_to_intervals[(location, gpu_id)].append(interval)
return locations_to_intervals
# Run a bunch of GPU tasks.
locations_to_intervals = locations_to_intervals_for_many_tasks()
# Make sure that all GPUs were used.
self.assertEqual(len(locations_to_intervals),
num_local_schedulers * num_gpus_per_scheduler)
# For each GPU, verify that the set of tasks that used this specific
# GPU did not overlap in time.
for locations in locations_to_intervals:
check_intervals_non_overlapping(locations_to_intervals[locations])
# Create an actor that uses a GPU.
a = Actor1.remote()
actor_location = ray.get(a.get_location_and_ids.remote())
actor_location = (actor_location[0], actor_location[1][0])
# This check makes sure that actor_location is formatted the same way
# that the keys of locations_to_intervals are formatted.
self.assertIn(actor_location, locations_to_intervals)
# Run a bunch of GPU tasks.
locations_to_intervals = locations_to_intervals_for_many_tasks()
# Make sure that all but one of the GPUs were used.
self.assertEqual(len(locations_to_intervals),
num_local_schedulers * num_gpus_per_scheduler - 1)
# For each GPU, verify that the set of tasks that used this specific
# GPU did not overlap in time.
for locations in locations_to_intervals:
check_intervals_non_overlapping(locations_to_intervals[locations])
# Make sure that the actor's GPU was not used.
self.assertNotIn(actor_location, locations_to_intervals)
# Create several more actors that use GPUs.
actors = [Actor1.remote() for _ in range(3)]
actor_locations = ray.get([actor.get_location_and_ids.remote()
for actor in actors])
# Run a bunch of GPU tasks.
locations_to_intervals = locations_to_intervals_for_many_tasks()
        # Make sure that all but 4 (1 + 3) of the GPUs were used.
self.assertEqual(len(locations_to_intervals),
num_local_schedulers * num_gpus_per_scheduler - 1 - 3)
# For each GPU, verify that the set of tasks that used this specific
# GPU did not overlap in time.
for locations in locations_to_intervals:
check_intervals_non_overlapping(locations_to_intervals[locations])
        # Make sure that the actors' GPUs were not used.
self.assertNotIn(actor_location, locations_to_intervals)
for location in actor_locations:
self.assertNotIn(location, locations_to_intervals)
# Create more actors to fill up all the GPUs.
more_actors = [Actor1.remote() for _ in
range(num_local_schedulers *
num_gpus_per_scheduler - 1 - 3)]
# Wait for the actors to finish being created.
ray.get([actor.get_location_and_ids.remote() for actor in more_actors])
# Now if we run some GPU tasks, they should not be scheduled.
results = [f1.remote() for _ in range(30)]
ready_ids, remaining_ids = ray.wait(results, timeout=1000)
self.assertEqual(len(ready_ids), 0)
def testActorsAndTasksWithGPUsVersionTwo(self):
# Create tasks and actors that both use GPUs and make sure that they
# are given different GPUs
ray.init(num_cpus=10, num_gpus=10)
@ray.remote(num_gpus=1)
def f():
time.sleep(4)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
return gpu_ids[0]
@ray.remote(num_gpus=1)
class Actor(object):
def __init__(self):
self.gpu_ids = ray.get_gpu_ids()
assert len(self.gpu_ids) == 1
def get_gpu_id(self):
assert ray.get_gpu_ids() == self.gpu_ids
return self.gpu_ids[0]
results = []
actors = []
for _ in range(5):
results.append(f.remote())
a = Actor.remote()
results.append(a.get_gpu_id.remote())
# Prevent the actor handle from going out of scope so that its GPU
# resources don't get released.
actors.append(a)
gpu_ids = ray.get(results)
self.assertEqual(set(gpu_ids), set(range(10)))
@unittest.skipIf(sys.version_info < (3, 0), "This test requires Python 3.")
def testActorsAndTaskResourceBookkeeping(self):
ray.init(num_cpus=1)
@ray.remote
class Foo(object):
def __init__(self):
start = time.monotonic()
time.sleep(0.1)
end = time.monotonic()
self.interval = (start, end)
def get_interval(self):
return self.interval
def sleep(self):
start = time.monotonic()
time.sleep(0.01)
end = time.monotonic()
return start, end
# First make sure that we do not have more actor methods running at a
# time than we have CPUs.
actors = [Foo.remote() for _ in range(4)]
interval_ids = []
interval_ids += [actor.get_interval.remote() for actor in actors]
for _ in range(4):
interval_ids += [actor.sleep.remote() for actor in actors]
# Make sure that the intervals don't overlap.
intervals = ray.get(interval_ids)
intervals.sort(key=lambda x: x[0])
for interval1, interval2 in zip(intervals[:-1], intervals[1:]):
self.assertLess(interval1[0], interval1[1])
self.assertLess(interval1[1], interval2[0])
self.assertLess(interval2[0], interval2[1])
def testBlockingActorTask(self):
ray.init(num_cpus=1, num_gpus=1)
@ray.remote(num_gpus=1)
def f():
return 1
@ray.remote
class Foo(object):
def __init__(self):
pass
def blocking_method(self):
ray.get(f.remote())
# Make sure we can execute a blocking actor method even if there is
# only one CPU.
actor = Foo.remote()
ray.get(actor.blocking_method.remote())
@ray.remote(num_gpus=1)
class GPUFoo(object):
def __init__(self):
pass
def blocking_method(self):
ray.get(f.remote())
        # Make sure that GPU resources are not released when actors block.
actor = GPUFoo.remote()
x_id = actor.blocking_method.remote()
ready_ids, remaining_ids = ray.wait([x_id], timeout=500)
self.assertEqual(ready_ids, [])
self.assertEqual(remaining_ids, [x_id])
class ActorReconstruction(unittest.TestCase):
def tearDown(self):
ray.worker.cleanup()
def testLocalSchedulerDying(self):
ray.worker._init(start_ray_local=True, num_local_schedulers=2,
num_workers=0, redirect_output=True)
@ray.remote
class Counter(object):
def __init__(self):
self.x = 0
def local_plasma(self):
return ray.worker.global_worker.plasma_client.store_socket_name
def inc(self):
self.x += 1
return self.x
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
# Create an actor that is not on the local scheduler.
actor = Counter.remote()
while ray.get(actor.local_plasma.remote()) == local_plasma:
actor = Counter.remote()
ids = [actor.inc.remote() for _ in range(100)]
# Wait for the last task to finish running.
ray.get(ids[-1])
# Kill the second plasma store to get rid of the cached objects and
# trigger the corresponding local scheduler to exit.
process = ray.services.all_processes[
ray.services.PROCESS_TYPE_PLASMA_STORE][1]
process.kill()
process.wait()
# Get all of the results
results = ray.get(ids)
self.assertEqual(results, list(range(1, 1 + len(results))))
def testManyLocalSchedulersDying(self):
# This test can be made more stressful by increasing the numbers below.
# The total number of actors created will be
# num_actors_at_a_time * num_local_schedulers.
num_local_schedulers = 5
num_actors_at_a_time = 3
num_function_calls_at_a_time = 10
ray.worker._init(start_ray_local=True,
num_local_schedulers=num_local_schedulers,
num_workers=0, redirect_output=True)
@ray.remote
class SlowCounter(object):
def __init__(self):
self.x = 0
def inc(self, duration):
time.sleep(duration)
self.x += 1
return self.x
# Create some initial actors.
actors = [SlowCounter.remote() for _ in range(num_actors_at_a_time)]
# Wait for the actors to start up.
time.sleep(1)
# This is a mapping from actor handles to object IDs returned by
# methods on that actor.
result_ids = collections.defaultdict(lambda: [])
# In a loop we are going to create some actors, run some methods, kill
# a local scheduler, and run some more methods.
for i in range(num_local_schedulers - 1):
# Create some actors.
actors.extend([SlowCounter.remote()
for _ in range(num_actors_at_a_time)])
# Run some methods.
for j in range(len(actors)):
actor = actors[j]
for _ in range(num_function_calls_at_a_time):
result_ids[actor].append(
actor.inc.remote(j ** 2 * 0.000001))
# Kill a plasma store to get rid of the cached objects and trigger
# exit of the corresponding local scheduler. Don't kill the first
# local scheduler since that is the one that the driver is
# connected to.
process = ray.services.all_processes[
ray.services.PROCESS_TYPE_PLASMA_STORE][i + 1]
process.kill()
process.wait()
# Run some more methods.
for j in range(len(actors)):
actor = actors[j]
for _ in range(num_function_calls_at_a_time):
result_ids[actor].append(
actor.inc.remote(j ** 2 * 0.000001))
# Get the results and check that they have the correct values.
for _, result_id_list in result_ids.items():
self.assertEqual(ray.get(result_id_list),
list(range(1, len(result_id_list) + 1)))
def setup_test_checkpointing(self, save_exception=False,
resume_exception=False):
ray.worker._init(start_ray_local=True, num_local_schedulers=2,
num_workers=0, redirect_output=True)
@ray.remote(checkpoint_interval=5)
class Counter(object):
_resume_exception = resume_exception
def __init__(self, save_exception):
self.x = 0
# The number of times that inc has been called. We won't bother
# restoring this in the checkpoint
self.num_inc_calls = 0
self.save_exception = save_exception
def local_plasma(self):
return ray.worker.global_worker.plasma_client.store_socket_name
def inc(self, *xs):
self.num_inc_calls += 1
self.x += 1
return self.x
def get_num_inc_calls(self):
return self.num_inc_calls
def test_restore(self):
# This method will only work if __ray_restore__ has been run.
return self.y
def __ray_save__(self):
if self.save_exception:
raise Exception("Exception raised in checkpoint save")
return self.x, -1
def __ray_restore__(self, checkpoint):
if self._resume_exception:
raise Exception("Exception raised in checkpoint resume")
self.x, val = checkpoint
self.num_inc_calls = 0
# Test that __ray_save__ has been run.
assert val == -1
self.y = self.x
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
# Create an actor that is not on the local scheduler.
actor = Counter.remote(save_exception)
while ray.get(actor.local_plasma.remote()) == local_plasma:
actor = Counter.remote(save_exception)
args = [ray.put(0) for _ in range(100)]
ids = [actor.inc.remote(*args[i:]) for i in range(100)]
return actor, ids
def testCheckpointing(self):
actor, ids = self.setup_test_checkpointing()
# Wait for the last task to finish running.
ray.get(ids[-1])
# Kill the corresponding plasma store to get rid of the cached objects.
process = ray.services.all_processes[
ray.services.PROCESS_TYPE_PLASMA_STORE][1]
process.kill()
process.wait()
# Get all of the results. TODO(rkn): This currently doesn't work.
# results = ray.get(ids)
# self.assertEqual(results, list(range(1, 1 + len(results))))
self.assertEqual(ray.get(actor.test_restore.remote()), 99)
# The inc method should only have executed once on the new actor (for
# the one method call since the most recent checkpoint).
self.assertEqual(ray.get(actor.get_num_inc_calls.remote()), 1)
def testLostCheckpoint(self):
actor, ids = self.setup_test_checkpointing()
# Wait for the first fraction of tasks to finish running.
ray.get(ids[len(ids) // 10])
actor_key = b"Actor:" + actor._ray_actor_id.id()
for index in ray.actor.get_checkpoint_indices(
ray.worker.global_worker, actor._ray_actor_id.id()):
ray.worker.global_worker.redis_client.hdel(
actor_key, "checkpoint_{}".format(index))
# Kill the corresponding plasma store to get rid of the cached objects.
process = ray.services.all_processes[
ray.services.PROCESS_TYPE_PLASMA_STORE][1]
process.kill()
process.wait()
self.assertEqual(ray.get(actor.inc.remote()), 101)
# Each inc method has been reexecuted once on the new actor.
self.assertEqual(ray.get(actor.get_num_inc_calls.remote()), 101)
# Get all of the results that were previously lost. Because the
# checkpoints were lost, all methods should be reconstructed.
results = ray.get(ids)
self.assertEqual(results, list(range(1, 1 + len(results))))
def testCheckpointException(self):
actor, ids = self.setup_test_checkpointing(save_exception=True)
# Wait for the last task to finish running.
ray.get(ids[-1])
# Kill the corresponding plasma store to get rid of the cached objects.
process = ray.services.all_processes[
ray.services.PROCESS_TYPE_PLASMA_STORE][1]
process.kill()
process.wait()
self.assertEqual(ray.get(actor.inc.remote()), 101)
# Each inc method has been reexecuted once on the new actor, since all
# checkpoint saves failed.
self.assertEqual(ray.get(actor.get_num_inc_calls.remote()), 101)
# Get all of the results that were previously lost. Because the
# checkpoints were lost, all methods should be reconstructed.
results = ray.get(ids)
self.assertEqual(results, list(range(1, 1 + len(results))))
errors = ray.error_info()
# We submitted 101 tasks with a checkpoint interval of 5.
num_checkpoints = 101 // 5
# Each checkpoint task throws an exception when saving during initial
# execution, and then again during re-execution.
self.assertEqual(len([error for error in errors if error[b"type"] ==
b"task"]), num_checkpoints * 2)
def testCheckpointResumeException(self):
actor, ids = self.setup_test_checkpointing(resume_exception=True)
# Wait for the last task to finish running.
ray.get(ids[-1])
# Kill the corresponding plasma store to get rid of the cached objects.
process = ray.services.all_processes[
ray.services.PROCESS_TYPE_PLASMA_STORE][1]
process.kill()
process.wait()
self.assertEqual(ray.get(actor.inc.remote()), 101)
# Each inc method has been reexecuted once on the new actor, since all
# checkpoint resumes failed.
self.assertEqual(ray.get(actor.get_num_inc_calls.remote()), 101)
# Get all of the results that were previously lost. Because the
# checkpoints were lost, all methods should be reconstructed.
results = ray.get(ids)
self.assertEqual(results, list(range(1, 1 + len(results))))
errors = ray.error_info()
# The most recently executed checkpoint task should throw an exception
# when trying to resume. All other checkpoint tasks should reconstruct
# the previous task but throw no errors.
self.assertTrue(len([error for error in errors if error[b"type"] ==
b"task"]) > 0)
class DistributedActorHandles(unittest.TestCase):
def tearDown(self):
ray.worker.cleanup()
def make_counter_actor(self, checkpoint_interval=-1):
ray.init()
@ray.remote(checkpoint_interval=checkpoint_interval)
class Counter(object):
def __init__(self):
self.value = 0
def increase(self):
self.value += 1
return self.value
return Counter.remote()
def testFork(self):
counter = self.make_counter_actor()
num_calls = 1
self.assertEqual(ray.get(counter.increase.remote()), num_calls)
@ray.remote
def fork(counter):
return ray.get(counter.increase.remote())
# Fork once.
num_calls += 1
self.assertEqual(ray.get(fork.remote(counter)), num_calls)
num_calls += 1
self.assertEqual(ray.get(counter.increase.remote()), num_calls)
# Fork num_iters times.
num_iters = 100
num_calls += num_iters
ray.get([fork.remote(counter) for _ in range(num_iters)])
num_calls += 1
self.assertEqual(ray.get(counter.increase.remote()), num_calls)
def testForkConsistency(self):
counter = self.make_counter_actor()
@ray.remote
def fork_many_incs(counter, num_incs):
x = None
for _ in range(num_incs):
x = counter.increase.remote()
# Only call ray.get() on the last task submitted.
return ray.get(x)
num_incs = 100
# Fork once.
num_calls = num_incs
self.assertEqual(ray.get(fork_many_incs.remote(counter, num_incs)),
num_calls)
num_calls += 1
self.assertEqual(ray.get(counter.increase.remote()), num_calls)
# Fork num_iters times.
num_iters = 10
num_calls += num_iters * num_incs
ray.get([fork_many_incs.remote(counter, num_incs) for _ in
range(num_iters)])
# Check that we ensured per-handle serialization.
num_calls += 1
self.assertEqual(ray.get(counter.increase.remote()), num_calls)
@unittest.skip("Garbage collection for distributed actor handles not "
"implemented.")
def testGarbageCollection(self):
counter = self.make_counter_actor()
@ray.remote
def fork(counter):
for _ in range(10):
x = counter.increase.remote()
time.sleep(0.1)
return ray.get(x)
x = fork.remote(counter)
ray.get(counter.increase.remote())
del counter
print(ray.get(x))
def testCheckpoint(self):
counter = self.make_counter_actor(checkpoint_interval=1)
num_calls = 1
self.assertEqual(ray.get(counter.increase.remote()), num_calls)
@ray.remote
def fork(counter):
return ray.get(counter.increase.remote())
# Passing an actor handle with checkpointing enabled shouldn't be
# allowed yet.
with self.assertRaises(Exception):
fork.remote(counter)
num_calls += 1
self.assertEqual(ray.get(counter.increase.remote()), num_calls)
@unittest.skip("Fork/join consistency not yet implemented.")
def testLocalSchedulerDying(self):
ray.worker._init(start_ray_local=True, num_local_schedulers=2,
num_workers=0, redirect_output=False)
@ray.remote
class Counter(object):
def __init__(self):
self.x = 0
def local_plasma(self):
return ray.worker.global_worker.plasma_client.store_socket_name
def inc(self):
self.x += 1
return self.x
@ray.remote
def foo(counter):
for _ in range(100):
x = counter.inc.remote()
return ray.get(x)
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
# Create an actor that is not on the local scheduler.
actor = Counter.remote()
while ray.get(actor.local_plasma.remote()) == local_plasma:
actor = Counter.remote()
# Concurrently, submit many tasks to the actor through the original
# handle and the forked handle.
x = foo.remote(actor)
ids = [actor.inc.remote() for _ in range(100)]
# Wait for the last task to finish running.
ray.get(ids[-1])
y = ray.get(x)
# Kill the second plasma store to get rid of the cached objects and
# trigger the corresponding local scheduler to exit.
process = ray.services.all_processes[
ray.services.PROCESS_TYPE_PLASMA_STORE][1]
process.kill()
process.wait()
# Submit a new task. Its results should reflect the tasks submitted
# through both the original handle and the forked handle.
self.assertEqual(ray.get(actor.inc.remote()), y + 1)
if __name__ == "__main__":
unittest.main(verbosity=2)
|
# # # # # compare tasmin, tas, tasmax in a timeseries of GeoTiff files # # # #
def transform_from_latlon( lat, lon ):
''' simple way to make an affine transform from lats and lons coords '''
from affine import Affine
lat = np.asarray( lat )
lon = np.asarray(lon)
trans = Affine.translation(lon[0], lat[0])
scale = Affine.scale(lon[1] - lon[0], lat[1] - lat[0])
return trans * scale
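# Illustrative usage sketch (not part of the original script; grid values are made up):
# build the affine transform for a regular half-degree grid and map array indices back to
# coordinates. Defined as a function so nothing runs at import time.
def _example_transform_from_latlon():
    import numpy as np
    lat = np.arange(65.0, 60.0, -0.5)  # descending latitudes, as in a north-up raster
    lon = np.arange(-150.0, -145.0, 0.5)
    aff = transform_from_latlon(lat, lon)
    # aff * (col, row) -> (lon, lat); (0, 0) recovers the first grid coordinate.
    return aff * (0, 0)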
def rasterize( shapes, coords, latitude='latitude', longitude='longitude', fill=None, **kwargs ):
'''
Rasterize a list of (geometry, fill_value) tuples onto the given
xarray coordinates. This only works for 1d latitude and longitude
arrays.
'''
from rasterio import features
    if fill is None:
fill = np.nan
transform = transform_from_latlon( coords[ latitude ], coords[ longitude ] )
out_shape = ( len( coords[ latitude ] ), len( coords[ longitude ] ) )
raster = features.rasterize(shapes, out_shape=out_shape,
fill=fill, transform=transform,
dtype=float, **kwargs)
spatial_coords = {latitude: coords[latitude], longitude: coords[longitude]}
return xr.DataArray(raster, coords=spatial_coords, dims=(latitude, longitude))
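# Minimal usage sketch (made-up polygon and grid; not part of the original workflow):
# burn a single GeoJSON-like polygon onto a small regular lat/lon grid. Note that
# rasterize() itself uses the module-level `np` and `xr` names, which the __main__ block
# below binds when this script is actually run.
def _example_rasterize():
    import numpy as np
    coords = {'latitude': np.linspace(61.0, 60.0, 50),
              'longitude': np.linspace(-150.0, -149.0, 50)}
    poly = {'type': 'Polygon',
            'coordinates': [[(-149.9, 60.1), (-149.1, 60.1), (-149.1, 60.9),
                             (-149.9, 60.9), (-149.9, 60.1)]]}
    # Cells inside the polygon get 1.0; everything else keeps the NaN fill.
    return rasterize([(poly, 1.0)], coords, fill=np.nan)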
def sort_files( files, split_on='_', elem_month=-2, elem_year=-1 ):
'''
    Sort a list of files using the month and year parsed from the filename.
    This is useful with SNAP data since the standard is to name files like
    '<prefix>_MM_YYYY.tif'. If sorted with Python's built-in sort/sorted
    functions, files are ordered by the first character of the month, which
    produces 1, 11, ... and breaks the timeseries order. This sorts them
    chronologically, following the SNAP standard with the default settings.
ARGUMENTS:
----------
files = [list] list of `str` pathnames to be sorted by month and year. usually from glob.glob.
split_on = [str] `str` character to split the filename on. default:'_', SNAP standard.
elem_month = [int] slice element from resultant split filename list. Follows Python slicing syntax.
default:-2. For SNAP standard.
elem_year = [int] slice element from resultant split filename list. Follows Python slicing syntax.
default:-1. For SNAP standard.
RETURNS:
--------
sorted `list` by month and year ascending.
'''
import pandas as pd
months = [ int(os.path.basename( fn ).split('.')[0].split( split_on )[elem_month]) for fn in files ]
years = [ int(os.path.basename( fn ).split('.')[0].split( split_on )[elem_year]) for fn in files ]
df = pd.DataFrame( {'fn':files, 'month':months, 'year':years} )
df_sorted = df.sort_values( ['year', 'month' ] )
return df_sorted.fn.tolist()
def only_years( files, begin=1901, end=2100, split_on='_', elem_year=-1 ):
'''
    Return a new list of filenames restricted to the years begin through end (inclusive).
ARGUMENTS:
----------
files = [list] list of `str` pathnames to be sorted by month and year. usually from glob.glob.
begin = [int] four digit integer year of the begin time default:1901
end = [int] four digit integer year of the end time default:2100
split_on = [str] `str` character to split the filename on. default:'_', SNAP standard.
elem_year = [int] slice element from resultant split filename list. Follows Python slicing syntax.
default:-1. For SNAP standard.
RETURNS:
--------
sliced `list` to begin and end year.
'''
import pandas as pd
years = [ int(os.path.basename( fn ).split('.')[0].split( split_on )[elem_year]) for fn in files ]
df = pd.DataFrame( { 'fn':files, 'year':years } )
df_slice = df[ (df.year >= begin ) & (df.year <= end ) ]
return df_slice.fn.tolist()
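# Usage sketch with hypothetical SNAP-style filenames (not part of the original script):
# chain only_years and sort_files the same way the __main__ block below does. The helpers
# rely on `os` being importable; the __main__ block imports it when the script runs.
def _example_sort_and_slice():
    files = ['pr_total_mm_CRU_TS323_historical_11_1951.tif',
             'pr_total_mm_CRU_TS323_historical_01_1950.tif',
             'pr_total_mm_CRU_TS323_historical_02_1950.tif']
    # Keep only 1950 and order the remaining files chronologically by month.
    return sort_files(only_years(files, begin=1950, end=1950))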
def masked_mean( fn, bounds=None ):
    ''' Get the mean of the full domain (the data are already clipped).
    Mostly used for processing lots of files in parallel. '''
import numpy as np
import rasterio
with rasterio.open( fn ) as rst:
if bounds:
window = rst.window( *bounds )
else:
window = rst.window( *rst.bounds )
        mask = (rst.read_masks( 1, window=window ) == 0)
arr = np.ma.masked_array( rst.read( 1, window=window ), mask=mask )
return np.mean( arr )
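# Parallel-processing sketch (hypothetical file list; not part of the original script):
# compute the domain mean of many clipped GeoTiffs at once, mirroring how the __main__
# block below uses mp_map with masked_mean.
def _example_parallel_means(file_paths):
    from pathos.mp_map import mp_map
    # file_paths would typically come from glob.glob('<some_dir>/*.tif')
    return mp_map(masked_mean, file_paths, nproc=4)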
if __name__ == '__main__':
import os, glob
import geopandas as gpd
import numpy as np
import xarray as xr
import matplotlib
matplotlib.use( 'agg' )
from matplotlib import pyplot as plt
from pathos.mp_map import mp_map
import pandas as pd
import geopandas as gpd
# args / set working dir
base_dir = '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data'
os.chdir( base_dir )
# scenarios = ['rcp60', 'rcp85']
scenarios = ['historical']
shp_fn = '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/SCTC_studyarea/Kenai_StudyArea.shp'
shp = gpd.read_file( shp_fn )
bounds = shp.bounds
# models = ['5ModelAvg','CRU_TS323','GFDL-CM3','GISS-E2-R','IPSL-CM5A-LR','MRI-CGCM3','NCAR-CCSM4']
# models = ['GFDL-CM3','GISS-E2-R','IPSL-CM5A-LR','MRI-CGCM3','NCAR-CCSM4']
models = ['ts323']
    variables_list = [['pr']]  # or [['tasmax', 'tas', 'tasmin']]
# models = ['CRU_TS323']
# begin_end_groups = [[2016,2016],[2010,2020],[2095, 2100]]
begin_end_groups = [[1916, 1916],[1950, 1960],[1995, 2000]]
for scenario in scenarios:
for variables in variables_list:
for m in models:
for begin, end in begin_end_groups: # not fully wired-up yet
if m == 'ts323':
old_dir = '/Data/Base_Data/Climate/AK_CAN_2km/historical/CRU/CRU_TS32'
# begin = 1950
# end = 1965
else:
if scenario == 'historical':
old_dir = '/Data/Base_Data/Climate/AK_CAN_2km/historical/AR5_CMIP5_models'
# begin = 1950
# end = 1965
else:
old_dir = '/Data/Base_Data/Climate/AK_CAN_2km/projected/AR5_CMIP5_models'
# begin = 2095
# end = 2100
figsize = (16,9)
out = {}
for v in variables:
path = os.path.join( base_dir,'downscaled', m, scenario, v )
print( path )
files = glob.glob( os.path.join( path, '*.tif' ) )
files = sort_files( only_years( files, begin=begin, end=end, split_on='_', elem_year=-1 ) )
out[ v ] = mp_map( masked_mean, files, nproc=4 )
if v == 'tas' or v == 'pr':
if m == 'ts323':
path = os.path.join( old_dir, v )
print( path )
else:
path = os.path.join( old_dir, scenario, m, v )
files = glob.glob( os.path.join( path, '*.tif' ) )
files = sort_files( only_years( files, begin=begin, end=end, split_on='_', elem_year=-1 ) )
out[ v+'_old' ] = mp_map( masked_mean, files, nproc=4 )
# nofix
path = os.path.join( base_dir,'downscaled_pr_nofix', m, scenario, v )
print( path )
files = glob.glob( os.path.join( path, '*.tif' ) )
files = sort_files( only_years( files, begin=begin, end=end, split_on='_', elem_year=-1 ) )
out[ v+'_nofix' ] = mp_map( masked_mean, files, nproc=4 )
plot_df = pd.DataFrame( out )
plot_df.index = pd.date_range( start=str(begin), end=str(end+1), freq='M' )
# sort the columns for output plotting cleanliness:
if 'tas' in variables:
col_list = ['tasmax', 'tas_old', 'tas', 'tasmin']
elif 'pr' in variables:
col_list = ['pr', 'pr_old', 'pr_nofix']
plot_df = plot_df[ col_list ] # get em in the order for plotting
if v == 'pr':
plot_df = plot_df.round()[['pr','pr_old']]
# now plot the dataframe
if begin == end:
title = 'EPSCoR SC AOI Temp Metrics {} {} {}'.format( m, scenario, begin )
else:
title = 'EPSCoR SC AOI Temp Metrics {} {} {} - {}'.format( m, scenario, begin, end )
if 'tas' in variables:
colors = ['red', 'black', 'blue', 'red' ]
else:
colors = [ 'blue', 'black', 'darkred' ]
ax = plot_df.plot( kind='line', title=title, figsize=figsize, color=colors )
output_dir = os.path.join( base_dir, 'compare_downscaling_versions_PR_no_fix' )
if not os.path.exists( output_dir ):
os.makedirs( output_dir )
# now plot the dataframe
out_metric_fn = 'temps'
if 'pr' in variables:
out_metric_fn = 'prec'
if begin == end:
output_filename = os.path.join( output_dir,'mean_{}_epscor_sc_{}_{}_{}.png'.format( out_metric_fn, m, scenario, begin ) )
else:
output_filename = os.path.join( output_dir,'mean_{}_epscor_sc_{}_{}_{}_{}.png'.format( out_metric_fn, m, scenario, begin, end ) )
plt.savefig( output_filename, dpi=400 )
plt.close()
# # # PRISM TEST VERSION DIFFERENCES # # # # # # #
# import rasterio
# import numpy as np
# import os, glob, itertools
# base_path = '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/prism/raw_prism'
# variables = [ 'tmax', 'tmin' ]
# for variable in variables:
# ak_olds = sorted( glob.glob( os.path.join( base_path, 'prism_raw_older', 'ak', variable, '*.asc' ) ) )
# ak_news = sorted( glob.glob( os.path.join( base_path, 'prism_raw_2016', 'ak', variable, '*.asc' ) ) )
# olds = np.array([ rasterio.open( i ).read( 1 ) for i in ak_olds if '_14' not in i ])
# news = np.array([ rasterio.open( i ).read( 1 ) *.10 for i in ak_news if '_14' not in i ])
# out = olds - news
# out[ (olds == -9999.0) | (news == -9999.0) ] = 0
# uniques = np.unique( out )
# uniques[ uniques > 0.01 ]
|
from vsvbp.container import Item, Bin, Instance
from vsvbp.solver import is_feasible, optimize
import unittest
class OptimizationTestCase(unittest.TestCase):
def setUp(self):
self.items = [Item([0,4,3]), Item([1,1,3]), Item([5,2,1]), Item([3,1,7])]
self.bins = [Bin([5,5,8]), Bin([8,5,9]), Bin([3,3,5])]
def testFeasible(self):
bins = [Bin(self.bins[0].capacities) for i in xrange(5)]
inst = Instance(self.items[:], bins)
assert is_feasible(inst, True)
bins = [Bin(self.bins[0].capacities) for i in xrange(2)]
inst = Instance(self.items[:], bins)
assert not is_feasible(inst, True)
# Warning: this test may fail if the heuristics perform poorly
bins = [Bin(self.bins[1].capacities) for i in xrange(3)]
inst = Instance(self.items[:], bins)
assert is_feasible(inst, True)
bins = [Bin(self.bins[2].capacities) for i in xrange(15)]
inst = Instance(self.items[:], bins)
assert not is_feasible(inst, True)
def testOptimize(self):
# Warning: these tests may fail if the heuristics perform poorly
assert len(optimize(self.items, self.bins[0], True).bins) == 3
assert len(optimize(self.items, self.bins[1], True).bins) == 2
assert optimize(self.items, self.bins[2], True) is None
|
# Copyright (c) 2010 Witchspace <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Utilities for reading litecoin configuration files.
"""
def read_config_file(filename):
"""
Read a simple ``'='``-delimited config file.
    Raises :const:`IOError` if the file cannot be opened. Lines that cannot be
    parsed (no ``'='``) are silently ignored.
"""
f = open(filename)
try:
cfg = {}
for line in f:
line = line.strip()
if line and not line.startswith("#"):
try:
(key, value) = line.split('=', 1)
cfg[key] = value
except ValueError:
pass # Happens when line has no '=', ignore
finally:
f.close()
return cfg
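# Usage sketch (hypothetical keys and a temporary file; not part of the original module):
# given lines such as "rpcuser=alice" and "rpcpassword=secret", read_config_file returns
# {'rpcuser': 'alice', 'rpcpassword': 'secret'}; blank lines and '#' comments are skipped.
def _example_read_config():
    import os
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.conf', delete=False) as tmp:
        tmp.write("# sample config\nrpcuser=alice\nrpcpassword=secret\n")
        path = tmp.name
    try:
        return read_config_file(path)
    finally:
        os.remove(path)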
def read_default_config(filename=None):
"""
Read litecoin default configuration from the current user's home directory.
Arguments:
- `filename`: Path to a configuration file in a non-standard location (optional)
"""
if filename is None:
import os
import platform
home = os.getenv("HOME")
if not home:
raise IOError("Home directory not defined, don't know where to look for config file")
if platform.system() == "Darwin":
location = 'Library/Application Support/Litecoin/litecoin.conf'
else:
location = '.litecoin/litecoin.conf'
filename = os.path.join(home, location)
elif filename.startswith("~"):
import os
filename = os.path.expanduser(filename)
try:
return read_config_file(filename)
except (IOError, ValueError):
pass # Cannot read config file, ignore
|
import pytest
from ..connectors.dummy import Dummy
from ..worker import Worker
from ..broker import Broker
from .fixtures import Adder, FakeAdder, AbstractAdder
class TestWorker(object):
@property
def connector(self):
return Dummy()
@property
def broker(self):
return Broker(self.connector)
def test_worker_repr(self):
worker = Worker(self.broker, 'default')
assert repr(worker) == 'Worker(Dummy)'
def test_register_job(self):
worker = Worker(self.broker, 'default')
worker.register_job(Adder)
assert len(worker.registered_jobs) == 1
assert worker.registered_jobs[Adder.name] == Adder
def test_register_abstract_job(self):
worker = Worker(self.broker, 'default')
worker.register_job(AbstractAdder)
assert len(worker.registered_jobs) == 0
def test_register_job_twice(self):
worker = Worker(self.broker, 'default')
worker.register_job(Adder)
worker.register_job(Adder)
assert len(worker.registered_jobs) == 1
assert worker.registered_jobs[Adder.name] == Adder
def test_register_job_overwrite(self):
worker = Worker(self.broker, 'default')
worker.register_job(Adder)
worker.register_job(FakeAdder)
assert len(worker.registered_jobs) == 1
assert worker.registered_jobs[Adder.name] == FakeAdder
def test_job_builder(self):
worker = Worker(self.broker, 'default')
worker.register_job(Adder)
payload = self._job_payload(1, 'adder', 'default', 2, [1, 2], {})
job, args, kwargs = worker._build_job(payload)
assert isinstance(job, Adder)
assert args == [1, 2]
assert kwargs == {}
assert job.id == 1
assert job.queue == 'default'
assert job.retries == 2
def test_invalid_job_builder(self):
worker = Worker(self.broker, 'default')
worker.register_job(Adder)
payload = self._job_payload(1, 'WRONG', 'default', 2, [1, 2], {})
with pytest.raises(ValueError):
worker._build_job(payload)
def test_change_retry_time(self):
worker = Worker(self.broker, 'default')
worker.register_job(Adder)
payload = self._job_payload(1, 'adder', 'default', 2, [1, 2], {})
job, _, _ = worker._build_job(payload)
worker._change_retry_time(job)
assert len(worker.broker.connector.retried_jobs['default']) == 1
assert worker.broker.connector.retried_jobs['default'][0] == (1, 10)
def test_not_change_retry_time(self):
worker = Worker(self.broker, 'default')
worker.register_job(FakeAdder)
payload = self._job_payload(1, 'adder', 'default', 2, [1, 2], {})
job, _, _ = worker._build_job(payload)
worker._change_retry_time(job)
assert len(worker.broker.connector.retried_jobs) == 0
def _job_payload(self, jid, name, queue, retries, args, kwargs):
return {
'name': name,
'queue': queue,
'args': args,
'kwargs': kwargs,
'_metadata': {
'id': jid,
'retries': retries,
'created_on': 'NOW',
'first_execution_on': 'NOW',
},
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from glob import glob
from subprocess import check_output, CalledProcessError
def get_usb_devices():
"""
    List connected USB devices.
    :return: dict mapping device names (e.g. 'sdb') to their sysfs paths
"""
sdb_devices = map(os.path.realpath, glob('/sys/block/sd*'))
usb_devices = (dev for dev in sdb_devices
if 'usb' in dev.split('/')[5])
return dict((os.path.basename(dev), dev) for dev in usb_devices)
def get_mount_points(devices=None):
"""
    List mount points for attached USB devices.
    :param devices: mapping of device names to sysfs paths; defaults to get_usb_devices()
    :return: list of [mount URI, mount point] pairs, e.g.
    [['/dev/sdb1', '/media/bisa/BACKUP']]
"""
devices = devices or get_usb_devices() # if devices are None: get_usb_devices
output = check_output(['mount']).splitlines()
is_usb = lambda path: any(dev in str(path) for dev in devices)
usb_info = (line for line in output if is_usb(line.split()[0]))
fullInfo = []
    for info in usb_info:
        parts = info.decode('utf-8').split()
        mountURI = parts[0]
        usbURI = parts[2]
        # A mount point may contain spaces; re-join the tokens up to the "type" keyword.
        for x in range(3, len(parts)):
            if parts[x] == "type":
                for m in range(3, x):
                    usbURI += " " + parts[m]
                break
        fullInfo.append([mountURI, usbURI])
    return fullInfo
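# Usage sketch (hypothetical device names and mount paths; not part of the original module):
# list [mount URI, mount point] pairs for currently attached USB drives. Only defined here
# for illustration; nothing runs at import time.
def _example_list_usb_mounts():
    devices = get_usb_devices()          # e.g. {'sdb': '/sys/devices/.../block/sdb'}
    return get_mount_points(devices)     # e.g. [['/dev/sdb1', '/media/user/BACKUP']]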
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AzureReachabilityReport(Model):
"""Azure reachability report details.
:param aggregation_level: The aggregation level of Azure reachability
report. Can be Country, State or City.
:type aggregation_level: str
:param provider_location:
:type provider_location:
~azure.mgmt.network.v2017_09_01.models.AzureReachabilityReportLocation
:param reachability_report: List of Azure reachability report items.
:type reachability_report:
list[~azure.mgmt.network.v2017_09_01.models.AzureReachabilityReportItem]
"""
_validation = {
'aggregation_level': {'required': True},
'provider_location': {'required': True},
'reachability_report': {'required': True},
}
_attribute_map = {
'aggregation_level': {'key': 'aggregationLevel', 'type': 'str'},
'provider_location': {'key': 'providerLocation', 'type': 'AzureReachabilityReportLocation'},
'reachability_report': {'key': 'reachabilityReport', 'type': '[AzureReachabilityReportItem]'},
}
def __init__(self, aggregation_level, provider_location, reachability_report):
super(AzureReachabilityReport, self).__init__()
self.aggregation_level = aggregation_level
self.provider_location = provider_location
self.reachability_report = reachability_report
|
import unittest
import uuid
from fate_arch.session import computing_session as session
from federatedml.feature.homo_onehot.homo_ohe_arbiter import HomoOneHotArbiter
class TestOHE_alignment(unittest.TestCase):
def setUp(self):
self.job_id = str(uuid.uuid1())
session.init(self.job_id)
def test_instance(self):
ohe_alignment_arbiter = HomoOneHotArbiter()
guest_columns = [
{'race_black': ['0', '1'], 'race_hispanic': ['0'], 'race_asian': ['0', '1'], 'race_other': ['1'],
'electivesurgery': ['0', '1']}]
host_columns = [
{'race_black': ['0', '1'], 'race_hispanic': ['0', '1'], 'race_asian': ['0', '1'], 'race_other': ['0'],
'electivesurgery': ['0', '1']}]
aligned_columns = sorted(
ohe_alignment_arbiter.combine_all_column_headers(guest_columns, host_columns)['race_hispanic'])
self.assertTrue(len(aligned_columns) == 2)
self.assertEqual(['0', '1'], aligned_columns)
def tearDown(self):
session.stop()
if __name__ == '__main__':
unittest.main()
|
__all__ = ["Profiler"]
from collections import defaultdict
import cProfile
import datetime
import errno
import functools
import os
import time
from werkzeug.contrib.profiler import ProfilerMiddleware
def profile(category, *profiler_args, **profiler_kwargs):
"""
Decorate a function to run a profiler on the execution of that function.
    Arguments are passed through to the ``Profiler`` initialization. The most relevant
    one is ``output_style``, which can be set to either "detailed" or "simple". With
    "detailed" the profiler saves the complete ``.prof`` file; with "simple" it saves
    only a file containing the execution time as text.
"""
profiler = Profiler(name=_make_timestamp(), *profiler_args, **profiler_kwargs)
def decorator(f):
@functools.wraps(f)
def wrapper(*f_args, **f_kwargs):
return profiler.call(category, f, args=f_args, kwargs=f_kwargs)
return wrapper
return decorator
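# Hypothetical usage sketch (function name and arguments are made up): decorate a function
# so that, when profiling is enabled, each call is recorded under the "example" category.
# Wrapped in a function so the decorator only runs if this sketch is called explicitly.
def _example_profiled_call():
    @profile("example", output_style="simple")
    def slow_add(a, b):
        # Stand-in for real work worth timing.
        return a + b
    return slow_add(1, 2)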
class Profiler(object):
"""
Output profiling information for specified function calls and Flask requests.
Enable profiling either by passing ``enable=True`` to the profiler, or setting the
environment variable ``ENABLE_PYTHON_PROFILING`` to ``True``. The profiler is
intended to be used everywhere that profiling *might* be desirable; if enabled it
will actually do the profiling and save the results, and otherwise it will just pass
through function calls at no additional runtime cost (aside from its method call).
The profiler singleton for a flask application saves profiling files into the
directory specified at initialization. All files use the standard format for python
profiling; use ``pstats`` to tabulate the information from one or more files, or a
visualization tool like ``snakeviz``.
Some example usage for a generic flask app, including profiling a couple setup
functions, as well as the application's endpoints:
def app_init(app):
profiler = Profiler(logger=app.logger)
init_functions = [app_register_blueprints, db_init]
for f in init_functions:
profiler.call("init", f, app)
profiler.profile_app(app)
The output for this Flask application might look like this:
profile/
2018-11-30T15:15:36.14/
init/
app_register_blueprints-1.prof
db_init-1.prof
run/
traverse-1.prof
traverse-2.prof
traverse-3.prof
wsgi/
GET.root.000003ms.1543612537.prof
GET._status.000019ms.1543612539.prof
In this example the ``directory`` argument is ``"profile"``, and the ``name`` was
``None`` so it defaults to just a timestamp.
"""
def __init__(
self,
name=None,
logger=None,
enable=False,
output_style="detailed",
directory="profile",
):
name = name or _make_timestamp()
self.directory = os.path.join(directory, name)
self.logger = logger
self.output_style = output_style
self._enable = enable
self._function_counts = defaultdict(lambda: defaultdict(int))
if self.enabled:
if not os.path.isdir(self.directory):
if os.path.isfile(self.directory):
raise EnvironmentError(
"can't save profile output; file already exists: {}".format(
self.directory
)
)
os.makedirs(self.directory, mode=0o744)
if self.logger:
self.logger.info("profiling enabled")
@property
def enabled(self):
"""
Return boolean indicating if the profiler should actually profile, or just pass
through results from any calls it's asked to handle.
"""
return (
self._enable
or os.environ.get("ENABLE_PYTHON_PROFILING", "").lower() == "true"
)
def call(self, category, f, args=None, kwargs=None, output_style=None):
"""
Do a function call and (if the profiler is enabled) save profiling results to
the directory for this category.
Args:
category (str): category to save the result under
f (Callable): function to call
args (Optional[List]): arguments to pass to f call
kwargs (Optional[Dict]): keyword arguments to pass to f call
output_style (Optional[str]):
whether to save complete profile files ("detailed") or only the
execution time ("simple"); defaults to detailed
Return:
exactly the return from calling ``f(*args, **kwargs)``
"""
args = args or []
kwargs = kwargs or {}
if not self.enabled:
return f(*args, **kwargs)
# count the number of times this function is executed in this category, so the
# filenames are kept unique
function_name = "{}.{}".format(f.__module__, f.__name__)
self._function_counts[category][function_name] += 1
output_style = output_style or self.output_style or "detailed"
if self.output_style == "detailed":
profiler = cProfile.Profile()
profiler.enable()
result = f(*args, **kwargs)
profiler.disable()
self._make_profile_category(category)
filename = "{}-{}.prof".format(
function_name, str(self._function_counts[category][function_name])
)
path = os.path.join(self.directory, category, filename)
profiler.dump_stats(path)
return result
        elif output_style == "simple":
start = time.time()
result = f(*args, **kwargs)
execution_time = time.time() - start
filename = "{}-{}.time".format(
function_name, str(self._function_counts[category][function_name])
)
path = os.path.join(self.directory, category, filename)
# if the file exists already (say we gave the Profiler a directory that
# already exists, and re-ran the same function as the previous run), then
# tick up the counter until we're writing out new files
while os.path.exists(path):
self._function_counts[category][function_name] += 1
filename = "{}-{}.prof".format(
function_name, str(self._function_counts[category][function_name])
)
path = os.path.join(self.directory, category, filename)
with open(path, "w") as output_file:
output_file.write(str(execution_time))
return result
def profile_app(self, app):
"""
Enable WSGI's built-in profiler and include the output in the configured
profiling directory.
"""
if self.enabled:
path = self._make_profile_category("wsgi")
app.wsgi_app = ProfilerMiddleware(app.wsgi_app, profile_dir=path)
def _make_profile_category(self, name):
"""
Add a directory under the profiling directory given at initialization, for
saving a category of results into.
"""
path = os.path.join(self.directory, name)
try:
_mkdir_p(path)
except OSError:
raise EnvironmentError(
"can't save profile output; file already exists: {}".format(path)
)
return path
def _mkdir_p(directory, mode=0o774):
try:
os.makedirs(directory, mode=mode)
except OSError as e:
if e.errno != errno.EEXIST or not os.path.isdir(directory):
raise
def _make_timestamp():
"""
Return a timestamp to identify this profiling run.
Output format is: ``2018-11-30T14:51:55.95``.
(Truncate to hundredths of a second.)
"""
return datetime.datetime.now().isoformat()[:-4]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Smewt - A smart collection manager
# Copyright (c) 2010 Nicolas Wack <[email protected]>
#
# Smewt is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Smewt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from pygoo import MemoryObjectGraph, Equal, ontology
from guessit.slogging import setupLogging
from smewt import config
from smewt.ontology import Episode, Movie, Subtitle, Media, Config
from smewt.base import cache, utils, Collection
from smewt.base.taskmanager import TaskManager, FuncTask
from smewt.taggers import EpisodeTagger, MovieTagger
from smewt.plugins.feedwatcher import FeedWatcher
from threading import Timer
import smewt
import time
import os
import logging
log = logging.getLogger(__name__)
class VersionedMediaGraph(MemoryObjectGraph):
def __init__(self, *args, **kwargs):
super(VersionedMediaGraph, self).__init__(*args, **kwargs)
def add_object(self, node, recurse = Equal.OnIdentity, excluded_deps = list()):
result = super(VersionedMediaGraph, self).add_object(node, recurse, excluded_deps)
if isinstance(result, Media):
result.lastModified = time.time()
return result
def clear_keep_config(self):
# we want to keep our config object untouched
tmp = MemoryObjectGraph()
tmp.add_object(self.config)
super(VersionedMediaGraph, self).clear()
self.add_object(tmp.find_one(Config))
def __getattr__(self, name):
# if attr is not found and starts with an upper case letter, it might be the name
# of one of the registered classes. In that case, return a function that would instantiate
# such an object in this graph
if name[0].isupper() and name in ontology.class_names():
def inst(basenode = None, **kwargs):
result = super(VersionedMediaGraph, self).__getattr__(name)(basenode, **kwargs)
if isinstance(result, Media):
result.lastModified = time.time()
return result
return inst
raise AttributeError, name
@property
def config(self):
try:
return self.find_one(Config)
except ValueError:
return self.Config()
class SmewtDaemon(object):
def __init__(self):
super(SmewtDaemon, self).__init__()
# Note: put log file in data dir instead of log dir so that it is
# accessible through the user/ folder static view
self.logfile = utils.path(smewt.dirs.user_data_dir, 'Smewt.log')
setupLogging(filename=self.logfile, with_time=True, with_thread=True)
if smewt.config.PERSISTENT_CACHE:
self.loadCache()
# get a TaskManager for all the import tasks
self.taskManager = TaskManager()
# get our main graph DB
self.loadDB()
# get our collections: series and movies for now
self.episodeCollection = Collection(name = 'Series',
# import episodes and their subtitles too
validFiles = [ Episode.isValidEpisode,
Subtitle.isValidSubtitle ],
mediaTagger = EpisodeTagger,
dataGraph = self.database,
taskManager = self.taskManager)
self.movieCollection = Collection(name = 'Movie',
# import movies and their subtitles too
validFiles = [ Movie.isValidMovie,
Subtitle.isValidSubtitle ],
mediaTagger = MovieTagger,
dataGraph = self.database,
taskManager = self.taskManager)
if config.REGENERATE_THUMBNAILS:
# launch the regeneration of the thumbnails, but only after everything
# is setup and we are able to serve requests
Timer(3, self.regenerateSpeedDialThumbnails).start()
if self.database.config.get('tvuMldonkeyPlugin'):
# load up the feed watcher
self.feedWatcher = FeedWatcher(self)
# FIXME: this should go into a plugin.init() method
from smewt.plugins import mldonkey
mldonkey.send_command('vm')
        # Do not rescan, as it would take too long and we might delete some files that
        # are on an inaccessible network share or an external HDD.
self.taskManager.add(FuncTask('Update collections', self.updateCollections))
def quit(self):
log.info('SmewtDaemon quitting...')
self.taskManager.finishNow()
try:
self.feedWatcher.quit()
except AttributeError:
pass
self.saveDB()
if smewt.config.PERSISTENT_CACHE:
self.saveCache()
log.info('SmewtDaemon quitting OK!')
def _cacheFilename(self):
return utils.path(smewt.dirs.user_cache_dir, 'Smewt.cache',
createdir=True)
def loadCache(self):
cache.load(self._cacheFilename())
def saveCache(self):
cache.save(self._cacheFilename())
def clearCache(self):
cache.clear()
cacheFile = self._cacheFilename()
log.info('Deleting cache file: %s' % cacheFile)
try:
os.remove(cacheFile)
except OSError:
pass
def loadDB(self):
dbfile = smewt.settings.get('database_file')
if not dbfile:
dbfile = utils.path(smewt.dirs.user_data_dir, 'Smewt.database',
createdir=True)
smewt.settings.set('database_file', dbfile)
log.info('Loading database from: %s', dbfile)
self.database = VersionedMediaGraph()
try:
self.database.load(dbfile)
        except Exception:
log.warning('Could not load database %s', dbfile)
def saveDB(self):
dbfile = smewt.settings.get('database_file')
log.info('Saving database to %s', dbfile)
self.database.save(dbfile)
def clearDB(self):
log.info('Clearing database...')
self.database.clear_keep_config()
self.database.save(smewt.settings.get('database_file'))
def updateCollections(self):
self.episodeCollection.update()
self.movieCollection.update()
def rescanCollections(self):
self.episodeCollection.rescan()
self.movieCollection.rescan()
def _regenerateSpeedDialThumbnails(self):
import shlex, subprocess
from PIL import Image
from StringIO import StringIO
webkit2png = (subprocess.call(['which', 'webkit2png'], stdout=subprocess.PIPE) == 0)
if not webkit2png:
log.warning('webkit2png not found. please run: "python setup.py install" from within the 3rdparty/webkit2png folder')
return
def gen(path, filename):
width, height = 200, 150
log.info('Creating %dx%d screenshot for %s...' % (width, height, path))
filename = utils.path(smewt.dirs.user_data_dir, 'speeddial', filename, createdir=True)
cmd = 'webkit2png -g 1000 600 "http://localhost:6543%s"' % path
screenshot, _ = subprocess.Popen(shlex.split(cmd),
stdout=subprocess.PIPE).communicate()
im = Image.open(StringIO(screenshot))
im.thumbnail((width, height), Image.ANTIALIAS)
im.save(filename, "PNG")
gen('/movies', 'allmovies.png')
gen('/movies/table', 'moviestable.png')
gen('/movies/recent', 'recentmovies.png')
gen('/series', 'allseries.png')
gen('/series/suggestions', 'episodesuggestions.png')
gen('/feeds', 'feeds.png')
def regenerateSpeedDialThumbnails(self):
self.taskManager.add(FuncTask('Regenerate thumbnails',
self._regenerateSpeedDialThumbnails))
|
from collections import namedtuple
Button = namedtuple("Button", ["name", "winVal", "mask"])
buttons = [
Button(name="psxLeft", winVal=0x25, mask=1), # Arrow Left
Button(name="psxDown", winVal=0x28, mask=2), # Arrow Down
Button(name="psxRight", winVal=0x27, mask=4), # Arrow Right
Button(name="psxUp", winVal=0x26, mask=8), # Arrow Up
Button(name="psxStrt", winVal=0x58, mask=16), # X key
Button(name="psxSlct", winVal=0x5a, mask=128), # Z key
Button(name="psxSqu", winVal=0x41, mask=256), # A key
Button(name="psxX", winVal=0x53, mask=512), # S key
Button(name="psxO", winVal=0x44, mask=1024), # D key
Button(name="psxTri", winVal=0x57, mask=2048), # W key
Button(name="psxR1", winVal=0x59, mask=4096), # Y key
Button(name="psxL1", winVal=0x54, mask=8192), # T key
Button(name="psxR2", winVal=0x48, mask=16384), # H key
Button(name="psxL2", winVal=0x47, mask=32768) ] # G key
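# A minimal sketch (not part of the original mapping) of how a bitmask table
# like `buttons` is typically consumed: test each Button.mask bit against a
# controller state word to recover the pressed buttons.
def pressed_buttons(state_word):
    """Return the names of the buttons whose mask bit is set in state_word."""
    return [b.name for b in buttons if state_word & b.mask]

# e.g. pressed_buttons(1 | 16) -> ['psxLeft', 'psxStrt']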
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import tempfile
from devil.utils import cmd_helper
from pylib import constants
from pylib.results import json_results
class JavaTestRunner(object):
"""Runs java tests on the host."""
def __init__(self, args):
self._coverage_dir = args.coverage_dir
self._package_filter = args.package_filter
self._runner_filter = args.runner_filter
self._test_filter = args.test_filter
self._test_suite = args.test_suite
def SetUp(self):
pass
def RunTest(self, _test):
"""Runs junit tests from |self._test_suite|."""
with tempfile.NamedTemporaryFile() as json_file:
java_script = os.path.join(
constants.GetOutDirectory(), 'bin', 'helper', self._test_suite)
command = [java_script]
# Add Jar arguments.
jar_args = ['-test-jars', self._test_suite + '.jar',
'-json-results-file', json_file.name]
if self._test_filter:
jar_args.extend(['-gtest-filter', self._test_filter])
if self._package_filter:
jar_args.extend(['-package-filter', self._package_filter])
if self._runner_filter:
jar_args.extend(['-runner-filter', self._runner_filter])
command.extend(['--jar-args', '"%s"' % ' '.join(jar_args)])
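      # Illustrative only -- actual paths and suite names depend on the local build:
      #   <out_dir>/bin/helper/<suite> --jar-args "-test-jars <suite>.jar -json-results-file /tmp/<tmpfile>"
      # with "--jvm-args ..." appended below whenever any JVM arguments are set.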
# Add JVM arguments.
jvm_args = []
# TODO(mikecase): Add a --robolectric-dep-dir arg to test runner.
# Have this arg set by GN in the generated test runner scripts.
jvm_args += [
'-Drobolectric.dependency.dir=%s' %
os.path.join(constants.GetOutDirectory(),
'lib.java', 'third_party', 'robolectric')]
if self._coverage_dir:
if not os.path.exists(self._coverage_dir):
os.makedirs(self._coverage_dir)
elif not os.path.isdir(self._coverage_dir):
raise Exception('--coverage-dir takes a directory, not file path.')
jvm_args.append('-Demma.coverage.out.file=%s' % os.path.join(
self._coverage_dir, '%s.ec' % self._test_suite))
if jvm_args:
command.extend(['--jvm-args', '"%s"' % ' '.join(jvm_args)])
return_code = cmd_helper.RunCmd(command)
results_list = json_results.ParseResultsFromJson(
json.loads(json_file.read()))
return (results_list, return_code)
def TearDown(self):
pass
|
#!/usr/local/bin/python
#
# BitKeeper hook script.
#
# svn_buildbot.py was used as a base for this file; if you find any bugs or
# errors, please email me.
#
# Amar Takhar <[email protected]>
'''
/path/to/bk_buildbot.py --repository "$REPOS" --revision "$REV" --branch \
"<branch>" --bbserver localhost --bbport 9989
'''
import commands
import sys
import os
import re
if sys.version_info < (2, 6):
import sets
# We have hackish "-d" handling here rather than in the Options
# subclass below because a common error will be to not have twisted in
# PYTHONPATH; we want to be able to print that error to the log if
# debug mode is on, so we set it up before the imports.
DEBUG = None
if '-d' in sys.argv:
i = sys.argv.index('-d')
DEBUG = sys.argv[i+1]
del sys.argv[i]
del sys.argv[i]
if DEBUG:
f = open(DEBUG, 'a')
sys.stderr = f
sys.stdout = f
from twisted.internet import defer, reactor
from twisted.python import usage
from twisted.spread import pb
from twisted.cred import credentials
class Options(usage.Options):
optParameters = [
['repository', 'r', None,
"The repository that was changed."],
['revision', 'v', None,
"The revision that we want to examine (default: latest)"],
['branch', 'b', None,
"Name of the branch to insert into the branch field. (REQUIRED)"],
        ['category', 'c', None,
            "Scheduler category."],
['bbserver', 's', 'localhost',
"The hostname of the server that buildbot is running on"],
['bbport', 'p', 8007,
"The port that buildbot is listening on"]
]
optFlags = [
['dryrun', 'n', "Do not actually send changes"],
]
def __init__(self):
usage.Options.__init__(self)
def postOptions(self):
if self['repository'] is None:
raise usage.error("You must pass --repository")
class ChangeSender:
def getChanges(self, opts):
"""Generate and stash a list of Change dictionaries, ready to be sent
to the buildmaster's PBChangeSource."""
# first we extract information about the files that were changed
repo = opts['repository']
print "Repo:", repo
rev_arg = ''
if opts['revision']:
rev_arg = '-r"%s"' % (opts['revision'], )
changed = commands.getoutput("bk changes -v %s -d':GFILE:\\n' '%s'" % (
rev_arg, repo)).split('\n')
# Remove the first line, it's an info message you can't remove (annoying)
del changed[0]
change_info = commands.getoutput("bk changes %s -d':USER:\\n$each(:C:){(:C:)\\n}' '%s'" % (
rev_arg, repo)).split('\n')
# Remove the first line, it's an info message you can't remove (annoying)
del change_info[0]
who = change_info.pop(0)
branch = opts['branch']
message = '\n'.join(change_info)
revision = opts.get('revision')
changes = {'who': who,
'branch': branch,
'files': changed,
'comments': message,
'revision': revision}
if opts.get('category'):
changes['category'] = opts.get('category')
return changes
def sendChanges(self, opts, changes):
pbcf = pb.PBClientFactory()
reactor.connectTCP(opts['bbserver'], int(opts['bbport']), pbcf)
d = pbcf.login(credentials.UsernamePassword('change', 'changepw'))
d.addCallback(self.sendAllChanges, changes)
return d
def sendAllChanges(self, remote, changes):
dl = remote.callRemote('addChange', changes)
return dl
def run(self):
opts = Options()
try:
opts.parseOptions()
if not opts['branch']:
print "You must supply a branch with -b or --branch."
sys.exit(1);
except usage.error, ue:
print opts
print "%s: %s" % (sys.argv[0], ue)
sys.exit()
changes = self.getChanges(opts)
if opts['dryrun']:
for k in changes.keys():
print "[%10s]: %s" % (k, changes[k])
print "*NOT* sending any changes"
return
d = self.sendChanges(opts, changes)
def quit(*why):
print "quitting! because", why
reactor.stop()
def failed(f):
print "FAILURE: %s" % f
reactor.stop()
d.addErrback(failed)
d.addCallback(quit, "SUCCESS")
reactor.callLater(60, quit, "TIMEOUT")
reactor.run()
if __name__ == '__main__':
s = ChangeSender()
s.run()
|
#coding=UTF-8
from pyspark import SparkContext, SparkConf, SQLContext, Row, HiveContext
from pyspark.sql.types import *
from datetime import date, datetime, timedelta
import sys, re, os
st = datetime.now()
conf = SparkConf().setAppName('PROC_O_LNA_XDXT_CUSTOMER_INFO').setMaster(sys.argv[2])
sc = SparkContext(conf = conf)
sc.setLogLevel('WARN')
if len(sys.argv) > 5:
if sys.argv[5] == "hive":
sqlContext = HiveContext(sc)
else:
sqlContext = SQLContext(sc)
hdfs = sys.argv[3]
dbname = sys.argv[4]
#Dates needed for processing
etl_date = sys.argv[1]
#ETL date
V_DT = etl_date
#Previous day's date
V_DT_LD = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8])) + timedelta(-1)).strftime("%Y%m%d")
#First day of the current month
V_DT_FMD = date(int(etl_date[0:4]), int(etl_date[4:6]), 1).strftime("%Y%m%d")
#Last day of the previous month
V_DT_LMD = (date(int(etl_date[0:4]), int(etl_date[4:6]), 1) + timedelta(-1)).strftime("%Y%m%d")
#10-character date (YYYY-MM-DD)
V_DT10 = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8]))).strftime("%Y-%m-%d")
V_STEP = 0
O_CI_XDXT_CUSTOMER_INFO = sqlContext.read.parquet(hdfs+'/O_CI_XDXT_CUSTOMER_INFO/*')
O_CI_XDXT_CUSTOMER_INFO.registerTempTable("O_CI_XDXT_CUSTOMER_INFO")
#Task [12] 001-01::
V_STEP = V_STEP + 1
#First delete all existing data from the target table
ret = os.system("hdfs dfs -rm -r /"+dbname+"/F_CI_XDXT_CUSTOMER_INFO/*.parquet")
#Copy a full snapshot over from yesterday's backup table
ret = os.system("hdfs dfs -cp -f /"+dbname+"/F_CI_XDXT_CUSTOMER_INFO_BK/"+V_DT_LD+".parquet /"+dbname+"/F_CI_XDXT_CUSTOMER_INFO/"+V_DT+".parquet")
F_CI_XDXT_CUSTOMER_INFO = sqlContext.read.parquet(hdfs+'/F_CI_XDXT_CUSTOMER_INFO/*')
F_CI_XDXT_CUSTOMER_INFO.registerTempTable("F_CI_XDXT_CUSTOMER_INFO")
sql = """
SELECT A.CUSTOMERID AS CUSTOMERID
,A.CUSTOMERNAME AS CUSTOMERNAME
,A.CUSTOMERTYPE AS CUSTOMERTYPE
,A.CERTTYPE AS CERTTYPE
,A.CERTID AS CERTID
,A.CUSTOMERPASSWORD AS CUSTOMERPASSWORD
,A.INPUTORGID AS INPUTORGID
,A.INPUTUSERID AS INPUTUSERID
,A.INPUTDATE AS INPUTDATE
,A.REMARK AS REMARK
,A.MFCUSTOMERID AS MFCUSTOMERID
,A.STATUS AS STATUS
,A.BELONGGROUPID AS BELONGGROUPID
,A.CHANNEL AS CHANNEL
,A.LOANCARDNO AS LOANCARDNO
,A.CUSTOMERSCALE AS CUSTOMERSCALE
,A.CORPORATEORGID AS CORPORATEORGID
,A.REMEDYFLAG AS REMEDYFLAG
,A.DRAWFLAG AS DRAWFLAG
,A.MANAGERUSERID AS MANAGERUSERID
,A.MANAGERORGID AS MANAGERORGID
,A.DRAWELIGIBILITY AS DRAWELIGIBILITY
,A.BLACKSHEETORNOT AS BLACKSHEETORNOT
,A.CONFIRMORNOT AS CONFIRMORNOT
,A.CLIENTCLASSN AS CLIENTCLASSN
,A.CLIENTCLASSM AS CLIENTCLASSM
,A.BUSINESSSTATE AS BUSINESSSTATE
,A.MASTERBALANCE AS MASTERBALANCE
,A.UPDATEDATE AS UPDATEDATE
,A.FR_ID AS FR_ID
,V_DT AS ODS_ST_DATE
,'LNA' AS ODS_SYS_ID
FROM O_CI_XDXT_CUSTOMER_INFO A                               --Customer basic information
"""
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
F_CI_XDXT_CUSTOMER_INFO_INNTMP1 = sqlContext.sql(sql)
F_CI_XDXT_CUSTOMER_INFO_INNTMP1.registerTempTable("F_CI_XDXT_CUSTOMER_INFO_INNTMP1")
#F_CI_XDXT_CUSTOMER_INFO = sqlContext.read.parquet(hdfs+'/F_CI_XDXT_CUSTOMER_INFO/*')
#F_CI_XDXT_CUSTOMER_INFO.registerTempTable("F_CI_XDXT_CUSTOMER_INFO")
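#The query below keeps only destination rows whose key (CUSTOMERID, FR_ID) is absent
#from the freshly loaded batch (a left anti-join expressed as "SRC.CUSTOMERID IS NULL");
#unioning the result with INNTMP1 further down effectively overwrites matching keys with the new data.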
sql = """
SELECT DST.CUSTOMERID                                           --Customer ID:src.CUSTOMERID
       ,DST.CUSTOMERNAME                                        --Customer name:src.CUSTOMERNAME
       ,DST.CUSTOMERTYPE                                        --Customer type:src.CUSTOMERTYPE
       ,DST.CERTTYPE                                            --Certificate type:src.CERTTYPE
       ,DST.CERTID                                              --Certificate number:src.CERTID
       ,DST.CUSTOMERPASSWORD                                    --Customer password:src.CUSTOMERPASSWORD
       ,DST.INPUTORGID                                          --Registering organization:src.INPUTORGID
       ,DST.INPUTUSERID                                         --Registering user:src.INPUTUSERID
       ,DST.INPUTDATE                                           --Registration date:src.INPUTDATE
       ,DST.REMARK                                              --Remarks:src.REMARK
       ,DST.MFCUSTOMERID                                        --Core-system customer ID:src.MFCUSTOMERID
       ,DST.STATUS                                              --Status:src.STATUS
       ,DST.BELONGGROUPID                                       --Affiliated group code:src.BELONGGROUPID
       ,DST.CHANNEL                                             --Channel:src.CHANNEL
       ,DST.LOANCARDNO                                          --Loan card number:src.LOANCARDNO
       ,DST.CUSTOMERSCALE                                       --Customer scale:src.CUSTOMERSCALE
       ,DST.CORPORATEORGID                                      --Corporate organization ID:src.CORPORATEORGID
       ,DST.REMEDYFLAG                                          --Supplementary-entry flag:src.REMEDYFLAG
       ,DST.DRAWFLAG                                            --Collection flag:src.DRAWFLAG
       ,DST.MANAGERUSERID                                       --Account manager:src.MANAGERUSERID
       ,DST.MANAGERORGID                                        --Managing organization ID:src.MANAGERORGID
       ,DST.DRAWELIGIBILITY                                     --Collection eligibility:src.DRAWELIGIBILITY
       ,DST.BLACKSHEETORNOT                                     --Blacklist customer flag:src.BLACKSHEETORNOT
       ,DST.CONFIRMORNOT                                        --Effective flag:src.CONFIRMORNOT
       ,DST.CLIENTCLASSN                                        --Current customer classification:src.CLIENTCLASSN
       ,DST.CLIENTCLASSM                                        --Adjusted customer classification:src.CLIENTCLASSM
       ,DST.BUSINESSSTATE                                       --Existing-business flag:src.BUSINESSSTATE
       ,DST.MASTERBALANCE                                       --Single-account balance:src.MASTERBALANCE
       ,DST.UPDATEDATE                                          --Update date:src.UPDATEDATE
       ,DST.FR_ID                                               --Legal-entity code:src.FR_ID
       ,DST.ODS_ST_DATE                                         --Platform (ODS) date:src.ODS_ST_DATE
       ,DST.ODS_SYS_ID                                          --Source system code:src.ODS_SYS_ID
FROM F_CI_XDXT_CUSTOMER_INFO DST
LEFT JOIN F_CI_XDXT_CUSTOMER_INFO_INNTMP1 SRC
ON SRC.CUSTOMERID = DST.CUSTOMERID
AND SRC.FR_ID = DST.FR_ID
WHERE SRC.CUSTOMERID IS NULL """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
F_CI_XDXT_CUSTOMER_INFO_INNTMP2 = sqlContext.sql(sql)
dfn="F_CI_XDXT_CUSTOMER_INFO/"+V_DT+".parquet"
F_CI_XDXT_CUSTOMER_INFO_INNTMP2=F_CI_XDXT_CUSTOMER_INFO_INNTMP2.unionAll(F_CI_XDXT_CUSTOMER_INFO_INNTMP1)
F_CI_XDXT_CUSTOMER_INFO_INNTMP1.cache()
F_CI_XDXT_CUSTOMER_INFO_INNTMP2.cache()
nrowsi = F_CI_XDXT_CUSTOMER_INFO_INNTMP1.count()
nrowsa = F_CI_XDXT_CUSTOMER_INFO_INNTMP2.count()
F_CI_XDXT_CUSTOMER_INFO_INNTMP2.write.save(path = hdfs + '/' + dfn, mode='overwrite')
F_CI_XDXT_CUSTOMER_INFO_INNTMP1.unpersist()
F_CI_XDXT_CUSTOMER_INFO_INNTMP2.unpersist()
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert F_CI_XDXT_CUSTOMER_INFO lines %d, all lines %d" % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrowsi, nrowsa))
ret = os.system("hdfs dfs -mv /"+dbname+"/F_CI_XDXT_CUSTOMER_INFO/"+V_DT_LD+".parquet /"+dbname+"/F_CI_XDXT_CUSTOMER_INFO_BK/")
#First delete the current day's data from the backup table
ret = os.system("hdfs dfs -rm -r /"+dbname+"/F_CI_XDXT_CUSTOMER_INFO_BK/"+V_DT+".parquet")
#Copy a full snapshot of the current day from the main table to the backup table
ret = os.system("hdfs dfs -cp -f /"+dbname+"/F_CI_XDXT_CUSTOMER_INFO/"+V_DT+".parquet /"+dbname+"/F_CI_XDXT_CUSTOMER_INFO_BK/"+V_DT+".parquet")
|
# coding: utf-8
# # Language Translation
# In this project, you’re going to take a peek into the realm of neural network machine translation. You’ll be training a sequence to sequence model on a dataset of English and French sentences that can translate new sentences from English to French.
# ## Get the Data
# Since translating the whole language of English to French will take lots of time to train, we have provided you with a small portion of the English corpus.
# In[1]:
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import problem_unittests as tests
source_path = 'data/small_vocab_en'
target_path = 'data/small_vocab_fr'
source_text = helper.load_data(source_path)
target_text = helper.load_data(target_path)
# In[6]:
source_text[:1000]
# In[7]:
target_text[:1000]
# ## Explore the Data
# Play around with view_sentence_range to view different parts of the data.
# In[2]:
view_sentence_range = (0, 10)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in source_text.split()})))
sentences = source_text.split('\n')
word_counts = [len(sentence.split()) for sentence in sentences]
print('Number of sentences: {}'.format(len(sentences)))
print('Average number of words in a sentence: {}'.format(np.average(word_counts)))
print()
print('English sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(source_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
print()
print('French sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(target_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
# ## Implement Preprocessing Function
# ### Text to Word Ids
# As you did with other RNNs, you must turn the text into a number so the computer can understand it. In the function `text_to_ids()`, you'll turn `source_text` and `target_text` from words to ids. However, you need to add the `<EOS>` word id at the end of `target_text`. This will help the neural network predict when the sentence should end.
#
# You can get the `<EOS>` word id by doing:
# ```python
# target_vocab_to_int['<EOS>']
# ```
# You can get other word ids using `source_vocab_to_int` and `target_vocab_to_int`.
# In[3]:
def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):
"""
Convert source and target text to proper word ids
:param source_text: String that contains all the source text.
:param target_text: String that contains all the target text.
:param source_vocab_to_int: Dictionary to go from the source words to an id
:param target_vocab_to_int: Dictionary to go from the target words to an id
:return: A tuple of lists (source_id_text, target_id_text)
"""
# TODO: Implement Function
    source_id_text = [[source_vocab_to_int[word] for word in sentence.split()] for sentence in source_text.split('\n')]
    target_id_text = [[target_vocab_to_int[word] for word in sentence.split()] + [target_vocab_to_int['<EOS>']] for sentence in target_text.split('\n')]
    return source_id_text, target_id_text
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_text_to_ids(text_to_ids)
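# For illustration only (toy vocabularies, not part of the graded cells): with
# source_vocab_to_int = {'hello': 4} and target_vocab_to_int = {'bonjour': 7, '<EOS>': 1},
# text_to_ids('hello', 'bonjour', source_vocab_to_int, target_vocab_to_int) returns ([[4]], [[7, 1]]).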
# ### Preprocess all the data and save it
# Running the code cell below will preprocess all the data and save it to file.
# In[4]:
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
helper.preprocess_and_save_data(source_path, target_path, text_to_ids)
# # Check Point
# This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
# In[2]:
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
import helper
import problem_unittests as tests
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
# ### Check the Version of TensorFlow and Access to GPU
# This will check to make sure you have the correct version of TensorFlow and access to a GPU
# In[3]:
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
from tensorflow.python.layers.core import Dense
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.1'), 'Please use TensorFlow version 1.1 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
# ## Build the Neural Network
# You'll build the components necessary to build a Sequence-to-Sequence model by implementing the following functions below:
# - `model_inputs`
# - `process_decoder_input`
# - `encoding_layer`
# - `decoding_layer_train`
# - `decoding_layer_infer`
# - `decoding_layer`
# - `seq2seq_model`
#
# ### Input
# Implement the `model_inputs()` function to create TF Placeholders for the Neural Network. It should create the following placeholders:
#
# - Input text placeholder named "input" using the TF Placeholder name parameter with rank 2.
# - Targets placeholder with rank 2.
# - Learning rate placeholder with rank 0.
# - Keep probability placeholder named "keep_prob" using the TF Placeholder name parameter with rank 0.
# - Target sequence length placeholder named "target_sequence_length" with rank 1
# - Max target sequence length tensor named "max_target_len" getting its value from applying tf.reduce_max on the target_sequence_length placeholder. Rank 0.
# - Source sequence length placeholder named "source_sequence_length" with rank 1
#
# Return the placeholders in the following the tuple (input, targets, learning rate, keep probability, target sequence length, max target sequence length, source sequence length)
# In[4]:
def model_inputs():
"""
Create TF Placeholders for input, targets, learning rate, and lengths of source and target sequences.
:return: Tuple (input, targets, learning rate, keep probability, target sequence length,
max target sequence length, source sequence length)
"""
# TODO: Implement Function
inputs = tf.placeholder(tf.int32, [None, None], name='input')
targets = tf.placeholder(tf.int32, [None, None])
learning_rate = tf.placeholder(tf.float32)
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
target_sequence_length = tf.placeholder(tf.int32, (None, ), name='target_sequence_length')
max_target_len = tf.reduce_max(target_sequence_length, name='max_target_len')
source_sequence_length = tf.placeholder(tf.int32, (None, ), name='source_sequence_length')
return inputs, targets, learning_rate, keep_prob, target_sequence_length, max_target_len, source_sequence_length
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_model_inputs(model_inputs)
# ### Process Decoder Input
# Implement `process_decoder_input` by removing the last word id from each batch in `target_data` and concatenating the GO ID to the beginning of each batch.
# In[5]:
def process_decoder_input(target_data, target_vocab_to_int, batch_size):
"""
Preprocess target data for encoding
:param target_data: Target Placehoder
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param batch_size: Batch Size
:return: Preprocessed target data
"""
# TODO: Implement Function
ending = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])
dec_input = tf.concat([tf.fill([batch_size, 1], target_vocab_to_int['<GO>']), ending], 1)
return dec_input
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_process_encoding_input(process_decoder_input)
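# For illustration only (toy ids, not part of the graded cells): with batch_size=1 and
# target_vocab_to_int = {'<GO>': 0, '<EOS>': 1}, a target batch [[4, 5, 1]] becomes
# [[0, 4, 5]] -- the trailing id is sliced off and the <GO> id is prepended.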
# ### Encoding
# Implement `encoding_layer()` to create an Encoder RNN layer:
# * Embed the encoder input using [`tf.contrib.layers.embed_sequence`](https://www.tensorflow.org/api_docs/python/tf/contrib/layers/embed_sequence)
# * Construct a [stacked](https://github.com/tensorflow/tensorflow/blob/6947f65a374ebf29e74bb71e36fd82760056d82c/tensorflow/docs_src/tutorials/recurrent.md#stacking-multiple-lstms) [`tf.contrib.rnn.LSTMCell`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/LSTMCell) wrapped in a [`tf.contrib.rnn.DropoutWrapper`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/DropoutWrapper)
# * Pass cell and embedded input to [`tf.nn.dynamic_rnn()`](https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn)
# In[6]:
from imp import reload
reload(tests)
def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob,
source_sequence_length, source_vocab_size,
encoding_embedding_size):
"""
Create encoding layer
:param rnn_inputs: Inputs for the RNN
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param keep_prob: Dropout keep probability
:param source_sequence_length: a list of the lengths of each sequence in the batch
:param source_vocab_size: vocabulary size of source data
:param encoding_embedding_size: embedding size of source data
:return: tuple (RNN output, RNN state)
"""
# TODO: Implement Function
enc_input = tf.contrib.layers.embed_sequence(rnn_inputs, source_vocab_size, encoding_embedding_size)
def make_cell(rnn_size, keep_prob):
lstm = tf.contrib.rnn.LSTMCell(rnn_size, initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2017))
return tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
enc_cell = tf.contrib.rnn.MultiRNNCell([make_cell(rnn_size, keep_prob) for _ in range(num_layers)])
enc_output, enc_state = tf.nn.dynamic_rnn(enc_cell, enc_input, source_sequence_length, dtype=tf.float32)
return enc_output, enc_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_encoding_layer(encoding_layer)
# ### Decoding - Training
# Create a training decoding layer:
# * Create a [`tf.contrib.seq2seq.TrainingHelper`](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/TrainingHelper)
# * Create a [`tf.contrib.seq2seq.BasicDecoder`](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/BasicDecoder)
# * Obtain the decoder outputs from [`tf.contrib.seq2seq.dynamic_decode`](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/dynamic_decode)
# In[7]:
def decoding_layer_train(encoder_state, dec_cell, dec_embed_input,
target_sequence_length, max_summary_length,
output_layer, keep_prob):
"""
Create a decoding layer for training
:param encoder_state: Encoder State
:param dec_cell: Decoder RNN Cell
:param dec_embed_input: Decoder embedded input
:param target_sequence_length: The lengths of each sequence in the target batch
:param max_summary_length: The length of the longest sequence in the batch
:param output_layer: Function to apply the output layer
:param keep_prob: Dropout keep probability
:return: BasicDecoderOutput containing training logits and sample_id
"""
# TODO: Implement Function
training_helper = tf.contrib.seq2seq.TrainingHelper(dec_embed_input, target_sequence_length)
training_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, training_helper, encoder_state, output_layer)
training_decoder_output = tf.contrib.seq2seq.dynamic_decode(training_decoder,
impute_finished = True,
maximum_iterations = max_summary_length)[0]
return training_decoder_output
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_decoding_layer_train(decoding_layer_train)
# ### Decoding - Inference
# Create inference decoder:
# * Create a [`tf.contrib.seq2seq.GreedyEmbeddingHelper`](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/GreedyEmbeddingHelper)
# * Create a [`tf.contrib.seq2seq.BasicDecoder`](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/BasicDecoder)
# * Obtain the decoder outputs from [`tf.contrib.seq2seq.dynamic_decode`](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/dynamic_decode)
# In[8]:
def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id,
end_of_sequence_id, max_target_sequence_length,
vocab_size, output_layer, batch_size, keep_prob):
"""
Create a decoding layer for inference
:param encoder_state: Encoder state
:param dec_cell: Decoder RNN Cell
:param dec_embeddings: Decoder embeddings
:param start_of_sequence_id: GO ID
:param end_of_sequence_id: EOS Id
:param max_target_sequence_length: Maximum length of target sequences
:param vocab_size: Size of decoder/target vocabulary
    :param decoding_scope: TensorFlow Variable Scope for decoding
:param output_layer: Function to apply the output layer
:param batch_size: Batch size
:param keep_prob: Dropout keep probability
:return: BasicDecoderOutput containing inference logits and sample_id
"""
# TODO: Implement Function
start_tokens = tf.tile(tf.constant([start_of_sequence_id], dtype=tf.int32),
[batch_size], name='start_tokens')
inference_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(dec_embeddings,
start_tokens,
end_of_sequence_id)
inference_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, inference_helper,
encoder_state, output_layer)
inference_decoder_output = tf.contrib.seq2seq.dynamic_decode(inference_decoder,
impute_finished = True,
maximum_iterations=max_target_sequence_length)[0]
return inference_decoder_output
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_decoding_layer_infer(decoding_layer_infer)
# ### Build the Decoding Layer
# Implement `decoding_layer()` to create a Decoder RNN layer.
#
# * Embed the target sequences
# * Construct the decoder LSTM cell (just like you constructed the encoder cell above)
# * Create an output layer to map the outputs of the decoder to the elements of our vocabulary
# * Use your `decoding_layer_train(encoder_state, dec_cell, dec_embed_input, target_sequence_length, max_target_sequence_length, output_layer, keep_prob)` function to get the training logits.
# * Use your `decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id, max_target_sequence_length, vocab_size, output_layer, batch_size, keep_prob)` function to get the inference logits.
#
# Note: You'll need to use [tf.variable_scope](https://www.tensorflow.org/api_docs/python/tf/variable_scope) to share variables between training and inference.
# In[9]:
def decoding_layer(dec_input, encoder_state,
target_sequence_length, max_target_sequence_length,
rnn_size,
num_layers, target_vocab_to_int, target_vocab_size,
batch_size, keep_prob, decoding_embedding_size):
"""
Create decoding layer
:param dec_input: Decoder input
:param encoder_state: Encoder state
:param target_sequence_length: The lengths of each sequence in the target batch
:param max_target_sequence_length: Maximum length of target sequences
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param target_vocab_size: Size of target vocabulary
:param batch_size: The size of the batch
:param keep_prob: Dropout keep probability
:param decoding_embedding_size: Decoding embedding size
:return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput)
"""
# TODO: Implement Function
decode_embedding = tf.Variable(tf.random_uniform([target_vocab_size, decoding_embedding_size]))
decode_embed_input = tf.nn.embedding_lookup(decode_embedding, dec_input)
def make_cell(rnn_size, keep_prob):
lstm = tf.contrib.rnn.LSTMCell(rnn_size, initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2017))
return tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
decode_cell = tf.contrib.rnn.MultiRNNCell([make_cell(rnn_size, keep_prob) for _ in range(num_layers)])
output_layer = Dense(target_vocab_size,
kernel_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
with tf.variable_scope('decode'):
train_decoder_output = decoding_layer_train(encoder_state,
decode_cell,
decode_embed_input,
target_sequence_length,
max_target_sequence_length,
output_layer,
keep_prob)
with tf.variable_scope('decode', reuse=True):
start_of_sequence_id = target_vocab_to_int['<GO>']
end_of_sequence_id = target_vocab_to_int['<EOS>']
infer_decoder_output = decoding_layer_infer(encoder_state,
decode_cell,
decode_embedding,
start_of_sequence_id,
end_of_sequence_id,
max_target_sequence_length,
target_vocab_size,
output_layer,
batch_size,
keep_prob)
return train_decoder_output, infer_decoder_output
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_decoding_layer(decoding_layer)
# ### Build the Neural Network
# Apply the functions you implemented above to:
#
# - Encode the input using your `encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob, source_sequence_length, source_vocab_size, encoding_embedding_size)`.
# - Process target data using your `process_decoder_input(target_data, target_vocab_to_int, batch_size)` function.
# - Decode the encoded input using your `decoding_layer(dec_input, enc_state, target_sequence_length, max_target_sentence_length, rnn_size, num_layers, target_vocab_to_int, target_vocab_size, batch_size, keep_prob, dec_embedding_size)` function.
# In[10]:
def seq2seq_model(input_data, target_data, keep_prob, batch_size,
source_sequence_length, target_sequence_length,
max_target_sentence_length,
source_vocab_size, target_vocab_size,
enc_embedding_size, dec_embedding_size,
rnn_size, num_layers, target_vocab_to_int):
"""
Build the Sequence-to-Sequence part of the neural network
:param input_data: Input placeholder
:param target_data: Target placeholder
:param keep_prob: Dropout keep probability placeholder
:param batch_size: Batch Size
:param source_sequence_length: Sequence Lengths of source sequences in the batch
:param target_sequence_length: Sequence Lengths of target sequences in the batch
:param source_vocab_size: Source vocabulary size
:param target_vocab_size: Target vocabulary size
    :param enc_embedding_size: Encoder embedding size
    :param dec_embedding_size: Decoder embedding size
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput)
"""
# TODO: Implement Function
_, enc_state = encoding_layer(input_data,
rnn_size,
num_layers,
keep_prob,
source_sequence_length,
source_vocab_size,
enc_embedding_size)
dec_input = process_decoder_input(target_data, target_vocab_to_int, batch_size)
train_decoder_output, infer_decoder_output = decoding_layer(dec_input,
enc_state,
target_sequence_length,
max_target_sentence_length,
rnn_size,
num_layers,
target_vocab_to_int,
target_vocab_size,
batch_size,
keep_prob,
dec_embedding_size)
return train_decoder_output, infer_decoder_output
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_seq2seq_model(seq2seq_model)
# ## Neural Network Training
# ### Hyperparameters
# Tune the following parameters:
#
# - Set `epochs` to the number of epochs.
# - Set `batch_size` to the batch size.
# - Set `rnn_size` to the size of the RNNs.
# - Set `num_layers` to the number of layers.
# - Set `encoding_embedding_size` to the size of the embedding for the encoder.
# - Set `decoding_embedding_size` to the size of the embedding for the decoder.
# - Set `learning_rate` to the learning rate.
# - Set `keep_probability` to the Dropout keep probability
# - Set `display_step` to state how many steps between each debug output statement
# In[11]:
# Number of Epochs
epochs = 10
# Batch Size
batch_size = 128
# RNN Size
rnn_size = 256
# Number of Layers
num_layers = 3
# Embedding Size
encoding_embedding_size = 256
decoding_embedding_size = 256
# Learning Rate
learning_rate = 0.001
# Dropout Keep Probability
keep_probability = 0.5
display_step = 1000
# ### Build the Graph
# Build the graph using the neural network you implemented.
# In[12]:
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
save_path = 'checkpoints/dev'
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
max_target_sentence_length = max([len(sentence) for sentence in source_int_text])
train_graph = tf.Graph()
with train_graph.as_default():
input_data, targets, lr, keep_prob, target_sequence_length, max_target_sequence_length, source_sequence_length = model_inputs()
#sequence_length = tf.placeholder_with_default(max_target_sentence_length, None, name='sequence_length')
input_shape = tf.shape(input_data)
train_logits, inference_logits = seq2seq_model(tf.reverse(input_data, [-1]),
targets,
keep_prob,
batch_size,
source_sequence_length,
target_sequence_length,
max_target_sequence_length,
len(source_vocab_to_int),
len(target_vocab_to_int),
encoding_embedding_size,
decoding_embedding_size,
rnn_size,
num_layers,
target_vocab_to_int)
training_logits = tf.identity(train_logits.rnn_output, name='logits')
inference_logits = tf.identity(inference_logits.sample_id, name='predictions')
masks = tf.sequence_mask(target_sequence_length, max_target_sequence_length, dtype=tf.float32, name='masks')
with tf.name_scope("optimization"):
# Loss function
cost = tf.contrib.seq2seq.sequence_loss(
training_logits,
targets,
masks)
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
# Batch and pad the source and target sequences
# In[13]:
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
def pad_sentence_batch(sentence_batch, pad_int):
"""Pad sentences with <PAD> so that each sentence of a batch has the same length"""
max_sentence = max([len(sentence) for sentence in sentence_batch])
return [sentence + [pad_int] * (max_sentence - len(sentence)) for sentence in sentence_batch]
def get_batches(sources, targets, batch_size, source_pad_int, target_pad_int):
"""Batch targets, sources, and the lengths of their sentences together"""
for batch_i in range(0, len(sources)//batch_size):
start_i = batch_i * batch_size
# Slice the right amount for the batch
sources_batch = sources[start_i:start_i + batch_size]
targets_batch = targets[start_i:start_i + batch_size]
# Pad
pad_sources_batch = np.array(pad_sentence_batch(sources_batch, source_pad_int))
pad_targets_batch = np.array(pad_sentence_batch(targets_batch, target_pad_int))
# Need the lengths for the _lengths parameters
pad_targets_lengths = []
for target in pad_targets_batch:
pad_targets_lengths.append(len(target))
pad_source_lengths = []
for source in pad_sources_batch:
pad_source_lengths.append(len(source))
yield pad_sources_batch, pad_targets_batch, pad_source_lengths, pad_targets_lengths
# ### Train
# Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forums to see if anyone is having the same problem.
# In[14]:
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
def get_accuracy(target, logits):
"""
Calculate accuracy
"""
max_seq = max(target.shape[1], logits.shape[1])
if max_seq - target.shape[1]:
target = np.pad(
target,
[(0,0),(0,max_seq - target.shape[1])],
'constant')
if max_seq - logits.shape[1]:
logits = np.pad(
logits,
[(0,0),(0,max_seq - logits.shape[1])],
'constant')
return np.mean(np.equal(target, logits))
# Split data to training and validation sets
train_source = source_int_text[batch_size:]
train_target = target_int_text[batch_size:]
valid_source = source_int_text[:batch_size]
valid_target = target_int_text[:batch_size]
(valid_sources_batch, valid_targets_batch, valid_sources_lengths, valid_targets_lengths ) = next(get_batches(valid_source,
valid_target,
batch_size,
source_vocab_to_int['<PAD>'],
target_vocab_to_int['<PAD>']))
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(epochs):
for batch_i, (source_batch, target_batch, sources_lengths, targets_lengths) in enumerate(
get_batches(train_source, train_target, batch_size,
source_vocab_to_int['<PAD>'],
target_vocab_to_int['<PAD>'])):
_, loss = sess.run(
[train_op, cost],
{input_data: source_batch,
targets: target_batch,
lr: learning_rate,
target_sequence_length: targets_lengths,
source_sequence_length: sources_lengths,
keep_prob: keep_probability})
if batch_i % display_step == 0 and batch_i > 0:
batch_train_logits = sess.run(
inference_logits,
{input_data: source_batch,
source_sequence_length: sources_lengths,
target_sequence_length: targets_lengths,
keep_prob: 1.0})
batch_valid_logits = sess.run(
inference_logits,
{input_data: valid_sources_batch,
source_sequence_length: valid_sources_lengths,
target_sequence_length: valid_targets_lengths,
keep_prob: 1.0})
train_acc = get_accuracy(target_batch, batch_train_logits)
valid_acc = get_accuracy(valid_targets_batch, batch_valid_logits)
print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.4f}, Validation Accuracy: {:>6.4f}, Loss: {:>6.4f}'
.format(epoch_i, batch_i, len(source_int_text) // batch_size, train_acc, valid_acc, loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_path)
print('Model Trained and Saved')
# ### Save Parameters
# Save the `batch_size` and `save_path` parameters for inference.
# In[15]:
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params(save_path)
# # Checkpoint
# In[16]:
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = helper.load_preprocess()
load_path = helper.load_params()
# ## Sentence to Sequence
# To feed a sentence into the model for translation, you first need to preprocess it. Implement the function `sentence_to_seq()` to preprocess new sentences.
#
# - Convert the sentence to lowercase
# - Convert words into ids using `vocab_to_int`
# - Convert words not in the vocabulary, to the `<UNK>` word id.
# In[17]:
def sentence_to_seq(sentence, vocab_to_int):
"""
Convert a sentence to a sequence of ids
:param sentence: String
:param vocab_to_int: Dictionary to go from the words to an id
:return: List of word ids
"""
# TODO: Implement Function
sentence = sentence.lower()
word_ids = [vocab_to_int.get(word, vocab_to_int['<UNK>']) for word in sentence.split()]
return word_ids
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_sentence_to_seq(sentence_to_seq)
# ## Translate
# This will translate `translate_sentence` from English to French.
# In[21]:
translate_sentence = 'he saw a old yellow truck .'
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
translate_sentence = sentence_to_seq(translate_sentence, source_vocab_to_int)
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_path + '.meta')
loader.restore(sess, load_path)
input_data = loaded_graph.get_tensor_by_name('input:0')
logits = loaded_graph.get_tensor_by_name('predictions:0')
target_sequence_length = loaded_graph.get_tensor_by_name('target_sequence_length:0')
source_sequence_length = loaded_graph.get_tensor_by_name('source_sequence_length:0')
keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
translate_logits = sess.run(logits, {input_data: [translate_sentence]*batch_size,
target_sequence_length: [len(translate_sentence)*2]*batch_size,
source_sequence_length: [len(translate_sentence)]*batch_size,
keep_prob: 1.0})[0]
print('Input')
print(' Word Ids: {}'.format([i for i in translate_sentence]))
print(' English Words: {}'.format([source_int_to_vocab[i] for i in translate_sentence]))
print('\nPrediction')
print(' Word Ids: {}'.format([i for i in translate_logits]))
print(' French Words: {}'.format(" ".join([target_int_to_vocab[i] for i in translate_logits])))
# ## Imperfect Translation
# You might notice that some sentences translate better than others. Since the dataset you're using only has a vocabulary of 227 English words out of the thousands that you use, you're only going to see good results using these words. For this project, you don't need a perfect translation. However, if you want to create a better translation model, you'll need better data.
#
# You can train on the [WMT10 French-English corpus](http://www.statmt.org/wmt10/training-giga-fren.tar). This dataset has a larger vocabulary and is richer in the topics discussed. However, it will take days to train, so make sure you have a GPU and that the neural network performs well on the dataset we provided. Just make sure you play with the WMT10 corpus after you've submitted this project.
# ## Submitting This Project
# When submitting this project, make sure to run all the cells before saving the notebook. Save the notebook file as "dlnd_language_translation.ipynb" and save it as an HTML file under "File" -> "Download as". Include the "helper.py" and "problem_unittests.py" files in your submission.
|
#!/usr/bin/python
"""
Copyright (c) 2014 High-Performance Computing and GIS (HPCGIS) Laboratory. All rights reserved.
Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
Authors and contributors: Eric Shook ([email protected])
"""
import os
import datetime
import time
import re
import subprocess
from Queue import Queue
#from threading import Thread
import threading
import sys,getopt
'''
The workflow script accepts a tasklist file, which contains a list of taskfiles.
A task may represent a simulation of an ABM or climate model. Tasks can be run
simultaneously if there are no dependencies or ordered in the case of
dependencies. Tasks may also include pre-processing or post-processing tasks.
'''
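# Hypothetical example (file names are illustrative only): the tasklist file is a
# plain-text list of taskfile paths, one per line, e.g.
#   tasks/task1.txt
#   tasks/task2.txt
# and each taskfile uses the "program:" / "parameters:" format parsed by runtask() below.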
# TODO: Logging may be useful if the workflow becomes long
# TODO: Currently num_threads is user-defined, which controls the number of threads to launch tasks
# However, it would be better to include in the taskfile the number of cores needed
# and define the number of cores available, enabling the workflow system to manage core allocation
# Global variables
# The number of threads used to handle tasks is passed as a parameter
num_threads=0
# Array of threads (so they can be killed if needed)
threads=[]
# Array of task workflow numbers (one per thread/worker)
threadtasknums=[]
# Task queue
taskqueue=Queue()
# This function handles executing a task defined by a taskfile
def runtask(taskfile):
# Read and parse the taskfile with the following format
# Note additional parameters will likely be added based on need (e.g., CWD, data-dir)
'''
program: /path/to/executable_with_a_name
parameters: param1 -Optionalconfiguration param2 -AnotherParameter
'''
with open(taskfile,'r') as f:
# Set the required parameters as None for error checking at the end
program=None
parameters=None
for line in f:
if line.startswith("program:"):
# Extract the entire program location from after the colon split()[1]) with whitespace removed (strip())
program=line.split(":",1)[1].strip()
#print "Program="+program
if line.startswith("parameters:"):
# Extract the parameter string from after the colon split()[1]) with whitespace removed (strip())
parameters=line.split(":",1)[1].strip()
#print "Parameters="+parameters
# Error checking for required parameters
if program==None:
raise Exception("program missing in taskfile",taskfile)
if parameters==None:
raise Exception("parameters missing in taskfile",taskfile)
print "Calling program="+program,parameters
'''
In future versions that have defined input,output,stdout,etc.
there could be more logic here to:
- run each model in a defined directory
- output stdout,stderr in the directory
- package up output files for easier transfer
- ...
'''
returncode=subprocess.check_call(program+" "+parameters,shell=True)
# A task worker loops while there are tasks left in the taskqueue
# Input parameter is a thread id (tid)
def taskworker(tid):
while not taskqueue.empty():
taskfile=taskqueue.get()
tasknum=taskfile.split("/",1)[1].split(".",1)[0].strip()
tasknum=re.sub("\D", "", tasknum)
#print "tid=",tid
threadtasknums[tid]=int(tasknum)
# While there is a dependency problem (lower order task numbers are still being processed)
# then spintwait
mintasknum=min(threadtasknums)
while threadtasknums[tid]>mintasknum:
#print "min=",minthreadtasknum,"min(array)=",min(*threadtasknums),"nums[",i,"]=",threadtasknums[i]
#if(threadtasknums[tid]<=min(*threadtasknums)): # If this task number is less than or equal to the minimum
# break # then there are no dependencies, so you can break out of this infinite loop
time.sleep(1) # this is a spin-wait loop
mintasknum=min(*threadtasknums)
print "Thread",tid,"running",taskfile,"at",str(datetime.datetime.now())
try:
runtask(taskfile)
except:
exit(1)
taskqueue.task_done()
        threadtasknums[tid]=999999 # Set the tasknum for tid to the sentinel 999999 so it no longer holds back other threads' dependency checks
print "Thread",tid,"quitting, because taskqueue is empty"
# Main program code
def main():
print "Starting node workflow"
try:
opts,args=getopt.getopt(sys.argv[1:],"n:t:",["numthreads=","tasklist="])
except getopt.GetoptError:
print "workflow.py -n <number of threads to launch> -t <tasklistfile>"
sys.exit(1)
# Set model filename and experiment name based on command-line parameter
num_threads=0
tasklistfile=""
for opt, arg in opts:
if opt in ("-n", "--numthreads"):
num_threads=int(arg)
if opt in ("-t", "--tasklist"):
tasklistfile=arg
err=0
if num_threads<=0:
print " [ ERROR ] Number of threads must be greater than 0"
err=1
if tasklistfile=="":
print " [ ERROR ] Must provide tasklistfile"
err=1
if err==1:
print "workflow.py -n <number of threads to launch> -t <tasklistfile>"
sys.exit(1)
print "Executing in current directory :",os.getcwd()
print "Reading tasklist file"
with open(tasklistfile,'r') as f:
taskfiles = f.readlines()
f.close()
# tasksdir = 'tasks/'
# taskfiles = os.listdir(tasksdir) # Contains a list of task files to process
taskfiles.sort()
print "Starting task queue"
for taskfile in taskfiles:
taskqueue.put(taskfile.strip())
print "Task queue contains ",taskqueue.qsize()," tasks"
# Start the workflow engine
# Currently the logic is simple -> one task==one thread==one core but that will need
# to be modified to account for multithreaded models (agent-based and climate)
# so eventually this will need to parse the task to determine the number of cores
# needed by the task and dynamically manage the number of tasks running simultaneously
print "Starting ",num_threads," threads"
for i in range(num_threads):
threadtasknums.append(-1)
t=threading.Thread(target=taskworker,args=(i,))
        t.daemon=True  # (setDaemon() is the older spelling of the same flag)
t.start()
threads.append(t)
# Now we wait until all of the tasks are finished.
print "Waiting for threads to finish"
# Normally you can use a blocking .join, but then you cannot kill the process
# So instead we spin-wait and catch ^C so a user can kill this process.
# while threading.activeCount() > 0:
# time.sleep(20)
while taskqueue.qsize()>0:
time.sleep(1)
print "taskqueue size",taskqueue.qsize()
''' # FIXME: Need to clean up this code, which was used for testing ^C
try:
time.sleep(5) # Wait 5 seconds before checking again
# FIXME: In production this should be changed to 30
# If Ctrl+C or other error, kill all of the threads
except:
while not taskqueue.empty(): # Empty the queue
taskqueue.get()
for i in threads:
i.kill_received=True
i.kill()
exit(1)
'''
print "Joining taskqueue"
# At this point all of the tasks should be finished so we join them
notfinished=1
while notfinished==1:
notfinished=0
for i in range(num_threads):
if threadtasknums[i]<999999:
notfinished=1
time.sleep(1)
#while not taskqueue.join(1):
# time.sleep(1)
print "Finished node workflow"
# Run main
if __name__=="__main__":
main()
|
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cloudfront_invalidation
short_description: create invalidations for AWS CloudFront distributions
description:
- Allows for invalidation of a batch of paths for a CloudFront distribution.
requirements:
- boto3 >= 1.0.0
- python >= 2.6
version_added: "2.5"
author: Willem van Ketwich (@wilvk)
extends_documentation_fragment:
- aws
- ec2
options:
distribution_id:
description:
- The ID of the CloudFront distribution to invalidate paths for. Can be specified instead of the alias.
required: false
type: str
alias:
description:
- The alias of the CloudFront distribution to invalidate paths for. Can be specified instead of distribution_id.
required: false
type: str
caller_reference:
description:
- A unique reference identifier for the invalidation paths.
- Defaults to current datetime stamp.
required: false
default:
type: str
target_paths:
description:
- A list of paths on the distribution to invalidate. Each path should begin with '/'. Wildcards are allowed. eg. '/foo/bar/*'
required: true
type: list
elements: str
notes:
- does not support check mode
'''
EXAMPLES = '''
- name: create a batch of invalidations using a distribution_id for a reference
cloudfront_invalidation:
distribution_id: E15BU8SDCGSG57
caller_reference: testing 123
target_paths:
- /testpathone/test1.css
- /testpathtwo/test2.js
- /testpaththree/test3.ss
- name: create a batch of invalidations using an alias as a reference and one path using a wildcard match
cloudfront_invalidation:
alias: alias.test.com
caller_reference: testing 123
target_paths:
- /testpathone/test4.css
- /testpathtwo/test5.js
- /testpaththree/*
'''
RETURN = '''
invalidation:
description: The invalidation's information.
returned: always
type: complex
contains:
create_time:
description: The date and time the invalidation request was first made.
returned: always
type: str
sample: '2018-02-01T15:50:41.159000+00:00'
id:
description: The identifier for the invalidation request.
returned: always
type: str
sample: I2G9MOWJZFV612
invalidation_batch:
description: The current invalidation information for the batch request.
returned: always
type: complex
contains:
caller_reference:
description: The value used to uniquely identify an invalidation request.
returned: always
type: str
sample: testing 123
paths:
description: A dict that contains information about the objects that you want to invalidate.
returned: always
type: complex
contains:
items:
description: A list of the paths that you want to invalidate.
returned: always
type: list
sample:
- /testpathtwo/test2.js
- /testpathone/test1.css
- /testpaththree/test3.ss
quantity:
description: The number of objects that you want to invalidate.
returned: always
type: int
sample: 3
status:
description: The status of the invalidation request.
returned: always
type: str
sample: Completed
location:
description: The fully qualified URI of the distribution and invalidation batch request.
returned: always
type: str
sample: https://cloudfront.amazonaws.com/2017-03-25/distribution/E1ZID6KZJECZY7/invalidation/I2G9MOWJZFV622
'''
from ansible.module_utils.ec2 import get_aws_connection_info
from ansible.module_utils.ec2 import ec2_argument_spec, boto3_conn
from ansible.module_utils.ec2 import snake_dict_to_camel_dict
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.aws.cloudfront_facts import CloudFrontFactsServiceManager
import datetime
try:
from botocore.exceptions import ClientError, BotoCoreError
except ImportError:
pass # caught by imported AnsibleAWSModule
class CloudFrontInvalidationServiceManager(object):
"""
Handles CloudFront service calls to AWS for invalidations
"""
def __init__(self, module):
self.module = module
self.create_client('cloudfront')
def create_client(self, resource):
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(self.module, boto3=True)
self.client = boto3_conn(self.module, conn_type='client', resource=resource, region=region, endpoint=ec2_url, **aws_connect_kwargs)
def create_invalidation(self, distribution_id, invalidation_batch):
current_invalidation_response = self.get_invalidation(distribution_id, invalidation_batch['CallerReference'])
try:
response = self.client.create_invalidation(DistributionId=distribution_id, InvalidationBatch=invalidation_batch)
response.pop('ResponseMetadata', None)
if current_invalidation_response:
return response, False
else:
return response, True
except BotoCoreError as e:
self.module.fail_json_aws(e, msg="Error creating CloudFront invalidations.")
except ClientError as e:
if ('Your request contains a caller reference that was used for a previous invalidation batch '
'for the same distribution.' in e.response['Error']['Message']):
self.module.warn("InvalidationBatch target paths are not modifiable. "
"To make a new invalidation please update caller_reference.")
return current_invalidation_response, False
else:
self.module.fail_json_aws(e, msg="Error creating CloudFront invalidations.")
def get_invalidation(self, distribution_id, caller_reference):
current_invalidation = {}
# find all invalidations for the distribution
try:
paginator = self.client.get_paginator('list_invalidations')
invalidations = paginator.paginate(DistributionId=distribution_id).build_full_result().get('InvalidationList', {}).get('Items', [])
invalidation_ids = [inv['Id'] for inv in invalidations]
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e, msg="Error listing CloudFront invalidations.")
# check if there is an invalidation with the same caller reference
for inv_id in invalidation_ids:
try:
invalidation = self.client.get_invalidation(DistributionId=distribution_id, Id=inv_id)['Invalidation']
caller_ref = invalidation.get('InvalidationBatch', {}).get('CallerReference')
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e, msg="Error getting CloudFront invalidation {0}".format(inv_id))
if caller_ref == caller_reference:
current_invalidation = invalidation
break
current_invalidation.pop('ResponseMetadata', None)
return current_invalidation
class CloudFrontInvalidationValidationManager(object):
"""
Manages CloudFront validations for invalidation batches
"""
def __init__(self, module):
self.module = module
self.__cloudfront_facts_mgr = CloudFrontFactsServiceManager(module)
def validate_distribution_id(self, distribution_id, alias):
try:
if distribution_id is None and alias is None:
self.module.fail_json(msg="distribution_id or alias must be specified")
if distribution_id is None:
distribution_id = self.__cloudfront_facts_mgr.get_distribution_id_from_domain_name(alias)
return distribution_id
except (ClientError, BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Error validating parameters.")
def create_aws_list(self, invalidation_batch):
aws_list = {}
aws_list["Quantity"] = len(invalidation_batch)
aws_list["Items"] = invalidation_batch
return aws_list
def validate_invalidation_batch(self, invalidation_batch, caller_reference):
try:
if caller_reference is not None:
valid_caller_reference = caller_reference
else:
valid_caller_reference = datetime.datetime.now().isoformat()
valid_invalidation_batch = {
'paths': self.create_aws_list(invalidation_batch),
'caller_reference': valid_caller_reference
}
return valid_invalidation_batch
except (ClientError, BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Error validating invalidation batch.")
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
caller_reference=dict(),
distribution_id=dict(),
alias=dict(),
target_paths=dict(required=True, type='list')
))
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False, mutually_exclusive=[['distribution_id', 'alias']])
validation_mgr = CloudFrontInvalidationValidationManager(module)
service_mgr = CloudFrontInvalidationServiceManager(module)
caller_reference = module.params.get('caller_reference')
distribution_id = module.params.get('distribution_id')
alias = module.params.get('alias')
target_paths = module.params.get('target_paths')
result = {}
distribution_id = validation_mgr.validate_distribution_id(distribution_id, alias)
valid_target_paths = validation_mgr.validate_invalidation_batch(target_paths, caller_reference)
valid_pascal_target_paths = snake_dict_to_camel_dict(valid_target_paths, True)
result, changed = service_mgr.create_invalidation(distribution_id, valid_pascal_target_paths)
module.exit_json(changed=changed, **camel_dict_to_snake_dict(result))
if __name__ == '__main__':
main()
|
from .agent import do_request
from .base import BaseSource
from .types import Types
class RoundTables(BaseSource):
def __init__(self):
super(RoundTables, self).__init__()
self._start_urls = [
'https://api.zhihu.com/roundtables?excerpt_len=75'
]
def _parse(self, json_objs):
urls = []
for obj in json_objs['data']:
t = obj.get('type')
if t != 'roundtable':
continue
urls.append(obj['url'])
questions_url = [u + '/questions?excerpt_len=75' for u in urls]
for url in questions_url:
            objs = do_request(url)
            while True:
                for obj in objs['data']:
                    if obj['type'] != 'question':
                        continue
                    self.publish(obj['url'], Types.QUESTION)
                    self.get_answer_url_by_question_url(obj['url'])
                # process the current page before checking is_end so that the
                # final (or only) page of results is not skipped
                if objs['paging']['is_end']:
                    break
                objs = do_request(objs['paging']['next'])
|
import numpy as np
import pandas as pd
# from matplotlib.pyplot import plot,show,draw
import scipy.io
from functions import *
import _pickle as cPickle
import time
import os, sys
import ipyparallel
import neuroseries as nts
data_directory = '/mnt/DataGuillaume/MergedData/'
datasets = np.loadtxt(data_directory+'datasets_ThalHpc.list', delimiter = '\n', dtype = str, comments = '#')
# to know which neurons to keep
theta_mod, theta_ses = loadThetaMod('/mnt/DataGuillaume/MergedData/THETA_THAL_mod.pickle', datasets, return_index=True)
theta = pd.DataFrame( index = theta_ses['rem'],
columns = ['phase', 'pvalue', 'kappa'],
data = theta_mod['rem'])
tmp2 = theta.index[theta.isnull().any(1)].values
tmp3 = theta.index[(theta['pvalue'] > 0.01).values].values
tmp = np.unique(np.concatenate([tmp2,tmp3]))
theta_modth = theta.drop(tmp, axis = 0)
neurons_index = theta_modth.index.values
bins1 = np.arange(-1005, 1010, 25)*1000
times = np.floor(((bins1[0:-1] + (bins1[1] - bins1[0])/2)/1000)).astype('int')
premeanscore = {i:{'rem':pd.DataFrame(index = [], columns = ['mean', 'std']),'rip':pd.DataFrame(index = times, columns = [])} for i in range(3)}# BAD
posmeanscore = {i:{'rem':pd.DataFrame(index = [], columns = ['mean', 'std']),'rip':pd.DataFrame(index = times, columns = [])} for i in range(3)}# BAD
bins2 = np.arange(-1012.5,1025,25)*1000
tsmax = {i:pd.DataFrame(columns = ['pre', 'pos']) for i in range(3)}
clients = ipyparallel.Client()
print(clients.ids)
dview = clients.direct_view()
def compute_pop_pca(session):
data_directory = '/mnt/DataGuillaume/MergedData/'
import numpy as np
import scipy.io
import scipy.stats
import _pickle as cPickle
import time
import os, sys
import neuroseries as nts
from functions import loadShankStructure, loadSpikeData, loadEpoch, loadThetaMod, loadSpeed, loadXML, loadRipples, loadLFP, downsample, getPeaksandTroughs, butter_bandpass_filter
import pandas as pd
# to know which neurons to keep
data_directory = '/mnt/DataGuillaume/MergedData/'
datasets = np.loadtxt(data_directory+'datasets_ThalHpc.list', delimiter = '\n', dtype = str, comments = '#')
theta_mod, theta_ses = loadThetaMod('/mnt/DataGuillaume/MergedData/THETA_THAL_mod.pickle', datasets, return_index=True)
theta = pd.DataFrame( index = theta_ses['rem'],
columns = ['phase', 'pvalue', 'kappa'],
data = theta_mod['rem'])
tmp2 = theta.index[theta.isnull().any(1)].values
tmp3 = theta.index[(theta['pvalue'] > 0.01).values].values
tmp = np.unique(np.concatenate([tmp2,tmp3]))
theta_modth = theta.drop(tmp, axis = 0)
neurons_index = theta_modth.index.values
bins1 = np.arange(-1005, 1010, 25)*1000
times = np.floor(((bins1[0:-1] + (bins1[1] - bins1[0])/2)/1000)).astype('int')
premeanscore = {i:{'rem':pd.DataFrame(index = [], columns = ['mean', 'std']),'rip':pd.DataFrame(index = times, columns = [])} for i in range(3)}
posmeanscore = {i:{'rem':pd.DataFrame(index = [], columns = ['mean', 'std']),'rip':pd.DataFrame(index = times, columns = [])} for i in range(3)}
bins2 = np.arange(-1012.5,1025,25)*1000
tsmax = {i:pd.DataFrame(columns = ['pre', 'pos']) for i in range(3)}
# for session in datasets:
# for session in datasets[0:15]:
# for session in ['Mouse12/Mouse12-120815']:
start_time = time.clock()
print(session)
generalinfo = scipy.io.loadmat(data_directory+session+'/Analysis/GeneralInfo.mat')
shankStructure = loadShankStructure(generalinfo)
if len(generalinfo['channelStructure'][0][0][1][0]) == 2:
hpc_channel = generalinfo['channelStructure'][0][0][1][0][1][0][0] - 1
else:
hpc_channel = generalinfo['channelStructure'][0][0][1][0][0][0][0] - 1
spikes,shank = loadSpikeData(data_directory+session+'/Analysis/SpikeData.mat', shankStructure['thalamus'])
wake_ep = loadEpoch(data_directory+session, 'wake')
sleep_ep = loadEpoch(data_directory+session, 'sleep')
sws_ep = loadEpoch(data_directory+session, 'sws')
rem_ep = loadEpoch(data_directory+session, 'rem')
sleep_ep = sleep_ep.merge_close_intervals(threshold=1.e3)
sws_ep = sleep_ep.intersect(sws_ep)
rem_ep = sleep_ep.intersect(rem_ep)
speed = loadSpeed(data_directory+session+'/Analysis/linspeed.mat').restrict(wake_ep)
speed_ep = nts.IntervalSet(speed[speed>2.5].index.values[0:-1], speed[speed>2.5].index.values[1:]).drop_long_intervals(26000).merge_close_intervals(50000)
wake_ep = wake_ep.intersect(speed_ep).drop_short_intervals(3000000)
n_channel,fs, shank_to_channel = loadXML(data_directory+session+"/"+session.split("/")[1]+'.xml')
rip_ep,rip_tsd = loadRipples(data_directory+session)
hd_info = scipy.io.loadmat(data_directory+session+'/Analysis/HDCells.mat')['hdCellStats'][:,-1]
hd_info_neuron = np.array([hd_info[n] for n in spikes.keys()])
all_neurons = np.array(list(spikes.keys()))
mod_neurons = np.array([int(n.split("_")[1]) for n in neurons_index if session.split("/")[1] in n])
if len(sleep_ep) > 1:
store = pd.HDFStore("/mnt/DataGuillaume/population_activity_25ms/"+session.split("/")[1]+".h5")
# all_pop = store['allwake']
pre_pop = store['presleep']
pos_pop = store['postsleep']
store.close()
store = pd.HDFStore("/mnt/DataGuillaume/population_activity_100ms/"+session.split("/")[1]+".h5")
all_pop = store['allwake']
# pre_pop = store['presleep']
# pos_pop = store['postsleep']
store.close()
def compute_eigen(popwak):
popwak = popwak - popwak.mean(0)
popwak = popwak / (popwak.std(0)+1e-8)
from sklearn.decomposition import PCA
pca = PCA(n_components = popwak.shape[1])
xy = pca.fit_transform(popwak.values)
pc = pca.explained_variance_ > (1 + np.sqrt(1/(popwak.shape[0]/popwak.shape[1])))**2.0
eigen = pca.components_[pc]
lambdaa = pca.explained_variance_[pc]
return eigen, lambdaa
def compute_score(ep_pop, eigen, lambdaa, thr):
ep_pop = ep_pop - ep_pop.mean(0)
ep_pop = ep_pop / (ep_pop.std(0)+1e-8)
a = ep_pop.values
score = np.zeros(len(ep_pop))
for i in range(len(eigen)):
if lambdaa[i] >= thr:
score += (np.dot(a, eigen[i])**2.0 - np.dot(a**2.0, eigen[i]**2.0))
score = nts.Tsd(t = ep_pop.index.values, d = score)
return score
def compute_rip_score(tsd, score, bins):
times = np.floor(((bins[0:-1] + (bins[1] - bins[0])/2)/1000)).astype('int')
rip_score = pd.DataFrame(index = times, columns = [])
for r,i in zip(tsd.index.values,range(len(tsd))):
xbins = (bins + r).astype('int')
y = score.groupby(pd.cut(score.index.values, bins=xbins, labels = times)).mean()
if ~y.isnull().any():
rip_score[r] = y
return rip_score
def get_xmin(ep, minutes):
duree = (ep['end'] - ep['start'])/1000/1000/60
tmp = ep.iloc[np.where(np.ceil(duree.cumsum()) <= minutes + 1)[0]]
return nts.IntervalSet(tmp['start'], tmp['end'])
pre_ep = nts.IntervalSet(sleep_ep['start'][0], sleep_ep['end'][0])
post_ep = nts.IntervalSet(sleep_ep['start'][1], sleep_ep['end'][1])
pre_sws_ep = sws_ep.intersect(pre_ep)
pos_sws_ep = sws_ep.intersect(post_ep)
pre_sws_ep = get_xmin(pre_sws_ep.iloc[::-1], 30)
pos_sws_ep = get_xmin(pos_sws_ep, 30)
if pre_sws_ep.tot_length('s')/60 > 5.0 and pos_sws_ep.tot_length('s')/60 > 5.0:
for hd in range(3):
if hd == 0 or hd == 2:
index = np.where(hd_info_neuron == 0)[0]
elif hd == 1:
index = np.where(hd_info_neuron == 1)[0]
if hd == 0:
index = np.intersect1d(index, mod_neurons)
elif hd == 2:
index = np.intersect1d(index, np.setdiff1d(all_neurons, mod_neurons))
allpop = all_pop[index].copy()
prepop = nts.TsdFrame(pre_pop[index].copy())
pospop = nts.TsdFrame(pos_pop[index].copy())
# prepop25ms = nts.TsdFrame(pre_pop_25ms[index].copy())
# pospop25ms = nts.TsdFrame(pos_pop_25ms[index].copy())
if allpop.shape[1] and allpop.shape[1] > 5:
eigen,lambdaa = compute_eigen(allpop)
seuil = 1.2
if np.sum(lambdaa > seuil):
pre_score = compute_score(prepop, eigen, lambdaa, seuil)
pos_score = compute_score(pospop, eigen, lambdaa, seuil)
prerip_score = compute_rip_score(rip_tsd.restrict(pre_sws_ep), pre_score, bins1)
posrip_score = compute_rip_score(rip_tsd.restrict(pos_sws_ep), pos_score, bins1)
# pre_score_25ms = compute_score(prepop25ms, eigen)
# pos_score_25ms = compute_score(pospop25ms, eigen)
# prerip25ms_score = compute_rip_score(rip_tsd.restrict(pre_ep), pre_score_25ms, bins2)
# posrip25ms_score = compute_rip_score(rip_tsd.restrict(post_ep), pos_score_25ms, bins2)
# prerip25ms_score = prerip25ms_score - prerip25ms_score.mean(0)
# posrip25ms_score = posrip25ms_score - posrip25ms_score.mean(0)
# prerip25ms_score = prerip25ms_score / prerip25ms_score.std(0)
# posrip25ms_score = posrip25ms_score / posrip25ms_score.std(0)
# prerip25ms_score = prerip25ms_score.loc[-500:500]
# posrip25ms_score = posrip25ms_score.loc[-500:500]
# sys.exit()
# tmp = pd.concat([pd.DataFrame(prerip25ms_score.idxmax().values, columns = ['pre']),pd.DataFrame(posrip25ms_score.idxmax().values, columns = ['pos'])],axis = 1)
# tmp = pd.DataFrame(data = [[prerip25ms_score.mean(1).idxmax(), posrip25ms_score.mean(1).idxmax()]], columns = ['pre', 'pos'])
# tsmax[hd] = tsmax[hd].append(tmp, ignore_index = True)
premeanscore[hd]['rip'][session] = prerip_score.mean(1)
posmeanscore[hd]['rip'][session] = posrip_score.mean(1)
# if len(rem_ep.intersect(pre_ep)) and len(rem_ep.intersect(post_ep)):
# premeanscore[hd]['rem'].loc[session,'mean'] = pre_score.restrict(rem_ep.intersect(pre_ep)).mean()
# posmeanscore[hd]['rem'].loc[session,'mean'] = pos_score.restrict(rem_ep.intersect(post_ep)).mean()
# premeanscore[hd]['rem'].loc[session,'std'] = pre_score.restrict(rem_ep.intersect(pre_ep)).std()
# posmeanscore[hd]['rem'].loc[session,'std'] = pos_score.restrict(rem_ep.intersect(post_ep)).std()
return [premeanscore, posmeanscore, tsmax]
# sys.exit()
a = dview.map_sync(compute_pop_pca, datasets)
prescore = {i:pd.DataFrame(index = times) for i in range(3)}
posscore = {i:pd.DataFrame(index = times) for i in range(3)}
for i in range(len(a)):
for j in range(3):
if len(a[i][0][j]['rip'].columns):
s = a[i][0][j]['rip'].columns[0]
prescore[j][s] = a[i][0][j]['rip']
posscore[j][s] = a[i][1][j]['rip']
# prescore = premeanscore
# posscore = posmeanscore
from pylab import *
titles = ['non hd mod', 'hd', 'non hd non mod']
figure()
for i in range(3):
subplot(1,3,i+1)
times = prescore[i].index.values
# for s in premeanscore[i]['rip'].index.values:
# plot(times, premeanscore[i]['rip'].loc[s].values, linewidth = 0.3, color = 'blue')
# plot(times, posmeanscore[i]['rip'].loc[s].values, linewidth = 0.3, color = 'red')
plot(times, gaussFilt(prescore[i].mean(1).values, (1,)), label = 'pre', color = 'blue', linewidth = 2)
plot(times, gaussFilt(posscore[i].mean(1).values, (1,)), label = 'post', color = 'red', linewidth = 2)
legend()
title(titles[i])
show()
sys.exit()
#########################################
# search for peak in 25 ms array
########################################
tsmax = {i:pd.DataFrame(columns = ['pre', 'pos']) for i in range(2)}
for i in range(len(a)):
for hd in range(2):
tsmax[hd] = tsmax[hd].append(a[i][2][hd], ignore_index = True)
from pylab import *
plot(tsmax[0]['pos'], np.ones(len(tsmax[0]['pos'])), 'o')
plot(tsmax[0]['pos'].mean(), [1], '|', markersize = 10)
plot(tsmax[1]['pos'], np.zeros(len(tsmax[1]['pos'])), 'o')
plot(tsmax[1]['pos'].mean(), [0], '|', markersize = 10)
sys.exit()
#########################################
# SAVING
########################################
store = pd.HDFStore("../figures/figures_articles/figure3/pca_analysis_3.h5")
for i,j in zip(range(3),('nohd_mod', 'hd', 'nohd_nomod')):
store.put(j+'pre_rip', prescore[i])
store.put(j+'pos_rip', posscore[i])
store.close()
# a = dview.map_sync(compute_population_correlation, datasets[0:15])
# for i in range(len(a)):
# if type(a[i]) is dict:
# s = list(a[i].keys())[0]
# premeanscore.loc[s] = a[i][s]['pre']
# posmeanscore.loc[s] = a[i][s]['pos']
from pylab import *
titles = ['non hd', 'hd']
figure()
for i in range(2):
subplot(1,3,i+1)
times = premeanscore[i]['rip'].columns.values
# for s in premeanscore[i]['rip'].index.values:
# plot(times, premeanscore[i]['rip'].loc[s].values, linewidth = 0.3, color = 'blue')
# plot(times, posmeanscore[i]['rip'].loc[s].values, linewidth = 0.3, color = 'red')
plot(times, gaussFilt(premeanscore[i]['rip'].mean(0).values, (1,)), label = 'pre', color = 'blue', linewidth = 2)
plot(times, gaussFilt(posmeanscore[i]['rip'].mean(0).values, (1,)),label = 'post', color = 'red', linewidth = 2)
legend()
title(titles[i])
subplot(1,3,3)
bar([1,2], [premeanscore[0]['rem'].mean(0)['mean'], premeanscore[1]['rem'].mean(0)['mean']])
bar([3,4], [posmeanscore[0]['rem'].mean(0)['mean'], posmeanscore[1]['rem'].mean(0)['mean']])
xticks([1,2], ['non hd', 'hd'])
xticks([3,4], ['non hd', 'hd'])
show()
figure()
subplot(121)
times = premeanscore[0]['rip'].columns.values
for s in premeanscore[0]['rip'].index.values:
print(s)
plot(times, premeanscore[0]['rip'].loc[s].values, linewidth = 1, color = 'blue')
plot(premeanscore[0]['rip'].mean(0))
subplot(122)
for s in posmeanscore[0]['rip'].index.values:
plot(times, posmeanscore[0]['rip'].loc[s].values, linewidth = 1, color = 'red')
plot(posmeanscore[0]['rip'].mean(0))
show()
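# Note (not part of the original analysis): the threshold used in compute_eigen()
# above, (1 + sqrt(1/(n_samples/n_cells)))**2, is the upper edge of the
# Marchenko-Pastur distribution, i.e. the largest eigenvalue expected from an
# uncorrelated z-scored matrix of the same shape. The self-contained check below
# illustrates that bound on random data; names and sizes here are illustrative only.
def _marchenko_pastur_check(n_samples=2000, n_cells=50, seed=0):
    rng = np.random.RandomState(seed)
    x = rng.randn(n_samples, n_cells)
    x = (x - x.mean(0)) / x.std(0)
    eigvals = np.linalg.eigvalsh(np.cov(x, rowvar=False))
    threshold = (1 + np.sqrt(n_cells / float(n_samples)))**2
    # for pure noise essentially all eigenvalues fall at or below the bound
    return eigvals.max(), threshold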
|
from unittest import mock
from django.test import RequestFactory, TestCase
from django.test.utils import override_settings
from django.urls.exceptions import NoReverseMatch
from wagtail.contrib.routable_page.templatetags.wagtailroutablepage_tags import routablepageurl
from wagtail.core.models import Page, Site
from wagtail.tests.routablepage.models import (
RoutablePageTest, RoutablePageWithOverriddenIndexRouteTest)
class TestRoutablePage(TestCase):
model = RoutablePageTest
def setUp(self):
self.home_page = Page.objects.get(id=2)
self.routable_page = self.home_page.add_child(instance=self.model(
title="Routable Page",
live=True,
))
def test_resolve_index_route_view(self):
view, args, kwargs = self.routable_page.resolve_subpage('/')
self.assertEqual(view, self.routable_page.index_route)
self.assertEqual(args, ())
self.assertEqual(kwargs, {})
def test_resolve_archive_by_year_view(self):
view, args, kwargs = self.routable_page.resolve_subpage('/archive/year/2014/')
self.assertEqual(view, self.routable_page.archive_by_year)
self.assertEqual(args, ('2014', ))
self.assertEqual(kwargs, {})
def test_resolve_archive_by_author_view(self):
view, args, kwargs = self.routable_page.resolve_subpage('/archive/author/joe-bloggs/')
self.assertEqual(view, self.routable_page.archive_by_author)
self.assertEqual(args, ())
self.assertEqual(kwargs, {'author_slug': 'joe-bloggs'})
def test_resolve_external_view(self):
view, args, kwargs = self.routable_page.resolve_subpage('/external/joe-bloggs/')
self.assertEqual(view, self.routable_page.external_view)
self.assertEqual(args, ('joe-bloggs', ))
self.assertEqual(kwargs, {})
def test_resolve_external_view_other_route(self):
view, args, kwargs = self.routable_page.resolve_subpage('/external-no-arg/')
self.assertEqual(view, self.routable_page.external_view)
self.assertEqual(args, ())
self.assertEqual(kwargs, {})
def test_reverse_index_route_view(self):
url = self.routable_page.reverse_subpage('index_route')
self.assertEqual(url, '')
def test_reverse_archive_by_year_view(self):
url = self.routable_page.reverse_subpage('archive_by_year', args=('2014', ))
self.assertEqual(url, 'archive/year/2014/')
def test_reverse_archive_by_author_view(self):
url = self.routable_page.reverse_subpage('archive_by_author', kwargs={'author_slug': 'joe-bloggs'})
self.assertEqual(url, 'archive/author/joe-bloggs/')
def test_reverse_overridden_name(self):
url = self.routable_page.reverse_subpage('name_overridden')
self.assertEqual(url, 'override-name-test/')
def test_reverse_overridden_name_default_doesnt_work(self):
with self.assertRaises(NoReverseMatch):
self.routable_page.reverse_subpage('override_name_test')
def test_reverse_external_view(self):
url = self.routable_page.reverse_subpage('external_view', args=('joe-bloggs', ))
self.assertEqual(url, 'external/joe-bloggs/')
def test_reverse_external_view_other_route(self):
url = self.routable_page.reverse_subpage('external_view')
self.assertEqual(url, 'external-no-arg/')
def test_get_index_route_view(self):
response = self.client.get(self.routable_page.url)
self.assertContains(response, "DEFAULT PAGE TEMPLATE")
def test_get_routable_page_with_overridden_index_route(self):
page = self.home_page.add_child(
instance=RoutablePageWithOverriddenIndexRouteTest(
title="Routable Page with overridden index",
live=True
)
)
response = self.client.get(page.url)
self.assertContains(response, "OVERRIDDEN INDEX ROUTE")
self.assertNotContains(response, "DEFAULT PAGE TEMPLATE")
def test_get_archive_by_year_view(self):
response = self.client.get(self.routable_page.url + 'archive/year/2014/')
self.assertContains(response, "ARCHIVE BY YEAR: 2014")
def test_earlier_view_takes_precedence(self):
response = self.client.get(self.routable_page.url + 'archive/year/1984/')
self.assertContains(response, "we were always at war with eastasia")
def test_get_archive_by_author_view(self):
response = self.client.get(self.routable_page.url + 'archive/author/joe-bloggs/')
self.assertContains(response, "ARCHIVE BY AUTHOR: joe-bloggs")
def test_get_external_view(self):
response = self.client.get(self.routable_page.url + 'external/joe-bloggs/')
self.assertContains(response, "EXTERNAL VIEW: joe-bloggs")
def test_get_external_view_other_route(self):
response = self.client.get(self.routable_page.url + 'external-no-arg/')
self.assertContains(response, "EXTERNAL VIEW: ARG NOT SET")
def test_routable_page_can_have_instance_bound_descriptors(self):
# This descriptor pretends that it does not exist in the class, hence
# it raises an AttributeError when class bound. This is, for instance,
# the behavior of django's FileFields.
class InstanceDescriptor:
def __get__(self, instance, cls=None):
if instance is None:
raise AttributeError
return 'value'
def __set__(self, instance, value):
raise AttributeError
try:
RoutablePageTest.descriptor = InstanceDescriptor()
RoutablePageTest.get_subpage_urls()
finally:
del RoutablePageTest.descriptor
class TestRoutablePageTemplateTag(TestCase):
def setUp(self):
self.home_page = Page.objects.get(id=2)
self.routable_page = self.home_page.add_child(instance=RoutablePageTest(
title="Routable Page",
live=True,
))
self.rf = RequestFactory()
self.request = self.rf.get(self.routable_page.url)
self.context = {'request': self.request}
def test_templatetag_reverse_index_route(self):
url = routablepageurl(self.context, self.routable_page,
'index_route')
self.assertEqual(url, '/%s/' % self.routable_page.slug)
def test_templatetag_reverse_archive_by_year_view(self):
url = routablepageurl(self.context, self.routable_page,
'archive_by_year', '2014')
self.assertEqual(url, '/%s/archive/year/2014/' % self.routable_page.slug)
def test_templatetag_reverse_archive_by_author_view(self):
url = routablepageurl(self.context, self.routable_page,
'archive_by_author', author_slug='joe-bloggs')
self.assertEqual(url, '/%s/archive/author/joe-bloggs/' % self.routable_page.slug)
def test_templatetag_reverse_external_view(self):
url = routablepageurl(self.context, self.routable_page,
'external_view', 'joe-bloggs')
self.assertEqual(url, '/%s/external/joe-bloggs/' % self.routable_page.slug)
def test_templatetag_reverse_external_view_without_append_slash(self):
with mock.patch('wagtail.core.models.WAGTAIL_APPEND_SLASH', False):
url = routablepageurl(self.context, self.routable_page,
'external_view', 'joe-bloggs')
expected = '/' + self.routable_page.slug + '/' + 'external/joe-bloggs/'
self.assertEqual(url, expected)
@override_settings(ALLOWED_HOSTS=['testserver', 'localhost', 'development.local'])
class TestRoutablePageTemplateTagForSecondSiteAtSameRoot(TestCase):
"""
When multiple sites exist on the same root page, relative URLs within that subtree should
omit the domain, in line with #4390
"""
def setUp(self):
default_site = Site.objects.get(is_default_site=True)
second_site = Site.objects.create( # add another site with the same root page
hostname='development.local',
port=default_site.port,
root_page_id=default_site.root_page_id,
)
self.home_page = Page.objects.get(id=2)
self.routable_page = self.home_page.add_child(instance=RoutablePageTest(
title="Routable Page",
live=True,
))
self.rf = RequestFactory()
self.request = self.rf.get(self.routable_page.url)
self.context = {'request': self.request}
self.request.META['HTTP_HOST'] = second_site.hostname
self.request.META['SERVER_PORT'] = second_site.port
def test_templatetag_reverse_index_route(self):
url = routablepageurl(self.context, self.routable_page,
'index_route')
self.assertEqual(url, '/%s/' % self.routable_page.slug)
def test_templatetag_reverse_archive_by_year_view(self):
url = routablepageurl(self.context, self.routable_page,
'archive_by_year', '2014')
self.assertEqual(url, '/%s/archive/year/2014/' % self.routable_page.slug)
def test_templatetag_reverse_archive_by_author_view(self):
url = routablepageurl(self.context, self.routable_page,
'archive_by_author', author_slug='joe-bloggs')
self.assertEqual(url, '/%s/archive/author/joe-bloggs/' % self.routable_page.slug)
def test_templatetag_reverse_external_view(self):
url = routablepageurl(self.context, self.routable_page,
'external_view', 'joe-bloggs')
self.assertEqual(url, '/%s/external/joe-bloggs/' % self.routable_page.slug)
def test_templatetag_reverse_external_view_without_append_slash(self):
with mock.patch('wagtail.core.models.WAGTAIL_APPEND_SLASH', False):
url = routablepageurl(self.context, self.routable_page,
'external_view', 'joe-bloggs')
expected = '/' + self.routable_page.slug + '/' + 'external/joe-bloggs/'
self.assertEqual(url, expected)
@override_settings(ALLOWED_HOSTS=['testserver', 'localhost', 'events.local'])
class TestRoutablePageTemplateTagForSecondSiteAtDifferentRoot(TestCase):
"""
When multiple sites exist, relative URLs between such sites should include the domain portion
"""
def setUp(self):
self.home_page = Page.objects.get(id=2)
events_page = self.home_page.add_child(instance=Page(title='Events', live=True))
second_site = Site.objects.create(
hostname='events.local',
port=80,
root_page=events_page,
)
self.routable_page = self.home_page.add_child(instance=RoutablePageTest(
title="Routable Page",
live=True,
))
self.rf = RequestFactory()
self.request = self.rf.get(self.routable_page.url)
self.context = {'request': self.request}
self.request.META['HTTP_HOST'] = second_site.hostname
self.request.META['SERVER_PORT'] = second_site.port
def test_templatetag_reverse_index_route(self):
url = routablepageurl(self.context, self.routable_page,
'index_route')
self.assertEqual(url, 'http://localhost/%s/' % self.routable_page.slug)
def test_templatetag_reverse_archive_by_year_view(self):
url = routablepageurl(self.context, self.routable_page,
'archive_by_year', '2014')
self.assertEqual(url, 'http://localhost/%s/archive/year/2014/' % self.routable_page.slug)
def test_templatetag_reverse_archive_by_author_view(self):
url = routablepageurl(self.context, self.routable_page,
'archive_by_author', author_slug='joe-bloggs')
self.assertEqual(url, 'http://localhost/%s/archive/author/joe-bloggs/' % self.routable_page.slug)
def test_templatetag_reverse_external_view(self):
url = routablepageurl(self.context, self.routable_page,
'external_view', 'joe-bloggs')
self.assertEqual(url, 'http://localhost/%s/external/joe-bloggs/' % self.routable_page.slug)
def test_templatetag_reverse_external_view_without_append_slash(self):
with mock.patch('wagtail.core.models.WAGTAIL_APPEND_SLASH', False):
url = routablepageurl(self.context, self.routable_page,
'external_view', 'joe-bloggs')
expected = 'http://localhost/' + self.routable_page.slug + '/' + 'external/joe-bloggs/'
self.assertEqual(url, expected)
|
import os
import sys
import imp
import types
class Finder:
def __init__(self, ext):
self.ext = ext
def find_module(self, fullname, path):
for dirname in sys.path:
filename = os.path.join(dirname, *(fullname.split('.'))) + self.ext
if os.path.exists(filename):
self.filename = filename
return self
class BaseImporter(Finder):
def load_module(self, fullname):
if fullname in sys.modules:
mod = sys.modules[fullname]
else:
sys.modules[fullname] = mod = imp.new_module(fullname)
mod.__file__ = self.filename
mod.__loader__ = self
exec(self.get_source(self.filename), mod.__dict__)
return mod
class TemplateModule(types.ModuleType):
    def __call__(self, kvs=None):
        # Instance attribute lookup finds the module-level __call__ that the
        # template importer exec()s into this module's __dict__, so this call
        # dispatches to the generated template function rather than recursing.
        return self.__call__(kvs)
class TemplateImporter(Finder):
def load_module(self, fullname):
if fullname in sys.modules:
mod = sys.modules[fullname]
else:
sys.modules[fullname] = mod = TemplateModule(fullname)
mod.__file__ = self.filename
mod.__loader__ = self
exec(self.get_source(self.filename), mod.__dict__)
return mod
class JsonImporter(BaseImporter):
def get_source(self, filename):
return """
import json
root = json.loads(\"\"\"%s\"\"\")
locals().update(root)
""" % slurp(filename)
class YamlImporter(BaseImporter):
def get_source(self, filename):
return """
import yaml
root = yaml.safe_load(\"\"\"%s\"\"\")
locals().update(root)
""" % slurp(filename)
class SimpleTemplateImporter(TemplateImporter):
code_template = """
from html import escape
from string import Template
template = Template(\"\"\"%s\"\"\")
def __call__(kvs=None):
if type(kvs) is dict:
return template.substitute({k: escape(v) for k, v in kvs.items()})
else:
return template.template"""
def get_source(self, filename):
return self.code_template % slurp(filename)
class JadeImporter(TemplateImporter):
def get_source(self, filename):
pass
class JinjaImporter(TemplateImporter):
def get_source(self, filename):
pass
class MustacheImporter(TemplateImporter):
code_template = """
from pystache import render
def __call__(kvs=None):
template = \"\"\"%s\"\"\"
if type(kvs) is dict:
return render(template, kvs)
else:
return template"""
def get_source(self, filename):
return self.code_template % slurp(filename)
class ConfigImporter(BaseImporter):
def get_source(self, filename):
return """
from configparser import ConfigParser
config = ConfigParser()
config.read('%(filename)s')
""" % dict(filename=filename)
def slurp(filename):
with open(filename) as fp:
return fp.read()
def basename(filename):
return os.path.splitext(os.path.basename(filename))[0]
def install(importer):
return sys.meta_path.append(importer)
def uninstall(importer):
sys.meta_path.remove(importer)
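# A minimal usage sketch (not part of the original module): register the JSON
# importer on sys.meta_path, drop a .json file onto sys.path and import it as a
# module whose top-level keys become attributes. The 'settings.json'/'settings'
# names are illustrative only, and this relies on the legacy imp-based
# find_module/load_module protocol used above (available before Python 3.12).
if __name__ == '__main__':
    import json
    import tempfile
    workdir = tempfile.mkdtemp()
    with open(os.path.join(workdir, 'settings.json'), 'w') as fp:
        json.dump({'debug': True, 'port': 8080}, fp)
    sys.path.insert(0, workdir)
    importer = JsonImporter('.json')
    install(importer)
    try:
        import settings                 # resolved by JsonImporter.find_module/load_module
        print(settings.debug)           # True
        print(settings.root['port'])    # 8080
    finally:
        uninstall(importer)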
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Define the classes and methods to work with sections.
"""
import numpy as np
class BeamSection(object):
"""Defines a beam section
Parameters
----------
name: str
name of the section
material: Material instance
material of the section defined as an instance of Material object
data: tuple
properties of the section
type: str
defines the type of cross-section
+-------------------------+------------------------------+
| type | *data* format |
+=========================+==============================+
|'rectangular': |``data=(width, height,)`` |
+-------------------------+------------------------------+
|'circular': |``data=(r, )`` |
+-------------------------+------------------------------+
|'I-section': |``data=(H, h_f, w_web, w_f)`` |
+-------------------------+------------------------------+
|'general': |``data=(A, I_3,)`` |
+-------------------------+------------------------------+
"""
def __init__(self, name, material, data, type='rectangular'):
self._name = name
self._material = material
self._data = data
self._type = type
self._area = 0
self._Iz = 0
self._Iy = 0
self._Jx = 0
self.compute_properties()
def print_properties(self):
"""Prints the properties of the BeamSection instance
:returns: TODO
"""
if self._type == 'rectangular':
props = {'width': self._data[0], 'height': self._data[1]}
else:
props = 'undefined'
return 'Properties: ' + str(props)
def compute_properties(self):
"""Compute all the mechanical properties for the given section
:returns: TODO
"""
# Calculate the area
self._area = self.calc_area()
self._Iz, self._Iy = self.calc_inertia()
def calc_area(self):
"""Calculate the area of the section
:returns: TODO
"""
type = self._type
if type == 'rectangular':
width = self._data[0]
height = self._data[1]
return width * height
elif type == 'general':
return self._data[0]
elif type == 'circular':
radius = self._data[0]
return np.pi * radius**2
def calc_inertia(self):
"""Calculate the moment of inertia of the beam section
:returns: Iz, Iy
"""
type = self._type
if type == 'rectangular':
width = self._data[0]
height = self._data[1]
I_z = width * height**3 / 12.
I_y = height * width**3 / 12.
return I_z, I_y
        elif type == 'circular':
            # I = pi * r**4 / 4 about any centroidal axis of a solid circle;
            # previously calc_inertia() returned None for circular sections,
            # which made compute_properties() fail when unpacking the result
            radius = self._data[0]
            I_circ = np.pi * radius**4 / 4.
            return I_circ, I_circ
        elif type == 'general':
            return self._data[1], 0
def __str__(self):
"""
Returns the printable string for this object
"""
return 'Beam Section: {name}, type: {t}'.format(name=self._name,
t=self._type)
def __repr__(self):
"""
Returns the printable string for this object
"""
return 'Beam Section: {name}, type: {t}'.format(name=self._name,
t=self._type)
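# A minimal usage sketch (not part of the original module): build a rectangular
# section and read back the derived properties. BeamSection only stores the
# material object, so a simple placeholder class stands in here for a real
# Material instance.
if __name__ == '__main__':
    class _PlaceholderMaterial(object):
        name = 'steel'
    section = BeamSection('S1', _PlaceholderMaterial(), (0.2, 0.4), type='rectangular')
    print(section)                      # Beam Section: S1, type: rectangular
    print(section.print_properties())   # Properties: {'width': 0.2, 'height': 0.4}
    print(section._area)                # width * height
    print(section._Iz, section._Iy)     # width*height**3/12, height*width**3/12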
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2006 José de Paula Eufrásio Junior ([email protected]) AND
# Yves Junqueira ([email protected])
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# from http://www.voidspace.org.uk/python/pathutils.html (BSD License)
def formatbytes(sizeint, configdict=None, **configs):
"""
Given a file size as an integer, return a nicely formatted string that
    represents the size. Has various options to control its output.
You can pass in a dictionary of arguments or keyword arguments. Keyword
arguments override the dictionary and there are sensible defaults for options
you don't set.
Options and defaults are as follows :
* ``forcekb = False`` - If set this forces the output to be in terms
of kilobytes and bytes only.
* ``largestonly = True`` - If set, instead of outputting
``1 Mbytes, 307 Kbytes, 478 bytes`` it outputs using only the largest
denominator - e.g. ``1.3 Mbytes`` or ``17.2 Kbytes``
* ``kiloname = 'Kbytes'`` - The string to use for kilobytes
* ``meganame = 'Mbytes'`` - The string to use for Megabytes
* ``bytename = 'bytes'`` - The string to use for bytes
* ``nospace = True`` - If set it outputs ``1Mbytes, 307Kbytes``,
notice there is no space.
Example outputs : ::
19Mbytes, 75Kbytes, 255bytes
2Kbytes, 0bytes
23.8Mbytes
.. note::
It currently uses the plural form even for singular.
"""
defaultconfigs = { 'forcekb' : False,
'largestonly' : True,
'kiloname' : 'Kbytes',
'meganame' : 'Mbytes',
'bytename' : 'bytes',
'nospace' : True}
if configdict is None:
configdict = {}
for entry in configs:
# keyword parameters override the dictionary passed in
configdict[entry] = configs[entry]
#
for keyword in defaultconfigs:
        if keyword not in configdict:
configdict[keyword] = defaultconfigs[keyword]
#
if configdict['nospace']:
space = ''
else:
space = ' '
#
mb, kb, rb = bytedivider(sizeint)
if configdict['largestonly']:
if mb and not configdict['forcekb']:
return stringround(mb, kb)+ space + configdict['meganame']
elif kb or configdict['forcekb']:
if mb and configdict['forcekb']:
kb += 1024*mb
return stringround(kb, rb) + space+ configdict['kiloname']
else:
return str(rb) + space + configdict['bytename']
else:
outstr = ''
if mb and not configdict['forcekb']:
outstr = str(mb) + space + configdict['meganame'] +', '
if kb or configdict['forcekb'] or mb:
if configdict['forcekb']:
kb += 1024*mb
outstr += str(kb) + space + configdict['kiloname'] +', '
return outstr + str(rb) + space + configdict['bytename']
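# The helpers bytedivider() and stringround() used above come from the same
# pathutils module and are not included in this fragment. The sketch below shows
# the behaviour formatbytes() assumes of them (split a byte count into
# megabyte/kilobyte/byte parts, and render a value plus a 1024-based fraction to
# one decimal place); the real implementations may differ in detail.
def bytedivider(sizeint):
    """Assumed behaviour: return (megabytes, kilobytes, bytes) for a byte count."""
    mb, rest = divmod(sizeint, 1024 * 1024)
    kb, rb = divmod(rest, 1024)
    return mb, kb, rb
def stringround(main, rest):
    """Assumed behaviour: format main units plus rest/1024 as one rounded value."""
    return '%.1f' % (main + rest / 1024.0)
# Under these assumptions:
#   formatbytes(24950000)                              -> '23.8Mbytes'
#   formatbytes(24950000, forcekb=True, nospace=False) -> '24365.2 Kbytes'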
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains a xchatlog formatter in plaso."""
from plaso.formatters import interface
class XChatLogFormatter(interface.ConditionalEventFormatter):
"""Formatter for XChat log files."""
DATA_TYPE = 'xchat:log:line'
FORMAT_STRING_PIECES = [u'[nickname: {nickname}]',
u'{text}']
SOURCE_LONG = 'XChat Log File'
SOURCE_SHORT = 'LOG'
|
#!/usr/bin/env python
# (c) 2015, Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import re
import subprocess
import sys
# all four positional arguments are required
if len(sys.argv) < 5:
print("ERROR: This script requires arguments!\n"
"%s repository_path review_url repository_name "
"zuul_changes" % sys.argv[0])
sys.exit(1)
repo_path = sys.argv[1]
review_url = sys.argv[2]
repo_name = sys.argv[3]
change_list = str(sys.argv[4]).split('^')
applicable_changes = [x for x in change_list if repo_name in x]
try:
for change in applicable_changes:
(project, branch, ref) = change.split(':')
if re.search(repo_name, project):
if not re.search(branch, subprocess.check_output(
['git', '-C', repo_path, 'status', '-s', '-b'])):
command = ['git', '-C', repo_path, 'checkout', branch]
subprocess.call(command, stdout=True)
command = ['git', '-C', repo_path, 'fetch',
review_url + "/" + repo_name, ref]
            if subprocess.call(command, stdout=True) == 0:
if subprocess.call(
['git', '-C', repo_path, 'cherry-pick',
                    '-n', 'FETCH_HEAD'], stdout=True) == 0:
print("Applied %s" % ref)
else:
print("Failed to cherry pick %s on to %s branch %s"
% (ref, repo_name, branch))
sys.exit(1)
else:
print("Failed to download %s on to %s branch %s"
% (ref, repo_name, branch))
sys.exit(1)
except Exception as e:
print("Failed to process change: %s" % e)
|
#!/usr/bin/env python
import numpy as np
import naima
from astropy import units as u
from astropy.io import ascii
## Read data
data=ascii.read('CrabNebula_HESS_2006.dat')
## Set initial parameters
p0=np.array((474,2.34,np.log10(80.),))
labels=['norm','index','log10(cutoff)']
## Model definition
ph_energy = u.Quantity(data['energy'])
# peak gamma ph_energy production is ~0.1*Ep, so enemid corresponds to Ep=10*enemid
# If a cutoff is present, this should be reduced to reduce parameter correlation
e_0 = 5.*np.sqrt(ph_energy[0]*ph_energy[-1])
from naima.models import PionDecay, ExponentialCutoffPowerLaw
ECPL = ExponentialCutoffPowerLaw(1 / u.TeV, e_0, 2, 60. * u.TeV)
PP = PionDecay(ECPL)
distance = 2.0 * u.kpc
Epmin = ph_energy[0]*1e-2
Epmax = ph_energy[-1]*1e3
proton_energy = np.logspace(np.log10(Epmin.value),
np.log10(Epmax.value),50)*ph_energy.unit
def ppgamma(pars,data):
PP.particle_distribution.amplitude = pars[0] / u.TeV
PP.particle_distribution.alpha = pars[1]
PP.particle_distribution.e_cutoff = (10**pars[2])*u.TeV
# convert to same units as observed differential spectrum
model = PP.flux(data,distance).to('1/(s cm2 TeV)')
# Save a realization of the particle distribution to the metadata blob
proton_dist= PP.particle_distribution(proton_energy)
return model, model, (proton_energy, proton_dist)
## Prior definition
def lnprior(pars):
"""
Return probability of parameter values according to prior knowledge.
Parameter limits should be done here through uniform prior ditributions
"""
logprob = naima.uniform_prior(pars[0],0.,np.inf) \
+ naima.uniform_prior(pars[1],-1,5)
return logprob
if __name__=='__main__':
## Run sampler
sampler,pos = naima.run_sampler(data_table=data, p0=p0, labels=labels,
model=ppgamma, prior=lnprior, nwalkers=16, nburn=50, nrun=10,
threads=4)
## Save sampler
    from six.moves import cPickle
sampler.pool=None
cPickle.dump(sampler,open('CrabNebula_proton_sampler.pickle','wb'))
## Diagnostic plots
naima.generate_diagnostic_plots('CrabNebula_proton',sampler,sed=True)
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/Users/lee/backups/code/iblah_py/ui/ui_profile_dialog.ui'
#
# Created: Fri May 6 21:47:58 2011
# by: PyQt4 UI code generator 4.8.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_ProfileDialog(object):
def setupUi(self, ProfileDialog):
ProfileDialog.setObjectName(_fromUtf8("ProfileDialog"))
ProfileDialog.setEnabled(True)
ProfileDialog.resize(470, 300)
self.save_btn = QtGui.QPushButton(ProfileDialog)
self.save_btn.setEnabled(True)
self.save_btn.setGeometry(QtCore.QRect(330, 240, 114, 32))
self.save_btn.setObjectName(_fromUtf8("save_btn"))
self.avatar_label = QtGui.QLabel(ProfileDialog)
self.avatar_label.setGeometry(QtCore.QRect(310, 20, 130, 130))
self.avatar_label.setStyleSheet(_fromUtf8("border: 2px solid #ccc;"))
self.avatar_label.setObjectName(_fromUtf8("avatar_label"))
self.label_2 = QtGui.QLabel(ProfileDialog)
self.label_2.setGeometry(QtCore.QRect(21, 117, 26, 16))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.impresa_text_edit = QtGui.QTextEdit(ProfileDialog)
self.impresa_text_edit.setGeometry(QtCore.QRect(80, 170, 361, 51))
self.impresa_text_edit.setObjectName(_fromUtf8("impresa_text_edit"))
self.fullname_line_edit = QtGui.QLineEdit(ProfileDialog)
self.fullname_line_edit.setGeometry(QtCore.QRect(81, 117, 201, 22))
self.fullname_line_edit.setObjectName(_fromUtf8("fullname_line_edit"))
self.label_3 = QtGui.QLabel(ProfileDialog)
self.label_3.setGeometry(QtCore.QRect(21, 21, 39, 16))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.label_4 = QtGui.QLabel(ProfileDialog)
self.label_4.setGeometry(QtCore.QRect(21, 53, 39, 16))
self.label_4.setObjectName(_fromUtf8("label_4"))
self.cellphone_no_line_edit = QtGui.QLineEdit(ProfileDialog)
self.cellphone_no_line_edit.setEnabled(True)
self.cellphone_no_line_edit.setGeometry(QtCore.QRect(81, 53, 201, 22))
self.cellphone_no_line_edit.setText(_fromUtf8(""))
self.cellphone_no_line_edit.setReadOnly(True)
self.cellphone_no_line_edit.setObjectName(_fromUtf8("cellphone_no_line_edit"))
self.fetion_no_line_edit = QtGui.QLineEdit(ProfileDialog)
self.fetion_no_line_edit.setEnabled(True)
self.fetion_no_line_edit.setGeometry(QtCore.QRect(81, 21, 201, 22))
self.fetion_no_line_edit.setText(_fromUtf8(""))
self.fetion_no_line_edit.setReadOnly(True)
self.fetion_no_line_edit.setObjectName(_fromUtf8("fetion_no_line_edit"))
self.label_5 = QtGui.QLabel(ProfileDialog)
self.label_5.setGeometry(QtCore.QRect(21, 85, 33, 16))
self.label_5.setObjectName(_fromUtf8("label_5"))
self.email_line_edit = QtGui.QLineEdit(ProfileDialog)
self.email_line_edit.setEnabled(True)
self.email_line_edit.setGeometry(QtCore.QRect(81, 85, 201, 22))
self.email_line_edit.setText(_fromUtf8(""))
self.email_line_edit.setReadOnly(True)
self.email_line_edit.setObjectName(_fromUtf8("email_line_edit"))
self.label_6 = QtGui.QLabel(ProfileDialog)
self.label_6.setGeometry(QtCore.QRect(21, 170, 52, 16))
self.label_6.setObjectName(_fromUtf8("label_6"))
self.retranslateUi(ProfileDialog)
QtCore.QObject.connect(self.save_btn, QtCore.SIGNAL(_fromUtf8("clicked()")), ProfileDialog.accept)
QtCore.QMetaObject.connectSlotsByName(ProfileDialog)
def retranslateUi(self, ProfileDialog):
ProfileDialog.setWindowTitle(QtGui.QApplication.translate("ProfileDialog", "Profile", None, QtGui.QApplication.UnicodeUTF8))
self.save_btn.setText(QtGui.QApplication.translate("ProfileDialog", "关闭 (&C)", None, QtGui.QApplication.UnicodeUTF8))
self.save_btn.setShortcut(QtGui.QApplication.translate("ProfileDialog", "Return", None, QtGui.QApplication.UnicodeUTF8))
self.avatar_label.setText(QtGui.QApplication.translate("ProfileDialog", "avatar", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("ProfileDialog", "姓名", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("ProfileDialog", "飞信号", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setText(QtGui.QApplication.translate("ProfileDialog", "手机号", None, QtGui.QApplication.UnicodeUTF8))
self.label_5.setText(QtGui.QApplication.translate("ProfileDialog", "EMail", None, QtGui.QApplication.UnicodeUTF8))
self.label_6.setText(QtGui.QApplication.translate("ProfileDialog", "心情短语", None, QtGui.QApplication.UnicodeUTF8))
|
# Copyright (c) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from time import time
from unittest import main, TestCase
from test.unit import FakeRing, mocked_http_conn, debug_logger
from copy import deepcopy
from tempfile import mkdtemp
from shutil import rmtree
import mock
import six
from six.moves import urllib
from swift.common import internal_client, utils, swob
from swift.obj import expirer
def not_random():
return 0.5
last_not_sleep = 0
def not_sleep(seconds):
global last_not_sleep
last_not_sleep = seconds
class TestObjectExpirer(TestCase):
maxDiff = None
internal_client = None
def setUp(self):
global not_sleep
self.old_loadapp = internal_client.loadapp
self.old_sleep = internal_client.sleep
internal_client.loadapp = lambda *a, **kw: None
internal_client.sleep = not_sleep
self.rcache = mkdtemp()
self.conf = {'recon_cache_path': self.rcache}
self.logger = debug_logger('test-expirer')
def tearDown(self):
rmtree(self.rcache)
internal_client.sleep = self.old_sleep
internal_client.loadapp = self.old_loadapp
def test_get_process_values_from_kwargs(self):
x = expirer.ObjectExpirer({})
vals = {
'processes': 5,
'process': 1,
}
x.get_process_values(vals)
self.assertEqual(x.processes, 5)
self.assertEqual(x.process, 1)
def test_get_process_values_from_config(self):
vals = {
'processes': 5,
'process': 1,
}
x = expirer.ObjectExpirer(vals)
x.get_process_values({})
self.assertEqual(x.processes, 5)
self.assertEqual(x.process, 1)
def test_get_process_values_negative_process(self):
vals = {
'processes': 5,
'process': -1,
}
# from config
x = expirer.ObjectExpirer(vals)
self.assertRaises(ValueError, x.get_process_values, {})
# from kwargs
x = expirer.ObjectExpirer({})
self.assertRaises(ValueError, x.get_process_values, vals)
def test_get_process_values_negative_processes(self):
vals = {
'processes': -5,
'process': 1,
}
# from config
x = expirer.ObjectExpirer(vals)
self.assertRaises(ValueError, x.get_process_values, {})
# from kwargs
x = expirer.ObjectExpirer({})
self.assertRaises(ValueError, x.get_process_values, vals)
def test_get_process_values_process_greater_than_processes(self):
vals = {
'processes': 5,
'process': 7,
}
# from config
x = expirer.ObjectExpirer(vals)
self.assertRaises(ValueError, x.get_process_values, {})
# from kwargs
x = expirer.ObjectExpirer({})
self.assertRaises(ValueError, x.get_process_values, vals)
def test_init_concurrency_too_small(self):
conf = {
'concurrency': 0,
}
self.assertRaises(ValueError, expirer.ObjectExpirer, conf)
conf = {
'concurrency': -1,
}
self.assertRaises(ValueError, expirer.ObjectExpirer, conf)
def test_process_based_concurrency(self):
class ObjectExpirer(expirer.ObjectExpirer):
def __init__(self, conf):
super(ObjectExpirer, self).__init__(conf)
self.processes = 3
self.deleted_objects = {}
self.obj_containers_in_order = []
def delete_object(self, actual_obj, timestamp, container, obj):
if container not in self.deleted_objects:
self.deleted_objects[container] = set()
self.deleted_objects[container].add(obj)
self.obj_containers_in_order.append(container)
class InternalClient(object):
def __init__(self, containers):
self.containers = containers
def get_account_info(self, *a, **kw):
return len(self.containers.keys()), \
sum([len(self.containers[x]) for x in self.containers])
def iter_containers(self, *a, **kw):
return [{'name': six.text_type(x)}
for x in self.containers.keys()]
def iter_objects(self, account, container):
return [{'name': six.text_type(x)}
for x in self.containers[container]]
def delete_container(*a, **kw):
pass
containers = {
'0': set('1-one 2-two 3-three'.split()),
'1': set('2-two 3-three 4-four'.split()),
'2': set('5-five 6-six'.split()),
'3': set(u'7-seven\u2661'.split()),
}
x = ObjectExpirer(self.conf)
x.swift = InternalClient(containers)
deleted_objects = {}
for i in range(3):
x.process = i
x.run_once()
self.assertNotEqual(deleted_objects, x.deleted_objects)
deleted_objects = deepcopy(x.deleted_objects)
self.assertEqual(containers['3'].pop(),
deleted_objects['3'].pop().decode('utf8'))
self.assertEqual(containers, deleted_objects)
self.assertEqual(len(set(x.obj_containers_in_order[:4])), 4)
def test_delete_object(self):
x = expirer.ObjectExpirer({}, logger=self.logger)
actual_obj = 'actual_obj'
timestamp = int(time())
reclaim_ts = timestamp - x.reclaim_age
container = 'container'
obj = 'obj'
http_exc = {
resp_code:
internal_client.UnexpectedResponse(
str(resp_code), swob.HTTPException(status=resp_code))
for resp_code in {404, 412, 500}
}
exc_other = Exception()
def check_call_to_delete_object(exc, ts, should_pop):
x.logger.clear()
start_reports = x.report_objects
with mock.patch.object(x, 'delete_actual_object',
side_effect=exc) as delete_actual:
with mock.patch.object(x, 'pop_queue') as pop_queue:
x.delete_object(actual_obj, ts, container, obj)
delete_actual.assert_called_once_with(actual_obj, ts)
log_lines = x.logger.get_lines_for_level('error')
if should_pop:
pop_queue.assert_called_once_with(container, obj)
self.assertEqual(start_reports + 1, x.report_objects)
self.assertFalse(log_lines)
else:
self.assertFalse(pop_queue.called)
self.assertEqual(start_reports, x.report_objects)
self.assertEqual(1, len(log_lines))
self.assertIn('Exception while deleting object container obj',
log_lines[0])
# verify pop_queue logic on exceptions
for exc, ts, should_pop in [(None, timestamp, True),
(http_exc[404], timestamp, False),
(http_exc[412], timestamp, False),
(http_exc[500], reclaim_ts, False),
(exc_other, reclaim_ts, False),
(http_exc[404], reclaim_ts, True),
(http_exc[412], reclaim_ts, True)]:
try:
check_call_to_delete_object(exc, ts, should_pop)
except AssertionError as err:
self.fail("Failed on %r at %f: %s" % (exc, ts, err))
def test_report(self):
x = expirer.ObjectExpirer({}, logger=self.logger)
x.report()
self.assertEqual(x.logger.get_lines_for_level('info'), [])
x.logger._clear()
x.report(final=True)
self.assertTrue(
'completed' in str(x.logger.get_lines_for_level('info')))
self.assertTrue(
'so far' not in str(x.logger.get_lines_for_level('info')))
x.logger._clear()
x.report_last_time = time() - x.report_interval
x.report()
self.assertTrue(
'completed' not in str(x.logger.get_lines_for_level('info')))
self.assertTrue(
'so far' in str(x.logger.get_lines_for_level('info')))
def test_run_once_nothing_to_do(self):
x = expirer.ObjectExpirer(self.conf, logger=self.logger)
x.swift = 'throw error because a string does not have needed methods'
x.run_once()
self.assertEqual(x.logger.get_lines_for_level('error'),
["Unhandled exception: "])
log_args, log_kwargs = x.logger.log_dict['error'][0]
self.assertEqual(str(log_kwargs['exc_info'][1]),
"'str' object has no attribute 'get_account_info'")
def test_run_once_calls_report(self):
class InternalClient(object):
def get_account_info(*a, **kw):
return 1, 2
def iter_containers(*a, **kw):
return []
x = expirer.ObjectExpirer(self.conf, logger=self.logger)
x.swift = InternalClient()
x.run_once()
self.assertEqual(
x.logger.get_lines_for_level('info'), [
'Pass beginning; 1 possible containers; 2 possible objects',
'Pass completed in 0s; 0 objects expired',
])
def test_run_once_unicode_problem(self):
class InternalClient(object):
container_ring = FakeRing()
def get_account_info(*a, **kw):
return 1, 2
def iter_containers(*a, **kw):
return [{'name': u'1234'}]
def iter_objects(*a, **kw):
return [{'name': u'1234-troms\xf8'}]
def make_request(*a, **kw):
pass
def delete_container(*a, **kw):
pass
x = expirer.ObjectExpirer(self.conf, logger=self.logger)
x.swift = InternalClient()
requests = []
def capture_requests(ipaddr, port, method, path, *args, **kwargs):
requests.append((method, path))
with mocked_http_conn(
200, 200, 200, give_connect=capture_requests):
x.run_once()
self.assertEqual(len(requests), 3)
def test_container_timestamp_break(self):
class InternalClient(object):
def __init__(self, containers):
self.containers = containers
def get_account_info(*a, **kw):
return 1, 2
def iter_containers(self, *a, **kw):
return self.containers
def iter_objects(*a, **kw):
raise Exception('This should not have been called')
x = expirer.ObjectExpirer(self.conf,
logger=self.logger)
x.swift = InternalClient([{'name': str(int(time() + 86400))}])
x.run_once()
logs = x.logger.all_log_lines()
self.assertEqual(logs['info'], [
'Pass beginning; 1 possible containers; 2 possible objects',
'Pass completed in 0s; 0 objects expired',
])
self.assertTrue('error' not in logs)
# Reverse test to be sure it still would blow up the way expected.
fake_swift = InternalClient([{'name': str(int(time() - 86400))}])
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=fake_swift)
x.run_once()
self.assertEqual(
x.logger.get_lines_for_level('error'), [
'Unhandled exception: '])
log_args, log_kwargs = x.logger.log_dict['error'][-1]
self.assertEqual(str(log_kwargs['exc_info'][1]),
'This should not have been called')
def test_object_timestamp_break(self):
class InternalClient(object):
def __init__(self, containers, objects):
self.containers = containers
self.objects = objects
def get_account_info(*a, **kw):
return 1, 2
def iter_containers(self, *a, **kw):
return self.containers
def delete_container(*a, **kw):
pass
def iter_objects(self, *a, **kw):
return self.objects
def should_not_be_called(*a, **kw):
raise Exception('This should not have been called')
fake_swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': '%d-actual-obj' % int(time() + 86400)}])
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=fake_swift)
x.run_once()
self.assertTrue('error' not in x.logger.all_log_lines())
self.assertEqual(x.logger.get_lines_for_level('info'), [
'Pass beginning; 1 possible containers; 2 possible objects',
'Pass completed in 0s; 0 objects expired',
])
# Reverse test to be sure it still would blow up the way expected.
ts = int(time() - 86400)
fake_swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': '%d-actual-obj' % ts}])
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=fake_swift)
x.delete_actual_object = should_not_be_called
x.run_once()
self.assertEqual(
x.logger.get_lines_for_level('error'),
['Exception while deleting object %d %d-actual-obj '
'This should not have been called: ' % (ts, ts)])
def test_failed_delete_keeps_entry(self):
class InternalClient(object):
container_ring = None
def __init__(self, containers, objects):
self.containers = containers
self.objects = objects
def get_account_info(*a, **kw):
return 1, 2
def iter_containers(self, *a, **kw):
return self.containers
def delete_container(*a, **kw):
pass
def iter_objects(self, *a, **kw):
return self.objects
def deliberately_blow_up(actual_obj, timestamp):
raise Exception('failed to delete actual object')
def should_not_get_called(container, obj):
raise Exception('This should not have been called')
ts = int(time() - 86400)
fake_swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': '%d-actual-obj' % ts}])
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=fake_swift)
x.iter_containers = lambda: [str(int(time() - 86400))]
x.delete_actual_object = deliberately_blow_up
x.pop_queue = should_not_get_called
x.run_once()
error_lines = x.logger.get_lines_for_level('error')
self.assertEqual(
error_lines,
['Exception while deleting object %d %d-actual-obj '
'failed to delete actual object: ' % (ts, ts)])
self.assertEqual(
x.logger.get_lines_for_level('info'), [
'Pass beginning; 1 possible containers; 2 possible objects',
'Pass completed in 0s; 0 objects expired',
])
# Reverse test to be sure it still would blow up the way expected.
ts = int(time() - 86400)
fake_swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': '%d-actual-obj' % ts}])
self.logger._clear()
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=fake_swift)
x.delete_actual_object = lambda o, t: None
x.pop_queue = should_not_get_called
x.run_once()
self.assertEqual(
self.logger.get_lines_for_level('error'),
['Exception while deleting object %d %d-actual-obj This should '
'not have been called: ' % (ts, ts)])
def test_success_gets_counted(self):
class InternalClient(object):
container_ring = None
def __init__(self, containers, objects):
self.containers = containers
self.objects = objects
def get_account_info(*a, **kw):
return 1, 2
def iter_containers(self, *a, **kw):
return self.containers
def delete_container(*a, **kw):
pass
def delete_object(*a, **kw):
pass
def iter_objects(self, *a, **kw):
return self.objects
fake_swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': '%d-acc/c/actual-obj' % int(time() - 86400)}])
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=fake_swift)
x.delete_actual_object = lambda o, t: None
x.pop_queue = lambda c, o: None
self.assertEqual(x.report_objects, 0)
with mock.patch('swift.obj.expirer.MAX_OBJECTS_TO_CACHE', 0):
x.run_once()
self.assertEqual(x.report_objects, 1)
self.assertEqual(
x.logger.get_lines_for_level('info'),
['Pass beginning; 1 possible containers; 2 possible objects',
'Pass completed in 0s; 1 objects expired'])
def test_delete_actual_object_does_not_get_unicode(self):
class InternalClient(object):
container_ring = None
def __init__(self, containers, objects):
self.containers = containers
self.objects = objects
def get_account_info(*a, **kw):
return 1, 2
def iter_containers(self, *a, **kw):
return self.containers
def delete_container(*a, **kw):
pass
def delete_object(*a, **kw):
pass
def iter_objects(self, *a, **kw):
return self.objects
got_unicode = [False]
def delete_actual_object_test_for_unicode(actual_obj, timestamp):
if isinstance(actual_obj, six.text_type):
got_unicode[0] = True
fake_swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': u'%d-actual-obj' % int(time() - 86400)}])
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=fake_swift)
x.delete_actual_object = delete_actual_object_test_for_unicode
x.pop_queue = lambda c, o: None
self.assertEqual(x.report_objects, 0)
x.run_once()
self.assertEqual(x.report_objects, 1)
self.assertEqual(
x.logger.get_lines_for_level('info'), [
'Pass beginning; 1 possible containers; 2 possible objects',
'Pass completed in 0s; 1 objects expired',
])
self.assertFalse(got_unicode[0])
def test_failed_delete_continues_on(self):
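        # Failures while deleting objects or containers are logged but do not
        # abort the pass; every object and container is still attempted.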
class InternalClient(object):
container_ring = None
def __init__(self, containers, objects):
self.containers = containers
self.objects = objects
def get_account_info(*a, **kw):
return 1, 2
def iter_containers(self, *a, **kw):
return self.containers
def delete_container(*a, **kw):
raise Exception('failed to delete container')
def delete_object(*a, **kw):
pass
def iter_objects(self, *a, **kw):
return self.objects
def fail_delete_actual_object(actual_obj, timestamp):
raise Exception('failed to delete actual object')
x = expirer.ObjectExpirer(self.conf, logger=self.logger)
cts = int(time() - 86400)
ots = int(time() - 86400)
containers = [
{'name': str(cts)},
{'name': str(cts + 1)},
]
objects = [
{'name': '%d-actual-obj' % ots},
{'name': '%d-next-obj' % ots}
]
x.swift = InternalClient(containers, objects)
x.delete_actual_object = fail_delete_actual_object
x.run_once()
error_lines = x.logger.get_lines_for_level('error')
self.assertEqual(sorted(error_lines), sorted([
'Exception while deleting object %d %d-actual-obj failed to '
'delete actual object: ' % (cts, ots),
'Exception while deleting object %d %d-next-obj failed to '
'delete actual object: ' % (cts, ots),
'Exception while deleting object %d %d-actual-obj failed to '
'delete actual object: ' % (cts + 1, ots),
'Exception while deleting object %d %d-next-obj failed to '
'delete actual object: ' % (cts + 1, ots),
'Exception while deleting container %d failed to delete '
'container: ' % (cts,),
'Exception while deleting container %d failed to delete '
'container: ' % (cts + 1,)]))
self.assertEqual(x.logger.get_lines_for_level('info'), [
'Pass beginning; 1 possible containers; 2 possible objects',
'Pass completed in 0s; 0 objects expired',
])
def test_run_forever_initial_sleep_random(self):
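        # run_forever should sleep a random fraction of the configured
        # interval before its first pass; with the stubbed random() this
        # works out to half the interval.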
global last_not_sleep
def raise_system_exit():
raise SystemExit('test_run_forever')
interval = 1234
x = expirer.ObjectExpirer({'__file__': 'unit_test',
'interval': interval})
orig_random = expirer.random
orig_sleep = expirer.sleep
try:
expirer.random = not_random
expirer.sleep = not_sleep
x.run_once = raise_system_exit
x.run_forever()
        except SystemExit as err:
            caught = err
        finally:
            expirer.random = orig_random
            expirer.sleep = orig_sleep
        self.assertEqual(str(caught), 'test_run_forever')
self.assertEqual(last_not_sleep, 0.5 * interval)
def test_run_forever_catches_usual_exceptions(self):
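        # Non-SystemExit exceptions raised by run_once are logged as
        # 'Unhandled exception' and the loop keeps going until SystemExit.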
raises = [0]
def raise_exceptions():
raises[0] += 1
if raises[0] < 2:
raise Exception('exception %d' % raises[0])
raise SystemExit('exiting exception %d' % raises[0])
x = expirer.ObjectExpirer({}, logger=self.logger)
orig_sleep = expirer.sleep
try:
expirer.sleep = not_sleep
x.run_once = raise_exceptions
x.run_forever()
        except SystemExit as err:
            caught = err
        finally:
            expirer.sleep = orig_sleep
        self.assertEqual(str(caught), 'exiting exception 2')
self.assertEqual(x.logger.get_lines_for_level('error'),
['Unhandled exception: '])
log_args, log_kwargs = x.logger.log_dict['error'][0]
self.assertEqual(str(log_kwargs['exc_info'][1]),
'exception 1')
def test_delete_actual_object(self):
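        # delete_actual_object should issue the DELETE with X-If-Delete-At set
        # to the given timestamp and X-Timestamp matching it.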
got_env = [None]
def fake_app(env, start_response):
got_env[0] = env
start_response('204 No Content', [('Content-Length', '0')])
return []
internal_client.loadapp = lambda *a, **kw: fake_app
x = expirer.ObjectExpirer({})
ts = '1234'
x.delete_actual_object('/path/to/object', ts)
self.assertEqual(got_env[0]['HTTP_X_IF_DELETE_AT'], ts)
self.assertEqual(got_env[0]['HTTP_X_TIMESTAMP'],
got_env[0]['HTTP_X_IF_DELETE_AT'])
def test_delete_actual_object_nourlquoting(self):
# delete_actual_object should not do its own url quoting because
# internal client's make_request handles that.
got_env = [None]
def fake_app(env, start_response):
got_env[0] = env
start_response('204 No Content', [('Content-Length', '0')])
return []
internal_client.loadapp = lambda *a, **kw: fake_app
x = expirer.ObjectExpirer({})
ts = '1234'
x.delete_actual_object('/path/to/object name', ts)
self.assertEqual(got_env[0]['HTTP_X_IF_DELETE_AT'], ts)
self.assertEqual(got_env[0]['HTTP_X_TIMESTAMP'],
got_env[0]['HTTP_X_IF_DELETE_AT'])
self.assertEqual(got_env[0]['PATH_INFO'], '/v1/path/to/object name')
def test_delete_actual_object_raises_404(self):
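        # A 404 from the object DELETE propagates as
        # internal_client.UnexpectedResponse rather than being swallowed.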
def fake_app(env, start_response):
start_response('404 Not Found', [('Content-Length', '0')])
return []
internal_client.loadapp = lambda *a, **kw: fake_app
x = expirer.ObjectExpirer({})
self.assertRaises(internal_client.UnexpectedResponse,
x.delete_actual_object, '/path/to/object', '1234')
def test_delete_actual_object_raises_412(self):
def fake_app(env, start_response):
start_response('412 Precondition Failed',
[('Content-Length', '0')])
return []
internal_client.loadapp = lambda *a, **kw: fake_app
x = expirer.ObjectExpirer({})
self.assertRaises(internal_client.UnexpectedResponse,
x.delete_actual_object, '/path/to/object', '1234')
def test_delete_actual_object_does_not_handle_odd_stuff(self):
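        # Unexpected server errors (503 here) are not handled specially; the
        # raised exception still carries the original response status.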
def fake_app(env, start_response):
start_response(
                '503 Service Unavailable',
[('Content-Length', '0')])
return []
internal_client.loadapp = lambda *a, **kw: fake_app
x = expirer.ObjectExpirer({})
exc = None
try:
x.delete_actual_object('/path/to/object', '1234')
except Exception as err:
exc = err
self.assertEqual(503, exc.resp.status_int)
def test_delete_actual_object_quotes(self):
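        # A name containing spaces should show up URL-quoted in the path that
        # is passed to make_request.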
name = 'this name should get quoted'
timestamp = '1366063156.863045'
x = expirer.ObjectExpirer({})
x.swift.make_request = mock.MagicMock()
x.delete_actual_object(name, timestamp)
self.assertEqual(x.swift.make_request.call_count, 1)
self.assertEqual(x.swift.make_request.call_args[0][1],
'/v1/' + urllib.parse.quote(name))
def test_pop_queue(self):
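        # pop_queue should send a DELETE for the queue entry to every
        # container-ring node of the .expiring_objects account.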
class InternalClient(object):
container_ring = FakeRing()
x = expirer.ObjectExpirer({}, logger=self.logger,
swift=InternalClient())
requests = []
def capture_requests(ipaddr, port, method, path, *args, **kwargs):
requests.append((method, path))
with mocked_http_conn(
200, 200, 200, give_connect=capture_requests) as fake_conn:
x.pop_queue('c', 'o')
self.assertRaises(StopIteration, fake_conn.code_iter.next)
for method, path in requests:
self.assertEqual(method, 'DELETE')
device, part, account, container, obj = utils.split_path(
path, 5, 5, True)
self.assertEqual(account, '.expiring_objects')
self.assertEqual(container, 'c')
self.assertEqual(obj, 'o')
if __name__ == '__main__':
main()
|
from __future__ import absolute_import
import click
from manuel.manuel import Manuel
@click.group()
def manuel():
pass
@manuel.command()
@click.argument('config_file')
@click.option('--index/--no-index', default=False)
@click.option('--recreate/--no-recreate', default=True)
@click.option('--debug/--no-debug', default=False)
@click.option('--name')
def cli_generate_report(config_file, index, recreate, debug, name):
"""
CLI entry point
:param config_file:
:param index:
:param recreate: Recreates the materialized view
:type recreate: bool
:param debug:Enables debug mode
:type debug: bool
:return:
"""
m = Manuel(config_file)
if index:
m.create_index(config_file, debug)
if recreate:
m.generate_materialized_vies(config_file, debug)
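    # Fall back to report_name from the config, then to the config file's
    # basename, then to the literal 'report'.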
if not name:
from os.path import basename, splitext
name = m.config["report"]["general"].get("report_name", None) or \
splitext(basename(config_file))[0] or 'report'
result = m.generate_report(config_file, debug, name)
m.save_results(result, config_file)
def invoke():
manuel()
|
import simplejson as json
from django.core import serializers
from django.http import HttpResponse, Http404
from django.shortcuts import get_object_or_404
from django.contrib.contenttypes.models import ContentType
from django.utils.html import strip_tags
from models import Entry
from forms import EntryForm
def save_entry(request):
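    # AJAX-only POST handler: creates a new Entry, or updates an existing one
    # when 'pk' is posted; strips HTML tags from title and body and returns
    # the saved pk and model label as JSON.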
if not request.is_ajax() or not request.method == 'POST':
raise Http404
    form = EntryForm(request.POST)
    if not form.is_valid():
        return HttpResponse('{}', mimetype='application/javascript')
    if 'pk' in request.POST:
        entry = get_object_or_404(Entry, pk=request.POST['pk'])
        form = EntryForm(request.POST, instance=entry)
        entry = form.save(commit=False)
        entry.body = strip_tags(entry.body)
        entry.title = strip_tags(entry.title)
        entry.save()
    else:
        entry = form.save(commit=False)
        entry.body = strip_tags(entry.body)
        entry.title = strip_tags(entry.title)
        entry.save()
    entry_content_type = ContentType.objects.get_for_model(entry.__class__)
    response_data = json.dumps({
        'pk': entry.pk,
        'model': '%s.%s' % (entry_content_type.app_label, entry_content_type.model),
    })
    return HttpResponse(response_data, mimetype='application/javascript')
def delete_entry(request):
    # Require an AJAX POST that carries the entry's primary key.
    if not (request.is_ajax() and request.method == 'POST' and 'pk' in request.POST):
        raise Http404
    entry = get_object_or_404(Entry, pk=request.POST['pk'])
    entry.delete()
    response_data = json.dumps({
        'operation': 'complete',
    })
    return HttpResponse(response_data, mimetype='application/javascript')
def get_data(request):
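    # AJAX-only listing endpoint: any GET parameters are applied directly as
    # queryset filters before serializing the entries to JSON.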
if not request.is_ajax():
raise Http404
    entries = Entry.objects.all()
    if request.GET:
        params_dict = {}
        for param, value in request.GET.iteritems():
            params_dict[str(param)] = str(value)
        entries = entries.filter(**params_dict)
return HttpResponse(serializers.serialize("json", entries), mimetype='application/javascript')
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Distributor.email_main'
db.delete_column('alibrary_distributor', 'email_main')
# Adding field 'Distributor.email'
db.add_column('alibrary_distributor', 'email',
self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Adding field 'Distributor.email_main'
db.add_column('alibrary_distributor', 'email_main',
self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True),
keep_default=False)
# Deleting field 'Distributor.email'
db.delete_column('alibrary_distributor', 'email')
models = {
'actstream.action': {
'Meta': {'ordering': "('-timestamp',)", 'object_name': 'Action'},
'action_object_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'action_object'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'action_object_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'actor_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actor'", 'to': "orm['contenttypes.ContentType']"}),
'actor_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'data': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'target_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'target'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'target_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'verb': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'alibrary.apilookup': {
'Meta': {'ordering': "('created',)", 'object_name': 'APILookup'},
'api_data': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'processed': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '2'}),
'provider': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50'}),
'ressource_id': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uri': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'alibrary.artist': {
'Meta': {'ordering': "('name',)", 'object_name': 'Artist'},
'aliases': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'aliases_rel_+'", 'null': 'True', 'to': "orm['alibrary.Artist']"}),
'biography': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'artists_creator'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'd_tags': ('tagging.fields.TagField', [], {'null': 'True'}),
'date_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'disable_editing': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'disable_link': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'disambiguation': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'enable_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'artist_folder'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['filer.Folder']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'legacy_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'listed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'main_image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'artist_main_image'", 'null': 'True', 'to': "orm['filer.Image']"}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['alibrary.Artist']", 'through': "orm['alibrary.ArtistMembership']", 'symmetrical': 'False'}),
'migrated': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'artists_owner'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'placeholder_1': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'professions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['alibrary.Profession']", 'through': "orm['alibrary.ArtistProfessions']", 'symmetrical': 'False'}),
'publisher': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'artists_publisher'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.artistmembership': {
'Meta': {'object_name': 'ArtistMembership'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'artist_child'", 'to': "orm['alibrary.Artist']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'artist_parent'", 'to': "orm['alibrary.Artist']"}),
'profession': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'artist_membership_profession'", 'null': 'True', 'to': "orm['alibrary.Profession']"})
},
'alibrary.artistplugin': {
'Meta': {'object_name': 'ArtistPlugin', 'db_table': "'cmsplugin_artistplugin'", '_ormbases': ['cms.CMSPlugin']},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Artist']"}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'})
},
'alibrary.artistprofessions': {
'Meta': {'object_name': 'ArtistProfessions'},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Artist']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'profession': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Profession']"})
},
'alibrary.daypart': {
'Meta': {'ordering': "('day', 'time_start')", 'object_name': 'Daypart'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'day': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '1', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'time_end': ('django.db.models.fields.TimeField', [], {}),
'time_start': ('django.db.models.fields.TimeField', [], {})
},
'alibrary.distributor': {
'Meta': {'ordering': "('name',)", 'object_name': 'Distributor'},
'address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'code': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'distributors_creator'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'd_tags': ('tagging.fields.TagField', [], {'null': 'True'}),
'description': ('lib.fields.extra.MarkdownTextField', [], {'null': 'True', 'blank': 'True'}),
'description_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'first_placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'legacy_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'migrated': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'distributors_owner'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'label_children'", 'null': 'True', 'to': "orm['alibrary.Distributor']"}),
'publisher': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'distributors_publisher'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '12'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.format': {
'Meta': {'ordering': "('format', 'version')", 'object_name': 'Format'},
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'default_price': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'format': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'default': "'base'", 'max_length': '10'})
},
'alibrary.label': {
'Meta': {'ordering': "('name',)", 'object_name': 'Label'},
'address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'labels_creator'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'd_tags': ('tagging.fields.TagField', [], {'null': 'True'}),
'description': ('lib.fields.extra.MarkdownTextField', [], {'null': 'True', 'blank': 'True'}),
'description_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'disable_editing': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'disable_link': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_main': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'first_placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'label_folder'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'labelcode': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'legacy_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'listed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'migrated': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'labels_owner'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'label_children'", 'null': 'True', 'to': "orm['alibrary.Label']"}),
'publisher': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'labels_publisher'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '12'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.license': {
'Meta': {'ordering': "('name',)", 'object_name': 'License'},
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True', 'blank': 'True'}),
'legacy_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'migrated': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'license_children'", 'null': 'True', 'to': "orm['alibrary.License']"}),
'restricted': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'405431c1-a180-4b22-8383-64392cf37ae9'", 'max_length': '36'})
},
'alibrary.licensetranslation': {
'Meta': {'ordering': "('language_code',)", 'unique_together': "(('language_code', 'master'),)", 'object_name': 'LicenseTranslation', 'db_table': "'alibrary_license_translation'"},
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '15', 'blank': 'True'}),
'license_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'to': "orm['alibrary.License']"}),
'name_translated': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'alibrary.media': {
'Meta': {'ordering': "('tracknumber',)", 'object_name': 'Media'},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_artist'", 'null': 'True', 'to': "orm['alibrary.Artist']"}),
'base_bitrate': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'base_duration': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'base_filesize': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'base_format': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'}),
'base_samplerate': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'conversion_status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '2'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_creator'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'd_tags': ('tagging.fields.TagField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'duration': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'}),
'echoprint_status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '2'}),
'extra_artists': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['alibrary.Artist']", 'null': 'True', 'through': "orm['alibrary.MediaExtraartists']", 'blank': 'True'}),
'folder': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isrc': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'}),
'legacy_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_license'", 'null': 'True', 'to': "orm['alibrary.License']"}),
'lock': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '1'}),
'master': ('django.db.models.fields.files.FileField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'master_sha1': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'mediatype': ('django.db.models.fields.CharField', [], {'default': "'track'", 'max_length': '12'}),
'migrated': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_owner'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'processed': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '2'}),
'publisher': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_publisher'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_release'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['alibrary.Release']"}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'True'}),
'tracknumber': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.mediaextraartists': {
'Meta': {'ordering': "('profession__name', 'artist__name')", 'object_name': 'MediaExtraartists'},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'extraartist_artist'", 'to': "orm['alibrary.Artist']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'media': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'extraartist_media'", 'to': "orm['alibrary.Media']"}),
'profession': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_extraartist_profession'", 'null': 'True', 'to': "orm['alibrary.Profession']"})
},
'alibrary.mediaformat': {
'Meta': {'ordering': "('name',)", 'object_name': 'Mediaformat'},
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_listing': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'alibrary.mediaplugin': {
'Meta': {'object_name': 'MediaPlugin', 'db_table': "'cmsplugin_mediaplugin'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'headline': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'media': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Media']"})
},
'alibrary.playlist': {
'Meta': {'ordering': "('-updated',)", 'object_name': 'Playlist'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'd_tags': ('tagging.fields.TagField', [], {'null': 'True'}),
'dayparts': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'daypart_plalists'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['alibrary.Daypart']"}),
'description': ('lib.fields.extra.MarkdownTextField', [], {'null': 'True', 'blank': 'True'}),
'description_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'duration': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '12', 'null': 'True'}),
'edit_mode': ('django.db.models.fields.PositiveIntegerField', [], {'default': '2'}),
'enable_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['alibrary.PlaylistItem']", 'null': 'True', 'through': "orm['alibrary.PlaylistItemPlaylist']", 'blank': 'True'}),
'main_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'seasons': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'season_plalists'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['alibrary.Season']"}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'target_duration': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'other'", 'max_length': '12', 'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'weather': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'weather_plalists'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['alibrary.Weather']"})
},
'alibrary.playlistitem': {
'Meta': {'object_name': 'PlaylistItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.playlistitemplaylist': {
'Meta': {'object_name': 'PlaylistItemPlaylist'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'cue_in': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'cue_out': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'fade_cross': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'fade_in': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'fade_out': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.PlaylistItem']"}),
'playlist': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Playlist']"}),
'position': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.playlistmedia': {
'Meta': {'object_name': 'PlaylistMedia'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'cue_in': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'cue_out': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'fade_cross': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'fade_in': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'fade_out': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'media': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Media']"}),
'playlist': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Playlist']"}),
'position': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.profession': {
'Meta': {'ordering': "('name',)", 'object_name': 'Profession'},
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_listing': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'})
},
'alibrary.relation': {
'Meta': {'ordering': "('url',)", 'object_name': 'Relation'},
'action': ('django.db.models.fields.CharField', [], {'default': "'information'", 'max_length': '50'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'object_id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'service': ('django.db.models.fields.CharField', [], {'default': "'generic'", 'max_length': '50'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '512'})
},
'alibrary.release': {
'Meta': {'ordering': "('-created',)", 'object_name': 'Release'},
'asin': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'catalognumber': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'cover_image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_cover_image'", 'null': 'True', 'to': "orm['filer.Image']"}),
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'releases_creator'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'd_tags': ('tagging.fields.TagField', [], {'null': 'True'}),
'description': ('lib.fields.extra.MarkdownTextField', [], {'null': 'True', 'blank': 'True'}),
'description_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'enable_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'extra_artists': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['alibrary.Artist']", 'null': 'True', 'through': "orm['alibrary.ReleaseExtraartists']", 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_folder'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['filer.Folder']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_label'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['alibrary.Label']"}),
'legacy_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_license'", 'null': 'True', 'to': "orm['alibrary.License']"}),
'main_format': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Mediaformat']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'main_image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_main_image'", 'null': 'True', 'to': "orm['filer.Image']"}),
'media': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'releases'", 'to': "orm['alibrary.Media']", 'through': "orm['alibrary.ReleaseMedia']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'migrated': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'releases_owner'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'placeholder_1': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'pressings': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'publisher': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'releases_publisher'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'release_country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'releasedate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'releasedate_approx': ('django_date_extensions.fields.ApproximateDateField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'releasestatus': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'}),
'releasetype': ('django.db.models.fields.CharField', [], {'default': "'other'", 'max_length': '12'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'True'}),
'totaltracks': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.releaseextraartists': {
'Meta': {'object_name': 'ReleaseExtraartists'},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'release_extraartist_artist'", 'to': "orm['alibrary.Artist']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'profession': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_extraartist_profession'", 'null': 'True', 'to': "orm['alibrary.Profession']"}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'release_extraartist_release'", 'to': "orm['alibrary.Release']"})
},
'alibrary.releasemedia': {
'Meta': {'object_name': 'ReleaseMedia'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'media': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Media']"}),
'position': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Release']"})
},
'alibrary.releaseplugin': {
'Meta': {'object_name': 'ReleasePlugin', 'db_table': "'cmsplugin_releaseplugin'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Release']"})
},
'alibrary.releaserelations': {
'Meta': {'object_name': 'ReleaseRelations'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'relation': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'release_relation_relation'", 'to': "orm['alibrary.Relation']"}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'release_relation_release'", 'to': "orm['alibrary.Release']"})
},
'alibrary.season': {
'Meta': {'ordering': "('-name',)", 'object_name': 'Season'},
'date_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'name_de': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'alibrary.weather': {
'Meta': {'ordering': "('-name',)", 'object_name': 'Weather'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'name_de': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'arating.vote': {
'Meta': {'unique_together': "(('user', 'content_type', 'object_id'),)", 'object_name': 'Vote'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['filer.File']},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['alibrary']
|
import random
import uuid
from datetime import date, datetime, timedelta
import pytest
from app import db
from app.dao import fact_processing_time_dao
from app.dao.email_branding_dao import dao_create_email_branding
from app.dao.inbound_sms_dao import dao_create_inbound_sms
from app.dao.invited_org_user_dao import save_invited_org_user
from app.dao.invited_user_dao import save_invited_user
from app.dao.jobs_dao import dao_create_job
from app.dao.notifications_dao import dao_create_notification
from app.dao.organisation_dao import (
dao_add_service_to_organisation,
dao_create_organisation,
)
from app.dao.permissions_dao import permission_dao
from app.dao.service_callback_api_dao import save_service_callback_api
from app.dao.service_data_retention_dao import insert_service_data_retention
from app.dao.service_inbound_api_dao import save_service_inbound_api
from app.dao.service_permissions_dao import dao_add_service_permission
from app.dao.service_sms_sender_dao import (
dao_update_service_sms_sender,
update_existing_sms_sender_with_inbound_number,
)
from app.dao.services_dao import dao_add_user_to_service, dao_create_service
from app.dao.templates_dao import dao_create_template, dao_update_template
from app.dao.users_dao import save_model_user
from app.models import (
EMAIL_TYPE,
KEY_TYPE_NORMAL,
LETTER_TYPE,
MOBILE_TYPE,
SMS_TYPE,
AnnualBilling,
ApiKey,
BroadcastEvent,
BroadcastMessage,
BroadcastProvider,
BroadcastProviderMessage,
BroadcastProviderMessageNumber,
BroadcastStatusType,
Complaint,
DailySortedLetter,
Domain,
EmailBranding,
FactBilling,
FactNotificationStatus,
FactProcessingTime,
InboundNumber,
InboundSms,
InvitedOrganisationUser,
InvitedUser,
Job,
LetterBranding,
LetterRate,
Notification,
NotificationHistory,
Organisation,
Permission,
Rate,
ReturnedLetter,
Service,
ServiceCallbackApi,
ServiceContactList,
ServiceEmailReplyTo,
ServiceGuestList,
ServiceInboundApi,
ServiceLetterContact,
ServicePermission,
ServiceSmsSender,
Template,
TemplateFolder,
User,
WebauthnCredential,
)
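# Factory helpers for tests: each create_* function below builds a model with
# sensible defaults, persists it (via the relevant DAO or the session directly)
# and returns the saved instance.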
def create_user(
*,
mobile_number="+447700900986",
email="[email protected]",
state='active',
id_=None,
name="Test User"
):
data = {
'id': id_ or uuid.uuid4(),
'name': name,
'email_address': email,
'password': 'password',
'mobile_number': mobile_number,
'state': state
}
user = User.query.filter_by(email_address=email).first()
if not user:
user = User(**data)
save_model_user(user, validated_email_access=True)
return user
def create_permissions(user, service, *permissions):
permissions = [
Permission(service_id=service.id, user_id=user.id, permission=p)
for p in permissions
]
permission_dao.set_user_service_permission(user, service, permissions, _commit=True)
def create_service(
user=None,
service_name="Sample service",
service_id=None,
restricted=False,
count_as_live=True,
service_permissions=None,
research_mode=False,
active=True,
email_from=None,
prefix_sms=True,
message_limit=1000,
organisation_type='central',
check_if_service_exists=False,
go_live_user=None,
go_live_at=None,
crown=True,
organisation=None,
purchase_order_number=None,
billing_contact_names=None,
billing_contact_email_addresses=None,
billing_reference=None,
):
if check_if_service_exists:
service = Service.query.filter_by(name=service_name).first()
if (not check_if_service_exists) or (check_if_service_exists and not service):
service = Service(
name=service_name,
message_limit=message_limit,
restricted=restricted,
email_from=email_from if email_from else service_name.lower().replace(' ', '.'),
created_by=user if user else create_user(email='{}@digital.cabinet-office.gov.uk'.format(uuid.uuid4())),
prefix_sms=prefix_sms,
organisation_type=organisation_type,
organisation=organisation,
go_live_user=go_live_user,
go_live_at=go_live_at,
crown=crown,
purchase_order_number=purchase_order_number,
billing_contact_names=billing_contact_names,
billing_contact_email_addresses=billing_contact_email_addresses,
billing_reference=billing_reference,
)
dao_create_service(
service,
service.created_by,
service_id,
service_permissions=service_permissions,
)
service.active = active
service.research_mode = research_mode
service.count_as_live = count_as_live
else:
if user and user not in service.users:
dao_add_user_to_service(service, user)
return service
def create_service_with_inbound_number(
inbound_number='1234567',
*args, **kwargs
):
service = create_service(*args, **kwargs)
sms_sender = ServiceSmsSender.query.filter_by(service_id=service.id).first()
inbound = create_inbound_number(number=inbound_number, service_id=service.id)
update_existing_sms_sender_with_inbound_number(service_sms_sender=sms_sender,
sms_sender=inbound_number,
inbound_number_id=inbound.id)
return service
def create_service_with_defined_sms_sender(
sms_sender_value='1234567',
*args, **kwargs
):
service = create_service(*args, **kwargs)
sms_sender = ServiceSmsSender.query.filter_by(service_id=service.id).first()
dao_update_service_sms_sender(service_id=service.id,
service_sms_sender_id=sms_sender.id,
is_default=True,
sms_sender=sms_sender_value)
return service
def create_template(
service,
template_type=SMS_TYPE,
template_name=None,
subject='Template subject',
content='Dear Sir/Madam, Hello. Yours Truly, The Government.',
reply_to=None,
hidden=False,
archived=False,
folder=None,
postage=None,
process_type='normal',
contact_block_id=None
):
data = {
'name': template_name or '{} Template Name'.format(template_type),
'template_type': template_type,
'content': content,
'service': service,
'created_by': service.created_by,
'reply_to': reply_to,
'hidden': hidden,
'folder': folder,
'process_type': process_type,
}
if template_type == LETTER_TYPE:
data["postage"] = postage or "second"
if contact_block_id:
data['service_letter_contact_id'] = contact_block_id
if template_type != SMS_TYPE:
data['subject'] = subject
template = Template(**data)
dao_create_template(template)
if archived:
template.archived = archived
dao_update_template(template)
return template
def create_notification(
template=None,
job=None,
job_row_number=None,
to_field=None,
status='created',
reference=None,
created_at=None,
sent_at=None,
updated_at=None,
billable_units=1,
personalisation=None,
api_key=None,
key_type=KEY_TYPE_NORMAL,
sent_by=None,
client_reference=None,
rate_multiplier=None,
international=False,
phone_prefix=None,
scheduled_for=None,
normalised_to=None,
one_off=False,
reply_to_text=None,
created_by_id=None,
postage=None,
document_download_count=None,
):
assert job or template
if job:
template = job.template
if created_at is None:
created_at = datetime.utcnow()
if to_field is None:
to_field = '+447700900855' if template.template_type == SMS_TYPE else '[email protected]'
if status not in ('created', 'validation-failed', 'virus-scan-failed', 'pending-virus-check'):
sent_at = sent_at or datetime.utcnow()
updated_at = updated_at or datetime.utcnow()
if not one_off and (job is None and api_key is None):
        # we did not specify one in the test - let's create it
api_key = ApiKey.query.filter(ApiKey.service == template.service, ApiKey.key_type == key_type).first()
if not api_key:
api_key = create_api_key(template.service, key_type=key_type)
if template.template_type == 'letter' and postage is None:
postage = 'second'
data = {
'id': uuid.uuid4(),
'to': to_field,
'job_id': job and job.id,
'job': job,
'service_id': template.service.id,
'service': template.service,
'template_id': template.id,
'template_version': template.version,
'status': status,
'reference': reference,
'created_at': created_at,
'sent_at': sent_at,
'billable_units': billable_units,
'personalisation': personalisation,
'notification_type': template.template_type,
'api_key': api_key,
'api_key_id': api_key and api_key.id,
'key_type': api_key.key_type if api_key else key_type,
'sent_by': sent_by,
'updated_at': updated_at,
'client_reference': client_reference,
'job_row_number': job_row_number,
'rate_multiplier': rate_multiplier,
'international': international,
'phone_prefix': phone_prefix,
'normalised_to': normalised_to,
'reply_to_text': reply_to_text,
'created_by_id': created_by_id,
'postage': postage,
'document_download_count': document_download_count,
}
notification = Notification(**data)
dao_create_notification(notification)
return notification
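# Illustrative composition of the helpers above (a sketch only, not executed here):
#     service = create_service(service_name='sample')
#     template = create_template(service, template_type=SMS_TYPE)
#     notification = create_notification(template=template, status='delivered')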
def create_notification_history(
template=None,
job=None,
job_row_number=None,
status='created',
reference=None,
created_at=None,
sent_at=None,
updated_at=None,
billable_units=1,
api_key=None,
key_type=KEY_TYPE_NORMAL,
sent_by=None,
client_reference=None,
rate_multiplier=None,
international=False,
phone_prefix=None,
created_by_id=None,
postage=None,
id=None
):
assert job or template
if job:
template = job.template
if created_at is None:
created_at = datetime.utcnow()
if status != 'created':
sent_at = sent_at or datetime.utcnow()
updated_at = updated_at or datetime.utcnow()
if template.template_type == 'letter' and postage is None:
postage = 'second'
data = {
'id': id or uuid.uuid4(),
'job_id': job and job.id,
'job': job,
'service_id': template.service.id,
'service': template.service,
'template_id': template.id,
'template_version': template.version,
'status': status,
'reference': reference,
'created_at': created_at,
'sent_at': sent_at,
'billable_units': billable_units,
'notification_type': template.template_type,
'api_key': api_key,
'api_key_id': api_key and api_key.id,
'key_type': api_key.key_type if api_key else key_type,
'sent_by': sent_by,
'updated_at': updated_at,
'client_reference': client_reference,
'job_row_number': job_row_number,
'rate_multiplier': rate_multiplier,
'international': international,
'phone_prefix': phone_prefix,
'created_by_id': created_by_id,
'postage': postage
}
notification_history = NotificationHistory(**data)
db.session.add(notification_history)
db.session.commit()
return notification_history
def create_job(
template,
notification_count=1,
created_at=None,
job_status='pending',
scheduled_for=None,
processing_started=None,
processing_finished=None,
original_file_name='some.csv',
archived=False,
contact_list_id=None,
):
data = {
'id': uuid.uuid4(),
'service_id': template.service_id,
'service': template.service,
'template_id': template.id,
'template_version': template.version,
'original_file_name': original_file_name,
'notification_count': notification_count,
'created_at': created_at or datetime.utcnow(),
'created_by': template.created_by,
'job_status': job_status,
'scheduled_for': scheduled_for,
'processing_started': processing_started,
'processing_finished': processing_finished,
'archived': archived,
'contact_list_id': contact_list_id,
}
job = Job(**data)
dao_create_job(job)
return job
def create_service_permission(service_id, permission=EMAIL_TYPE):
dao_add_service_permission(
service_id if service_id else create_service().id, permission)
service_permissions = ServicePermission.query.all()
return service_permissions
def create_inbound_sms(
service,
notify_number=None,
user_number='447700900111',
provider_date=None,
provider_reference=None,
content='Hello',
provider="mmg",
created_at=None
):
if not service.inbound_number:
create_inbound_number(
# create random inbound number
            notify_number or '07{:09}'.format(random.randint(0, 10**9 - 1)),  # integer bound: randint rejects float arguments on newer Pythons
provider=provider,
service_id=service.id
)
inbound = InboundSms(
service=service,
created_at=created_at or datetime.utcnow(),
notify_number=service.get_inbound_number(),
user_number=user_number,
provider_date=provider_date or datetime.utcnow(),
provider_reference=provider_reference or 'foo',
content=content,
provider=provider
)
dao_create_inbound_sms(inbound)
return inbound
def create_service_inbound_api(
service,
url="https://something.com",
bearer_token="some_super_secret",
):
service_inbound_api = ServiceInboundApi(service_id=service.id,
url=url,
bearer_token=bearer_token,
updated_by_id=service.users[0].id
)
save_service_inbound_api(service_inbound_api)
return service_inbound_api
def create_service_callback_api(
service,
url="https://something.com",
bearer_token="some_super_secret",
callback_type="delivery_status"
):
service_callback_api = ServiceCallbackApi(service_id=service.id,
url=url,
bearer_token=bearer_token,
updated_by_id=service.users[0].id,
callback_type=callback_type
)
save_service_callback_api(service_callback_api)
return service_callback_api
def create_email_branding(colour='blue', logo='test_x2.png', name='test_org_1', text='DisplayName'):
data = {
'colour': colour,
'logo': logo,
'name': name,
'text': text,
}
email_branding = EmailBranding(**data)
dao_create_email_branding(email_branding)
return email_branding
def create_rate(start_date, value, notification_type):
rate = Rate(
id=uuid.uuid4(),
valid_from=start_date,
rate=value,
notification_type=notification_type
)
db.session.add(rate)
db.session.commit()
return rate
def create_letter_rate(start_date=None, end_date=None, crown=True, sheet_count=1, rate=0.33, post_class='second'):
if start_date is None:
start_date = datetime(2016, 1, 1)
rate = LetterRate(
id=uuid.uuid4(),
start_date=start_date,
end_date=end_date,
crown=crown,
sheet_count=sheet_count,
rate=rate,
post_class=post_class
)
db.session.add(rate)
db.session.commit()
return rate
def create_api_key(service, key_type=KEY_TYPE_NORMAL, key_name=None):
id_ = uuid.uuid4()
name = key_name if key_name else '{} api key {}'.format(key_type, id_)
api_key = ApiKey(
service=service,
name=name,
created_by=service.created_by,
key_type=key_type,
id=id_,
secret=uuid.uuid4()
)
db.session.add(api_key)
db.session.commit()
return api_key
def create_inbound_number(number, provider='mmg', active=True, service_id=None):
inbound_number = InboundNumber(
id=uuid.uuid4(),
number=number,
provider=provider,
active=active,
service_id=service_id
)
db.session.add(inbound_number)
db.session.commit()
return inbound_number
def create_reply_to_email(
service,
email_address,
is_default=True,
archived=False
):
data = {
'service': service,
'email_address': email_address,
'is_default': is_default,
'archived': archived,
}
reply_to = ServiceEmailReplyTo(**data)
db.session.add(reply_to)
db.session.commit()
return reply_to
def create_service_sms_sender(
service,
sms_sender,
is_default=True,
inbound_number_id=None,
archived=False
):
data = {
'service_id': service.id,
'sms_sender': sms_sender,
'is_default': is_default,
'inbound_number_id': inbound_number_id,
'archived': archived,
}
service_sms_sender = ServiceSmsSender(**data)
db.session.add(service_sms_sender)
db.session.commit()
return service_sms_sender
def create_letter_contact(
service,
contact_block,
is_default=True,
archived=False
):
data = {
'service': service,
'contact_block': contact_block,
'is_default': is_default,
'archived': archived,
}
letter_content = ServiceLetterContact(**data)
db.session.add(letter_content)
db.session.commit()
return letter_content
def create_annual_billing(
service_id, free_sms_fragment_limit, financial_year_start
):
annual_billing = AnnualBilling(
service_id=service_id,
free_sms_fragment_limit=free_sms_fragment_limit,
financial_year_start=financial_year_start
)
db.session.add(annual_billing)
db.session.commit()
return annual_billing
def create_domain(domain, organisation_id):
domain = Domain(domain=domain, organisation_id=organisation_id)
db.session.add(domain)
db.session.commit()
return domain
def create_organisation(
name='test_org_1',
active=True,
organisation_type=None,
domains=None,
organisation_id=None,
purchase_order_number=None,
billing_contact_names=None,
billing_contact_email_addresses=None,
billing_reference=None,
):
data = {
'id': organisation_id,
'name': name,
'active': active,
'organisation_type': organisation_type,
'purchase_order_number': purchase_order_number,
'billing_contact_names': billing_contact_names,
'billing_contact_email_addresses': billing_contact_email_addresses,
'billing_reference': billing_reference,
}
organisation = Organisation(**data)
dao_create_organisation(organisation)
for domain in domains or []:
create_domain(domain, organisation.id)
return organisation
def create_invited_org_user(organisation, invited_by, email_address='[email protected]'):
invited_org_user = InvitedOrganisationUser(
email_address=email_address,
invited_by=invited_by,
organisation=organisation,
)
save_invited_org_user(invited_org_user)
return invited_org_user
def create_daily_sorted_letter(billing_day=None,
file_name="Notify-20180118123.rs.txt",
unsorted_count=0,
sorted_count=0):
daily_sorted_letter = DailySortedLetter(
billing_day=billing_day or date(2018, 1, 18),
file_name=file_name,
unsorted_count=unsorted_count,
sorted_count=sorted_count
)
db.session.add(daily_sorted_letter)
db.session.commit()
return daily_sorted_letter
def create_ft_billing(bst_date,
template,
*,
provider='test',
rate_multiplier=1,
international=False,
rate=0,
billable_unit=1,
notifications_sent=1,
postage='none'
):
data = FactBilling(bst_date=bst_date,
service_id=template.service_id,
template_id=template.id,
notification_type=template.template_type,
provider=provider,
rate_multiplier=rate_multiplier,
international=international,
rate=rate,
billable_units=billable_unit,
notifications_sent=notifications_sent,
postage=postage)
db.session.add(data)
db.session.commit()
return data
def create_ft_notification_status(
bst_date,
notification_type='sms',
service=None,
template=None,
job=None,
key_type='normal',
notification_status='delivered',
count=1
):
if job:
template = job.template
if template:
service = template.service
notification_type = template.template_type
else:
if not service:
service = create_service()
template = create_template(service=service, template_type=notification_type)
data = FactNotificationStatus(
bst_date=bst_date,
template_id=template.id,
service_id=service.id,
job_id=job.id if job else uuid.UUID(int=0),
notification_type=notification_type,
key_type=key_type,
notification_status=notification_status,
notification_count=count
)
db.session.add(data)
db.session.commit()
return data
def create_process_time(bst_date='2021-03-01', messages_total=35, messages_within_10_secs=34):
data = FactProcessingTime(
bst_date=bst_date,
messages_total=messages_total,
messages_within_10_secs=messages_within_10_secs
)
fact_processing_time_dao.insert_update_processing_time(data)
def create_service_guest_list(service, email_address=None, mobile_number=None):
if email_address:
guest_list_user = ServiceGuestList.from_string(service.id, EMAIL_TYPE, email_address)
elif mobile_number:
guest_list_user = ServiceGuestList.from_string(service.id, MOBILE_TYPE, mobile_number)
else:
guest_list_user = ServiceGuestList.from_string(service.id, EMAIL_TYPE, '[email protected]')
db.session.add(guest_list_user)
db.session.commit()
return guest_list_user
def create_complaint(service=None,
notification=None,
created_at=None):
if not service:
service = create_service()
if not notification:
template = create_template(service=service, template_type='email')
notification = create_notification(template=template)
complaint = Complaint(notification_id=notification.id,
service_id=service.id,
ses_feedback_id=str(uuid.uuid4()),
complaint_type='abuse',
complaint_date=datetime.utcnow(),
created_at=created_at if created_at else datetime.now()
)
db.session.add(complaint)
db.session.commit()
return complaint
def ses_complaint_callback_malformed_message_id():
return {
'Signature': 'bb',
'SignatureVersion': '1', 'MessageAttributes': {}, 'MessageId': '98c6e927-af5d-5f3b-9522-bab736f2cbde',
'UnsubscribeUrl': 'https://sns.eu-west-1.amazonaws.com',
'TopicArn': 'arn:ses_notifications', 'Type': 'Notification',
'Timestamp': '2018-06-05T14:00:15.952Z', 'Subject': None,
'Message': '{"notificationType":"Complaint","complaint":{"complainedRecipients":[{"emailAddress":"[email protected]"}],"timestamp":"2018-06-05T13:59:58.000Z","feedbackId":"ses_feedback_id"},"mail":{"timestamp":"2018-06-05T14:00:15.950Z","source":"\\"Some Service\\" <someservicenotifications.service.gov.uk>","sourceArn":"arn:identity/notifications.service.gov.uk","sourceIp":"52.208.24.161","sendingAccountId":"888450439860","badMessageId":"ref1","destination":["[email protected]"]}}', # noqa
'SigningCertUrl': 'https://sns.pem'
}
def ses_complaint_callback_with_missing_complaint_type():
"""
https://docs.aws.amazon.com/ses/latest/DeveloperGuide/notification-contents.html#complaint-object
"""
return {
'Signature': 'bb',
'SignatureVersion': '1', 'MessageAttributes': {}, 'MessageId': '98c6e927-af5d-5f3b-9522-bab736f2cbde',
'UnsubscribeUrl': 'https://sns.eu-west-1.amazonaws.com',
'TopicArn': 'arn:ses_notifications', 'Type': 'Notification',
'Timestamp': '2018-06-05T14:00:15.952Z', 'Subject': None,
'Message': '{"notificationType":"Complaint","complaint":{"complainedRecipients":[{"emailAddress":"[email protected]"}],"timestamp":"2018-06-05T13:59:58.000Z","feedbackId":"ses_feedback_id"},"mail":{"timestamp":"2018-06-05T14:00:15.950Z","source":"\\"Some Service\\" <someservicenotifications.service.gov.uk>","sourceArn":"arn:identity/notifications.service.gov.uk","sourceIp":"52.208.24.161","sendingAccountId":"888450439860","messageId":"ref1","destination":["[email protected]"]}}', # noqa
'SigningCertUrl': 'https://sns.pem'
}
def ses_complaint_callback():
"""
https://docs.aws.amazon.com/ses/latest/DeveloperGuide/notification-contents.html#complaint-object
"""
return {
'Signature': 'bb',
'SignatureVersion': '1', 'MessageAttributes': {}, 'MessageId': '98c6e927-af5d-5f3b-9522-bab736f2cbde',
'UnsubscribeUrl': 'https://sns.eu-west-1.amazonaws.com',
'TopicArn': 'arn:ses_notifications', 'Type': 'Notification',
'Timestamp': '2018-06-05T14:00:15.952Z', 'Subject': None,
'Message': '{"notificationType":"Complaint","complaint":{"complaintFeedbackType": "abuse", "complainedRecipients":[{"emailAddress":"[email protected]"}],"timestamp":"2018-06-05T13:59:58.000Z","feedbackId":"ses_feedback_id"},"mail":{"timestamp":"2018-06-05T14:00:15.950Z","source":"\\"Some Service\\" <someservicenotifications.service.gov.uk>","sourceArn":"arn:identity/notifications.service.gov.uk","sourceIp":"52.208.24.161","sendingAccountId":"888450439860","messageId":"ref1","destination":["[email protected]"]}}', # noqa
'SigningCertUrl': 'https://sns.pem'
}
def ses_notification_callback():
return '{\n "Type" : "Notification",\n "MessageId" : "ref1",' \
'\n "TopicArn" : "arn:aws:sns:eu-west-1:123456789012:testing",' \
'\n "Message" : "{\\"notificationType\\":\\"Delivery\\",' \
'\\"mail\\":{\\"timestamp\\":\\"2016-03-14T12:35:25.909Z\\",' \
'\\"source\\":\\"[email protected]\\",' \
'\\"sourceArn\\":\\"arn:aws:ses:eu-west-1:123456789012:identity/testing-notify\\",' \
'\\"sendingAccountId\\":\\"123456789012\\",' \
'\\"messageId\\":\\"ref1\\",' \
'\\"destination\\":[\\"[email protected]\\"]},' \
'\\"delivery\\":{\\"timestamp\\":\\"2016-03-14T12:35:26.567Z\\",' \
'\\"processingTimeMillis\\":658,' \
'\\"recipients\\":[\\"[email protected]\\"],' \
'\\"smtpResponse\\":\\"250 2.0.0 OK 1457958926 uo5si26480932wjc.221 - gsmtp\\",' \
'\\"reportingMTA\\":\\"a6-238.smtp-out.eu-west-1.amazonses.com\\"}}",' \
'\n "Timestamp" : "2016-03-14T12:35:26.665Z",\n "SignatureVersion" : "1",' \
'\n "Signature" : "X8d7eTAOZ6wlnrdVVPYanrAlsX0SMPfOzhoTEBnQqYkrNWTqQY91C0f3bxtPdUhUt' \
'OowyPAOkTQ4KnZuzphfhVb2p1MyVYMxNKcBFB05/qaCX99+92fjw4x9LeUOwyGwMv5F0Vkfi5qZCcEw69uVrhYL' \
'VSTFTrzi/yCtru+yFULMQ6UhbY09GwiP6hjxZMVr8aROQy5lLHglqQzOuSZ4KeD85JjifHdKzlx8jjQ+uj+FLzHXPMA' \
'PmPU1JK9kpoHZ1oPshAFgPDpphJe+HwcJ8ezmk+3AEUr3wWli3xF+49y8Z2anASSVp6YI2YP95UT8Rlh3qT3T+V9V8rbSVislxA==",' \
'\n "SigningCertURL" : "https://sns.eu-west-1.amazonaws.com/SimpleNotificationService-bb750' \
'dd426d95ee9390147a5624348ee.pem",' \
'\n "UnsubscribeURL" : "https://sns.eu-west-1.amazonaws.com/?Action=Unsubscribe&S' \
'subscriptionArn=arn:aws:sns:eu-west-1:302763885840:preview-emails:d6aad3ef-83d6-4cf3-a470-54e2e75916da"\n}'
def create_service_data_retention(
service,
notification_type='sms',
days_of_retention=3
):
data_retention = insert_service_data_retention(
service_id=service.id,
notification_type=notification_type,
days_of_retention=days_of_retention
)
return data_retention
def create_invited_user(service=None,
to_email_address=None):
if service is None:
service = create_service()
if to_email_address is None:
to_email_address = '[email protected]'
from_user = service.users[0]
data = {
'service': service,
'email_address': to_email_address,
'from_user': from_user,
'permissions': 'send_messages,manage_service,manage_api_keys',
'folder_permissions': [str(uuid.uuid4()), str(uuid.uuid4())]
}
invited_user = InvitedUser(**data)
save_invited_user(invited_user)
return invited_user
def create_template_folder(service, name='foo', parent=None):
tf = TemplateFolder(name=name, service=service, parent=parent)
db.session.add(tf)
db.session.commit()
return tf
def create_letter_branding(name='HM Government', filename='hm-government'):
test_domain_branding = LetterBranding(name=name,
filename=filename,
)
db.session.add(test_domain_branding)
db.session.commit()
return test_domain_branding
def set_up_usage_data(start_date):
year = int(start_date.strftime('%Y'))
one_week_earlier = start_date - timedelta(days=7)
two_days_later = start_date + timedelta(days=2)
one_week_later = start_date + timedelta(days=7)
one_month_later = start_date + timedelta(days=31)
# service with sms and letters:
service_1_sms_and_letter = create_service(
service_name='a - with sms and letter',
purchase_order_number="service purchase order number",
billing_contact_names="service billing contact names",
billing_contact_email_addresses="[email protected] [email protected]",
billing_reference="service billing reference"
)
letter_template_1 = create_template(service=service_1_sms_and_letter, template_type='letter')
sms_template_1 = create_template(service=service_1_sms_and_letter, template_type='sms')
create_annual_billing(
service_id=service_1_sms_and_letter.id, free_sms_fragment_limit=10, financial_year_start=year
)
org_1 = create_organisation(
name="Org for {}".format(service_1_sms_and_letter.name),
purchase_order_number="org1 purchase order number",
billing_contact_names="org1 billing contact names",
billing_contact_email_addresses="[email protected] [email protected]",
billing_reference="org1 billing reference"
)
dao_add_service_to_organisation(
service=service_1_sms_and_letter,
organisation_id=org_1.id
)
create_ft_billing(bst_date=one_week_earlier, template=sms_template_1, billable_unit=2, rate=0.11)
create_ft_billing(bst_date=start_date, template=sms_template_1, billable_unit=2, rate=0.11)
create_ft_billing(bst_date=two_days_later, template=sms_template_1, billable_unit=1, rate=0.11)
create_ft_billing(bst_date=one_week_later, template=letter_template_1,
notifications_sent=2, billable_unit=1, rate=.35, postage='first')
create_ft_billing(bst_date=one_month_later, template=letter_template_1,
notifications_sent=4, billable_unit=2, rate=.45, postage='second')
create_ft_billing(bst_date=one_week_later, template=letter_template_1,
notifications_sent=2, billable_unit=2, rate=.45, postage='second')
# service with emails only:
service_with_emails = create_service(service_name='b - emails')
email_template = create_template(service=service_with_emails, template_type='email')
org_2 = create_organisation(
name='Org for {}'.format(service_with_emails.name),
)
dao_add_service_to_organisation(service=service_with_emails, organisation_id=org_2.id)
create_ft_billing(bst_date=start_date, template=email_template, notifications_sent=10)
# service with letters:
service_with_letters = create_service(service_name='c - letters only')
letter_template_3 = create_template(service=service_with_letters, template_type='letter')
org_for_service_with_letters = create_organisation(
name="Org for {}".format(service_with_letters.name),
purchase_order_number="org3 purchase order number",
billing_contact_names="org3 billing contact names",
billing_contact_email_addresses="[email protected] [email protected]",
billing_reference="org3 billing reference"
)
dao_add_service_to_organisation(service=service_with_letters, organisation_id=org_for_service_with_letters.id)
create_ft_billing(bst_date=start_date, template=letter_template_3,
notifications_sent=2, billable_unit=3, rate=.50, postage='first')
create_ft_billing(bst_date=one_week_later, template=letter_template_3,
notifications_sent=8, billable_unit=5, rate=.65, postage='second')
create_ft_billing(bst_date=one_month_later, template=letter_template_3,
notifications_sent=12, billable_unit=5, rate=.65, postage='second')
# service with letters, without an organisation:
service_with_letters_without_org = create_service(service_name='d - service without org')
letter_template_4 = create_template(service=service_with_letters_without_org, template_type='letter')
create_ft_billing(bst_date=two_days_later, template=letter_template_4,
notifications_sent=7, billable_unit=4, rate=1.55, postage='rest-of-world')
create_ft_billing(bst_date=two_days_later, template=letter_template_4,
notifications_sent=8, billable_unit=4, rate=1.55, postage='europe')
create_ft_billing(bst_date=two_days_later, template=letter_template_4,
notifications_sent=2, billable_unit=1, rate=.35, postage='second')
create_ft_billing(bst_date=two_days_later, template=letter_template_4,
notifications_sent=1, billable_unit=1, rate=.50, postage='first')
# service with chargeable SMS, without an organisation
service_with_sms_without_org = create_service(
service_name='b - chargeable sms',
purchase_order_number="sms purchase order number",
billing_contact_names="sms billing contact names",
billing_contact_email_addresses="[email protected] [email protected]",
billing_reference="sms billing reference"
)
sms_template = create_template(service=service_with_sms_without_org, template_type='sms')
create_annual_billing(
service_id=service_with_sms_without_org.id, free_sms_fragment_limit=10, financial_year_start=year
)
create_ft_billing(bst_date=one_week_earlier, template=sms_template, rate=0.11, billable_unit=12)
create_ft_billing(bst_date=two_days_later, template=sms_template, rate=0.11)
create_ft_billing(bst_date=one_week_later, template=sms_template, billable_unit=2, rate=0.11)
# service with SMS within free allowance
service_with_sms_within_allowance = create_service(
service_name='e - sms within allowance'
)
sms_template_2 = create_template(service=service_with_sms_within_allowance, template_type='sms')
create_annual_billing(
service_id=service_with_sms_within_allowance.id, free_sms_fragment_limit=10, financial_year_start=year
)
create_ft_billing(bst_date=one_week_later, template=sms_template_2, billable_unit=2, rate=0.11)
# dictionary with services and orgs to return
return {
"org_1": org_1,
"service_1_sms_and_letter": service_1_sms_and_letter,
"org_2": org_2,
"service_with_emails": service_with_emails,
"org_for_service_with_letters": org_for_service_with_letters,
"service_with_letters": service_with_letters,
"service_with_letters_without_org": service_with_letters_without_org,
"service_with_sms_without_org": service_with_sms_without_org,
"service_with_sms_within_allowance": service_with_sms_within_allowance,
}
def create_returned_letter(service=None, reported_at=None, notification_id=None):
if not service:
service = create_service(service_name='a - with sms and letter')
returned_letter = ReturnedLetter(
service_id=service.id,
reported_at=reported_at or datetime.utcnow(),
notification_id=notification_id or uuid.uuid4(),
created_at=datetime.utcnow(),
)
db.session.add(returned_letter)
db.session.commit()
return returned_letter
def create_service_contact_list(
service=None,
original_file_name='EmergencyContactList.xls',
row_count=100,
template_type='email',
created_by_id=None,
archived=False,
):
if not service:
service = create_service(service_name='service for contact list', user=create_user())
contact_list = ServiceContactList(
service_id=service.id,
original_file_name=original_file_name,
row_count=row_count,
template_type=template_type,
created_by_id=created_by_id or service.users[0].id,
created_at=datetime.utcnow(),
archived=archived,
)
db.session.add(contact_list)
db.session.commit()
return contact_list
def create_broadcast_message(
template=None,
*,
service=None, # only used if template is not provided
created_by=None,
personalisation=None,
content=None,
status=BroadcastStatusType.DRAFT,
starts_at=None,
finishes_at=None,
areas=None,
stubbed=False
):
if template:
service = template.service
template_id = template.id
template_version = template.version
personalisation = personalisation or {}
content = template._as_utils_template_with_personalisation(
personalisation
).content_with_placeholders_filled_in
elif content:
template_id = None
template_version = None
personalisation = None
else:
pytest.fail('Provide template or content')
broadcast_message = BroadcastMessage(
service_id=service.id,
template_id=template_id,
template_version=template_version,
personalisation=personalisation,
status=status,
starts_at=starts_at,
finishes_at=finishes_at,
created_by_id=created_by.id if created_by else service.created_by_id,
areas=areas or {'areas': [], 'simple_polygons': []},
content=content,
stubbed=stubbed
)
db.session.add(broadcast_message)
db.session.commit()
return broadcast_message
def create_broadcast_event(
broadcast_message,
sent_at=None,
message_type='alert',
transmitted_content=None,
transmitted_areas=None,
transmitted_sender=None,
transmitted_starts_at=None,
transmitted_finishes_at=None,
):
b_e = BroadcastEvent(
service=broadcast_message.service,
broadcast_message=broadcast_message,
sent_at=sent_at or datetime.utcnow(),
message_type=message_type,
transmitted_content=transmitted_content or {'body': 'this is an emergency broadcast message'},
transmitted_areas=transmitted_areas or broadcast_message.areas,
transmitted_sender=transmitted_sender or 'www.notifications.service.gov.uk',
transmitted_starts_at=transmitted_starts_at,
transmitted_finishes_at=transmitted_finishes_at or datetime.utcnow() + timedelta(hours=24),
)
db.session.add(b_e)
db.session.commit()
return b_e
def create_broadcast_provider_message(
broadcast_event,
provider,
status='sending'
):
broadcast_provider_message_id = uuid.uuid4()
provider_message = BroadcastProviderMessage(
id=broadcast_provider_message_id,
broadcast_event=broadcast_event,
provider=provider,
status=status,
)
db.session.add(provider_message)
db.session.commit()
provider_message_number = None
if provider == BroadcastProvider.VODAFONE:
provider_message_number = BroadcastProviderMessageNumber(
broadcast_provider_message_id=broadcast_provider_message_id)
db.session.add(provider_message_number)
db.session.commit()
return provider_message
def create_webauthn_credential(
user,
name='my key',
*,
credential_data='ABC123',
registration_response='DEF456',
):
webauthn_credential = WebauthnCredential(
user=user,
name=name,
credential_data=credential_data,
registration_response=registration_response
)
db.session.add(webauthn_credential)
db.session.commit()
return webauthn_credential
|
# -*- coding: utf-8 -*-
import os
import uuid
import datetime
from google.appengine.ext import webapp
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.api import mail
from google.appengine.ext.webapp import template
from django.utils import simplejson as json
from google.appengine.api import urlfetch
import urllib
import conf
import app.FlyingClub
import app.CoreHandler
from app.models import Comment, Crew
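# Sign-in handler backed by the RPX (Janrain Engage) hosted login service:
# GET renders the auth page template, while a POST for the 'rpx' page exchanges
# the RPX token for the user's profile, creates or loads the matching Crew
# record and sets the session cookie before redirecting to the profile page.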
class AuthHandler(webapp.RequestHandler):
###################################################################################################
## Get Actions
###################################################################################################
def get(self, section=None, page=None):
#sessID = self.do_cookie_check()
section = 'auth'
template_vars = {}
App = app.FlyingClub.FlyingClub(section, page)
template_vars['app'] = App
#tvars['appo'] = Appo
#tvars['conf'] = conf
#tvars['user'] = None
#template_vars['crewID'] = crewID
        #if 'sessIdent' in self.request.cookies:
        #    sessIdent = self.request.cookies['sessIdent']
        #else:
        #    sessIdent = None
## Setup Section and Page
#if section == None:
#section = "index"
#template_vars['section'] = section
#template_vars['page'] = page
## Get Comments
q = db.GqlQuery("SELECT * FROM Comment " +
"WHERE section = :1 " +
"ORDER BY dated DESC",
section)
results = q.fetch(50)
#template_vars['comments'] = results
## Application Object
#template_vars['page_title'] = Appo.title("/%s/" % section)
        ## Setup User + Auth
#user = users.get_current_user()
#if not user:
# template_vars['user'] = None
# template_vars['login_url'] = users.create_login_url("/set_session/")
#else:
# template_vars['user'] = user
# template_vars['logout_url'] = users.create_logout_url("/subscribe/")
## Sign In Section
#if section == 'ssignin' :
# if sessID:
# self.redirect("/profile/")
# return
#template_vars['page_title'] = 'Sign In with OpenId'
#if section == 'sdo_logout':
# cook_str = 'sessID=%s; expires=Fri, 31-Dec-1980 23:59:59 GMT; Path=/;' % ''
# self.response.headers.add_header( 'Set-Cookie',
# cook_str
# )
# self.redirect("/")
# return
#if section == 'sprofile':
# if not sessID:
# self.redirect("/signin/")
# return
#template_vars['welcome'] = True if self.request.get("welcome") == '1' else False
#template_vars['page_title'] = 'My Profile'
main_template = '%s.html' % (section)
path = '/%s/' % (section)
#template_vars['path'] = path
template_path = os.path.join(os.path.dirname(__file__), '../templates/pages/%s' % main_template)
self.response.out.write(template.render(template_path, template_vars))
###################################################################################################
## Post Actions
###################################################################################################
def post(self, page=None):
if page == 'rpx':
token = self.request.get('token')
url = 'https://rpxnow.com/api/v2/auth_info'
args = {
'format': 'json',
'apiKey': conf.RPX_API_KEY,
'token': token
}
r = urlfetch.fetch( url=url,
payload=urllib.urlencode(args),
method=urlfetch.POST,
headers={'Content-Type':'application/x-www-form-urlencoded'}
)
data = json.loads(r.content)
if data['stat'] == 'ok':
welcome = 0
unique_identifier = data['profile']['identifier']
q = db.GqlQuery("select * from Crew where ident= :1", unique_identifier)
crew = q.get()
if not crew:
crew = Crew(ident=unique_identifier)
crew.name = data['profile']['preferredUsername']
if data['profile'].has_key('email'):
crew.email = data['profile']['email']
crew.put()
welcome = 1
subject = "New Login: %s" % crew.name
body = "New login on schedule"
else:
subject = "Return Login: %s" % crew.name
body = "New login on schedule"
sessID = str(crew.key())
                # db.Model has no id() method; store the key-based session id computed above
                cook_str = 'crewID=%s; expires=Fri, 31-Dec-2020 23:59:59 GMT; Path=/;' % sessID
self.response.headers.add_header( 'Set-Cookie',
cook_str
)
mail.send_mail( sender = conf.EMAIL,
to = "Dev <[email protected]>",
subject = subject,
body = body
)
self.redirect("/profile/?welcome=%s" % welcome)
return
else:
            print page  # 'section' is not defined in post(); log only the unhandled page
#self.redirect("/")
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Despesa.valor'
db.alter_column('financeiro_despesa', 'valor', self.gf('utils.fields.BRDecimalField')(max_digits=14, decimal_places=2))
# Changing field 'Projeto.orcamento'
db.alter_column('financeiro_projeto', 'orcamento', self.gf('utils.fields.BRDecimalField')(max_digits=16, decimal_places=2))
def backwards(self, orm):
# Changing field 'Despesa.valor'
db.alter_column('financeiro_despesa', 'valor', self.gf('django.db.models.fields.DecimalField')(max_digits=14, decimal_places=2))
# Changing field 'Projeto.orcamento'
db.alter_column('financeiro_projeto', 'orcamento', self.gf('django.db.models.fields.DecimalField')(max_digits=16, decimal_places=2))
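    # Frozen model definitions captured by South when this migration was created;
    # forwards() and backwards() run against this ORM snapshot rather than the
    # live models.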
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cadastro.membro': {
'Meta': {'ordering': "['nome']", 'object_name': 'Membro', '_ormbases': ['cadastro.Pessoa']},
'aprovador': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'membro_aprovador'", 'null': 'True', 'to': "orm['auth.User']"}),
'assinado': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'atividade_profissional': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
'contrib_prox_pgto': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'contrib_tipo': ('django.db.models.fields.CharField', [], {'default': "'N'", 'max_length': '1'}),
'contrib_valor': ('utils.fields.BRDecimalField', [], {'default': '0', 'max_digits': '7', 'decimal_places': '2'}),
'cpf': ('django.db.models.fields.CharField', [], {'max_length': '14', 'null': 'True', 'blank': 'True'}),
'dt_prefiliacao': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'dtnascimento': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'endereco': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'endereco_cep': ('django.db.models.fields.CharField', [], {'max_length': '9', 'null': 'True', 'blank': 'True'}),
'endereco_complemento': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'endereco_num': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'estadocivil': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'facebook_access_token': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'facebook_id': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'filiacao_partidaria': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'filiado': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'fundador': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'municipio_eleitoral': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
'municipio_naturalidade': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
'nome_da_mae': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True', 'blank': 'True'}),
'pessoa_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cadastro.Pessoa']", 'unique': 'True', 'primary_key': 'True'}),
'rg': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'secao_eleitoral': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'titulo_eleitoral': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'twitter_id': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'twitter_oauth_token': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'twitter_oauth_token_secret': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'uf_eleitoral': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['municipios.UF']", 'null': 'True', 'blank': 'True'}),
'uf_naturalidade': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'uf_naturalidade'", 'null': 'True', 'to': "orm['municipios.UF']"}),
'usuario': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'membro'", 'null': 'True', 'to': "orm['auth.User']"}),
'zona_eleitoral': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'})
},
'cadastro.pessoa': {
'Meta': {'ordering': "['nome']", 'object_name': 'Pessoa'},
'apelido': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'celular': ('django.db.models.fields.CharField', [], {'max_length': '14', 'null': 'True', 'blank': 'True'}),
'dtcadastro': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'municipio': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
'nome': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'residencial': ('django.db.models.fields.CharField', [], {'max_length': '14', 'null': 'True', 'blank': 'True'}),
'sexo': ('django.db.models.fields.CharField', [], {'default': "'O'", 'max_length': '1'}),
'status_email': ('django.db.models.fields.CharField', [], {'default': "'N'", 'max_length': '1'}),
'uf': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['municipios.UF']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'financeiro.conta': {
'Meta': {'ordering': "('conta',)", 'object_name': 'Conta'},
'ativa': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'conta': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'descricao': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nota': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'tipo': ('django.db.models.fields.CharField', [], {'default': "'M'", 'max_length': '1'})
},
'financeiro.deposito': {
'Meta': {'ordering': "['dt']", 'object_name': 'Deposito', '_ormbases': ['financeiro.Operacao']},
'operacao_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['financeiro.Operacao']", 'unique': 'True', 'primary_key': 'True'}),
'receita': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['financeiro.Receita']", 'null': 'True', 'blank': 'True'})
},
'financeiro.despesa': {
'Meta': {'object_name': 'Despesa'},
'documento': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'dtemissao': ('django.db.models.fields.DateField', [], {}),
'dtvencimento': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'fornecedor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['financeiro.Fornecedor']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'integral': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'observacoes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'tipo_despesa': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['financeiro.TipoDespesa']", 'null': 'True', 'blank': 'True'}),
'valor': ('utils.fields.BRDecimalField', [], {'max_digits': '14', 'decimal_places': '2'})
},
'financeiro.fornecedor': {
'Meta': {'ordering': "('nome',)", 'object_name': 'Fornecedor'},
'ativo': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'dados_financeiros': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identificador': ('django.db.models.fields.CharField', [], {'max_length': '14'}),
'nome': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'servico_padrao': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['financeiro.TipoDespesa']", 'null': 'True', 'blank': 'True'})
},
'financeiro.metaarrecadacao': {
'Meta': {'object_name': 'MetaArrecadacao'},
'data_inicial': ('django.db.models.fields.DateField', [], {}),
'data_limite': ('django.db.models.fields.DateField', [], {}),
'descricao': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'valor': ('utils.fields.BRDecimalField', [], {'max_digits': '12', 'decimal_places': '2'})
},
'financeiro.operacao': {
'Meta': {'ordering': "['dt']", 'object_name': 'Operacao'},
'conferido': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'conta': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['financeiro.Conta']"}),
'dt': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'obs': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'referencia': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'tipo': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'valor': ('django.db.models.fields.DecimalField', [], {'max_digits': '14', 'decimal_places': '2'})
},
'financeiro.pagamento': {
'Meta': {'ordering': "['dt']", 'object_name': 'Pagamento', '_ormbases': ['financeiro.Operacao']},
'despesa': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['financeiro.Despesa']", 'null': 'True', 'blank': 'True'}),
'fornecedor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['financeiro.Fornecedor']"}),
'operacao_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['financeiro.Operacao']", 'unique': 'True', 'primary_key': 'True'}),
'projeto': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['financeiro.Projeto']", 'null': 'True', 'blank': 'True'}),
'tipo_despesa': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['financeiro.TipoDespesa']", 'null': 'True', 'blank': 'True'})
},
'financeiro.periodocontabil': {
'Meta': {'ordering': "['ciclo']", 'object_name': 'PeriodoContabil'},
'ciclo': ('django.db.models.fields.CharField', [], {'max_length': '6'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'publico': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'financeiro.projeto': {
'Meta': {'object_name': 'Projeto'},
'ativo': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'descricao': ('django.db.models.fields.TextField', [], {}),
'dtfim': ('django.db.models.fields.DateField', [], {}),
'dtinicio': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nome': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'orcamento': ('utils.fields.BRDecimalField', [], {'max_digits': '16', 'decimal_places': '2'}),
'responsavel': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'financeiro.receita': {
'Meta': {'ordering': "('conta__conta',)", 'object_name': 'Receita'},
'colaborador': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cadastro.Membro']", 'null': 'True', 'blank': 'True'}),
'conta': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['financeiro.Conta']"}),
'dtaviso': ('django.db.models.fields.DateField', [], {}),
'dtpgto': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nota': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'valor': ('utils.fields.BRDecimalField', [], {'max_digits': '12', 'decimal_places': '2'})
},
'financeiro.tipodespesa': {
'Meta': {'object_name': 'TipoDespesa'},
'codigo': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'descricao': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'descricao_breve': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'financeiro.transferencia': {
'Meta': {'ordering': "['dt']", 'object_name': 'Transferencia', '_ormbases': ['financeiro.Operacao']},
'destino': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['financeiro.Conta']"}),
'operacao_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['financeiro.Operacao']", 'unique': 'True', 'primary_key': 'True'}),
'transf_associada': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['financeiro.Transferencia']", 'null': 'True', 'blank': 'True'})
},
'municipios.uf': {
'Meta': {'ordering': "(u'nome',)", 'object_name': 'UF'},
'id_ibge': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'nome': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'regiao': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'uf': ('django.db.models.fields.CharField', [], {'max_length': '2'})
}
}
complete_apps = ['financeiro']
|
#!/usr/bin/env python2
from __future__ import print_function
from collections import namedtuple
Node = namedtuple('Node', ['color', 'left', 'right', 'low', 'high', 'key'])
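# Immutable tree node: 'color' is the red-black colour, [low, high] is the
# inclusive integer interval stored at this node, and 'key' is the value
# returned for lookups that fall inside that interval.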
def mid(node):
return (node.high + node.low) / 2.0
def makeRed(node):
return Node('red', node.left, node.right, node.low, node.high, node.key)
def makeBlack(node):
return Node('black', node.left, node.right, node.low, node.high, node.key)
def get(node, val, default):
while node:
if node.low <= val and val <= node.high:
return node.key
if val > mid(node):
node = node.right
else:
node = node.left
return default
var_name = 'c'
code_template = \
'''
if ({0.low} <= {1} && {1} <= {0.high}) {{
{5} return {0.key};
{5} }}
{5}if ({1} > {2}) {{
{5} {3}
{5} }}
{5}else {{
{5} {4}
{5} }}
'''[1:-1]
code_template2 = \
'''
if ({0.low} <= {1} && {1} <= {0.high}) {{
{3} return {0.key};
{3} }}
{3}else {{
{3} {2}
{3} }}
'''[1:-1]
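# Both templates emit C-style lookup code for gen_code below. In code_template,
# {0} is the node, {1} the C variable name, {2} the integer midpoint, {3}/{4}
# the code for the '>' branch and the else branch, and {5} the indentation;
# code_template2 is the collapsed form used when both subtrees would generate
# identical code.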
def gen_code(node, default, indent=0):
if not node:
return '''return {};'''.format(default)
if node.key == default and not node.left and not node.right:
return '''return {};'''.format(default)
    left_code = gen_code(node.left, default, indent + 1)
    right_code = gen_code(node.right, default, indent + 1)
    if left_code != right_code:
        # values greater than the midpoint live in the right subtree (see get()),
        # so the '>' branch of the template must receive the right subtree's code
        return code_template.format(node, var_name, int(mid(node)),
                                    right_code,
                                    left_code,
                                    ' ' * indent)
else:
return code_template2.format(node, var_name,
left_code, ' ' * indent)
def get_node(node, val):
while node:
if node.low <= val and val <= node.high:
return node
if val > mid(node):
node = node.right
else:
node = node.left
def insert(low, high, key, node):
val = (low + high) / 2.0
def ins(n):
if not n:
return Node('red', None, None, low, high, key)
if val <= mid(n):
return balance(n.color, ins(n.left), n.right, n.low, n.high, n.key)
else:
return balance(n.color, n.left, ins(n.right), n.low, n.high, n.key)
return makeBlack(ins(node))
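# balance() applies the four Okasaki-style red-black rebalancing cases (a red
# child with a red grandchild on either side) and rebuilds the local subtree
# as a red parent with two black children; any other shape is returned as-is.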
def balance(color, left, right, low, high, key):
if left and left.color == 'red':
if left.left and left.left.color == 'red':
new_left = makeBlack(left.left)
new_right = Node('black', left.right, right, low, high, key)
return Node('red', new_left, new_right, left.low, left.high,
left.key)
elif left.right and left.right.color == 'red':
new_left = Node('black', left.left, left.right.left, left.low,
left.high, left.key)
new_right = Node('black', left.right.right, right, low, high,
key)
return Node('red', new_left, new_right, left.right.low,
left.right.high, left.right.key)
elif right and right.color == 'red':
if right.left and right.left.color == 'red':
new_left = Node('black', left, right.left.left, low, high,
key)
new_right = Node('black', right.left.right, right.right, right.low,
right.high, right.key)
return Node('red', new_left, new_right, right.left.low,
right.left.high, right.left.key)
elif right.right and right.right.color == 'red':
new_left = Node('black', left, right.left, low, high, key)
new_right = makeBlack(right.right)
return Node('red', new_left, new_right, right.low, right.high,
right.key)
return Node(color, left, right, low, high, key)
def print_node(node):
if not node:
print('.')
else:
print('{}..{}:{}'.format(node.low, node.high, node.key))
def print_tree(node, indent=0):
print(' ' * indent, end='')
print_node(node)
if node:
print_tree(node.left, indent + 1)
print_tree(node.right, indent + 1)
class Tree(object):
def __init__(self):
self.root = None
def get(self, val, default):
return get(self.root, val, default)
def get_node(self, val):
return get_node(self.root, val)
def insert(self, low, high, key):
self.root = insert(low, high, key, self.root)
def print(self):
print_tree(self.root)
def get_lowest(self):
node = self.root
while node.left:
node = node.left
return node.low
def get_highest(self):
node = self.root
while node.right:
node = node.right
return node.high
def size(self):
def size(node):
if node is None:
return 0
else:
return 1 + size(node.left) + size(node.right)
return size(self.root)
def gen_code(self, default):
return gen_code(self.root, default)
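# Illustrative sketch (not part of the original script): building a small
# interval tree by hand and emitting the nested C-style lookup. The function
# name below is hypothetical; only Tree, get and gen_code come from this file.
def _example_tree_usage():
    tree = Tree()
    tree.insert(0x41, 0x5A, 'true')    # 'A'..'Z'
    tree.insert(0x61, 0x7A, 'true')    # 'a'..'z'
    print(tree.get(0x42, 'false'))     # -> 'true'
    print(tree.gen_code('false'))      # nested if/else over the variable `c`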
def test_tree():
tree = Tree()
iterations = 1000 * 10
for i in range(1, iterations):
tree.insert(i * 10, i * 10 + 9, 1.0 / i)
for i in range(1, iterations):
assert tree.get(i * 10, 0) == 1.0 / i
tree.print()
cat_map = { 'Lu': 'true', 'Ll': 'true', 'Lm':'true', 'Lo':'true'}
def insert_line(line, tree):
if not line or line[0] not in '0123456789abcdefABCDEF':  # code points are hex
return
no_comment = line.split('#')[0]
parts = [t.strip() for t in no_comment.split(';')]
low = None
high = None
cat = parts[1].strip()
cat = cat_map.get(cat, 'false')
if '..' in parts[0]:
r = parts[0].split('..')
low = int(r[0], base=16)
high = int(r[1], base=16)
else:
low = int(parts[0], base=16)
high = int(parts[0], base=16)
tree.insert(low, high, cat)
def tree_from_filename(filename):
tree = Tree()
with open(filename) as f:
for line in f:
insert_line(line, tree)
return tree
def merge_ranges(tree):
new_tree = Tree()
stop = tree.get_highest()
low = tree.get_lowest()
while low < stop:
high = low
key = tree.get(low, 'false')
while high < stop:
node = tree.get_node(high + 1)
if node is None and key == 'false':
high += 1
elif node.key == key:
high = node.high
else:
break
new_tree.insert(low, high, key)
low = high + 1
return new_tree
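# For example, adjacent nodes 0x41..0x4F -> 'true' and 0x50..0x5A -> 'true'
# come out of merge_ranges as a single 0x41..0x5A -> 'true' node, and code
# points with no node at all are treated as 'false' while a 'false' run is
# being extended.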
def find_key(node, key):
if not node:
return False
if node.key == key:
return True
else:
return find_key(node.left, key) or find_key(node.right, key)
def other(key):
if key == 'true':
return 'false'
else:
return 'true'
def check_merges(node):
if node is None:
return True
out = True
if node.left:
out = out and find_key(node.left, other(node.key))
if node.right:
out = out and find_key(node.right, other(node.key))
return out and check_merges(node.left) and check_merges(node.right)
def get_merged_tree(filename):
tree = tree_from_filename(filename)
mtree = merge_ranges(tree)
assert check_merges(mtree.root)
return mtree
def main():
import sys
mtree = get_merged_tree(sys.argv[2])
with open(sys.argv[1], 'w') as f:
f.write(mtree.gen_code('false'))
if __name__ == '__main__':
main()
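# Usage sketch (inferred from main() above; file names are placeholders): the
# first argument is the file to write and the second is a UnicodeData-style
# listing, e.g.
#     python2 gen_unicode_table.py letter_check.c DerivedGeneralCategory.txt
# The emitted C fragment tests the variable `c` against the merged code point
# ranges and returns "true" for the letter categories in cat_map, otherwise
# "false".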
|
#!/usr/bin/env python
"""test_nba
Tests for `nba` module.
"""
from sportsstats import nba
import json
import unittest
class TestNba(unittest.TestCase):
def setUp(self):
from datetime import datetime
april_9 = datetime(2016, 4, 9)
self.nba_stats = nba.Stats(april_9, april_9)
self.expected_query_url = (
"/stats/leaguedashptstats?"
"College=&Conference=&Country=&DateFrom=04%2F09%2F2016&"
"DateTo=04%2F09%2F2016&Division=&DraftPick=&DraftYear=&"
"GameScope=&Height=&LastNGames=0&LeagueID=00&Location=&"
"Month=0&OpponentTeamID=0&Outcome=&PORound=0&PerMode=Totals&"
"PlayerExperience=&PlayerOrTeam=Player&PlayerPosition=&"
"PtMeasureType=SpeedDistance&Season=2015-16&SeasonSegment=&"
"SeasonType=Regular+Season&StarterBench=&TeamID=0&"
"VsConference=&VsDivision=&Weight="
)
def tearDown(self):
del self.nba_stats
def test_build_query_url(self):
actual = self.nba_stats._Stats__build_query_url()
self.assertEqual(actual, self.expected_query_url)
def test_send_get_request(self):
connection = self.nba_stats._Stats__send_get_request(
self.expected_query_url)
actual = connection.getresponse().status
self.assertEqual(actual, 200)
connection.close()
def test_download(self):
data = json.loads(self.nba_stats.download())
expected = [
'PLAYER_ID', 'PLAYER_NAME', 'TEAM_ID', 'TEAM_ABBREVIATION',
'GP', 'W', 'L', 'MIN', 'DIST_FEET', 'DIST_MILES',
'DIST_MILES_OFF', 'DIST_MILES_DEF', 'AVG_SPEED',
'AVG_SPEED_OFF', 'AVG_SPEED_DEF'
]
actual = data['resultSets'][0]['headers']
self.assertEqual(actual, expected)
if __name__ == '__main__':
import sys
sys.exit(unittest.main())
|
"""Core classes and exceptions for Simple-Salesforce"""
# has to be defined prior to login import
DEFAULT_API_VERSION = '29.0'
import requests
import json
try:
from urlparse import urlparse
except ImportError:
# Python 3+
from urllib.parse import urlparse
from simple_salesforce.login import SalesforceLogin
from simple_salesforce.util import date_to_iso8601, SalesforceError
try:
from collections import OrderedDict
except ImportError:
# Python < 2.7
from ordereddict import OrderedDict
class Salesforce(object):
"""Salesforce Instance
An instance of Salesforce is a handy way to wrap a Salesforce session
for easy use of the Salesforce REST API.
"""
def __init__(
self, username=None, password=None, security_token=None,
session_id=None, instance=None, instance_url=None,
organizationId=None, sandbox=False, version=DEFAULT_API_VERSION,
proxies=None, session=None):
"""Initialize the instance with the given parameters.
Available kwargs
Password Authentication:
* username -- the Salesforce username to use for authentication
* password -- the password for the username
* security_token -- the security token for the username
* sandbox -- True if you want to login to `test.salesforce.com`, False
if you want to login to `login.salesforce.com`.
Direct Session and Instance Access:
* session_id -- Access token for this session
Then either
* instance -- Domain of your Salesforce instance, i.e. `na1.salesforce.com`
OR
* instance_url -- Full URL of your instance i.e. `https://na1.salesforce.com`
Universal Kwargs:
* version -- the version of the Salesforce API to use, for example `29.0`
* proxies -- the optional map of scheme to proxy server
* session -- Custom requests session, created in calling code. This
enables the use of requests Session features not otherwise
exposed by simple_salesforce.
"""
# Determine if the user passed in the optional version and/or sandbox kwargs
self.sf_version = version
self.sandbox = sandbox
self.proxies = proxies
# Determine if the user wants to use our username/password auth or pass in their own information
if all(arg is not None for arg in (username, password, security_token)):
self.auth_type = "password"
# Pass along the username/password to our login helper
self.session_id, self.sf_instance = SalesforceLogin(
session=session,
username=username,
password=password,
security_token=security_token,
sandbox=self.sandbox,
sf_version=self.sf_version,
proxies=self.proxies)
elif all(arg is not None for arg in (session_id, instance or instance_url)):
self.auth_type = "direct"
self.session_id = session_id
# If the user provides the full url (as returned by the OAuth interface for
# example) extract the hostname (which we rely on)
if instance_url is not None:
self.sf_instance = urlparse(instance_url).hostname
else:
self.sf_instance = instance
elif all(arg is not None for arg in (username, password, organizationId)):
self.auth_type = 'ipfilter'
# Pass along the username/password to our login helper
self.session_id, self.sf_instance = SalesforceLogin(
session=session,
username=username,
password=password,
organizationId=organizationId,
sandbox=self.sandbox,
sf_version=self.sf_version,
proxies=self.proxies)
else:
raise TypeError(
'You must provide login information or an instance and token'
)
if self.sandbox:
self.auth_site = 'https://test.salesforce.com'
else:
self.auth_site = 'https://login.salesforce.com'
self.request = session or requests.Session()
self.request.proxies = self.proxies
self.headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + self.session_id,
'X-PrettyPrint': '1'
}
self.base_url = ('https://{instance}/services/data/v{version}/'
.format(instance=self.sf_instance,
version=self.sf_version))
self.apex_url = ('https://{instance}/services/apexrest/'
.format(instance=self.sf_instance))
def describe(self):
url = self.base_url + "sobjects"
result = self.request.get(url, headers=self.headers)
if result.status_code != 200:
raise SalesforceGeneralError(url,
'describe',
result.status_code,
result.content)
json_result = result.json(object_pairs_hook=OrderedDict)
if len(json_result) == 0:
return None
else:
return json_result
# SObject Handler
def __getattr__(self, name):
"""Returns an `SFType` instance for the given Salesforce object type
(given in `name`).
The magic part of the SalesforceAPI, this function translates
calls such as `salesforce_api_instance.Lead.metadata()` into fully
constituted `SFType` instances to make a nice Python API wrapper
for the REST API.
Arguments:
* name -- the name of a Salesforce object type, e.g. Lead or Contact
"""
# fix to enable serialization (https://github.com/heroku/simple-salesforce/issues/60)
if name.startswith('__'):
return super(Salesforce, self).__getattr__(name)
return SFType(name, self.session_id, self.sf_instance, self.sf_version, self.proxies)
# User utility methods
def set_password(self, user, password):
"""Sets the password of a user
salesforce dev documentation link:
https://www.salesforce.com/us/developer/docs/api_rest/Content/dome_sobject_user_password.htm
Arguments:
* user: the userID of the user to set
* password: the new password
"""
url = self.base_url + 'sobjects/User/%s/password' % user
params = { 'NewPassword' : password, }
result = self.request.post(url, headers=self.headers, data=json.dumps(params))
# Salesforce returns 204 No Content when the request is successful
if result.status_code != 200 and result.status_code != 204:
raise SalesforceGeneralError(url,
'User',
result.status_code,
result.content)
json_result = result.json(object_pairs_hook=OrderedDict)
if len(json_result) == 0:
return None
else:
return json_result
def setPassword(self, user, password):
import warnings
warnings.warn(
"This method has been deprecated. Please use set_password instread.", DeprecationWarning)
return self.set_password(user, password)
# Generic Rest Function
def restful(self, path, params):
"""Allows you to make a direct REST call if you know the path
Arguments:
* path: The path of the request
Example: sobjects/User/ABC123/password
* params: dict of parameters to pass to the path
"""
url = self.base_url + path
result = self.request.get(url, headers=self.headers, params=params)
if result.status_code != 200:
raise SalesforceGeneralError(url,
path,
result.status_code,
result.content)
json_result = result.json(object_pairs_hook=OrderedDict)
if len(json_result) == 0:
return None
else:
return json_result
# Search Functions
def search(self, search):
"""Returns the result of a Salesforce search as a dict decoded from
the Salesforce response JSON payload.
Arguments:
* search -- the fully formatted SOSL search string, e.g.
`FIND {Waldo}`
"""
url = self.base_url + 'search/'
# `requests` will correctly encode the query string passed as `params`
params = {'q': search}
result = self.request.get(url, headers=self.headers, params=params)
if result.status_code != 200:
raise SalesforceGeneralError(url,
'search',
result.status_code,
result.content)
json_result = result.json(object_pairs_hook=OrderedDict)
if len(json_result) == 0:
return None
else:
return json_result
def quick_search(self, search):
"""Returns the result of a Salesforce search as a dict decoded from
the Salesforce response JSON payload.
Arguments:
* search -- the non-SOSL search string, e.g. `Waldo`. This search
string will be wrapped to read `FIND {Waldo}` before being
sent to Salesforce
"""
search_string = u'FIND {{{search_string}}}'.format(search_string=search)
return self.search(search_string)
# Query Handler
def query(self, query, **kwargs):
"""Return the result of a Salesforce SOQL query as a dict decoded from
the Salesforce response JSON payload.
Arguments:
* query -- the SOQL query to send to Salesforce, e.g.
`SELECT Id FROM Lead WHERE Email = "[email protected]"`
"""
url = self.base_url + 'query/'
params = {'q': query}
# `requests` will correctly encode the query string passed as `params`
result = self.request.get(url, headers=self.headers, params=params, **kwargs)
if result.status_code != 200:
_exception_handler(result)
return result.json(object_pairs_hook=OrderedDict)
def query_more(self, next_records_identifier, identifier_is_url=False, **kwargs):
"""Retrieves more results from a query that returned more results
than the batch maximum. Returns a dict decoded from the Salesforce
response JSON payload.
Arguments:
* next_records_identifier -- either the Id of the next Salesforce
object in the result, or a URL to the
next record in the result.
* identifier_is_url -- True if `next_records_identifier` should be
treated as a URL, False if
`next_records_identifier` should be treated as
an Id.
"""
if identifier_is_url:
# Don't use `self.base_url` here because the full URI is provided
url = (u'https://{instance}{next_record_url}'
.format(instance=self.sf_instance,
next_record_url=next_records_identifier))
else:
url = self.base_url + 'query/{next_record_id}'
url = url.format(next_record_id=next_records_identifier)
result = self.request.get(url, headers=self.headers, **kwargs)
if result.status_code != 200:
_exception_handler(result)
return result.json(object_pairs_hook=OrderedDict)
def query_all(self, query, **kwargs):
"""Returns the full set of results for the `query`. This is a
convenience wrapper around `query(...)` and `query_more(...)`.
The returned dict is the decoded JSON payload from the final call to
Salesforce, but with the `totalSize` field representing the full
number of results retrieved and the `records` list representing the
full list of records retrieved.
Arguments
* query -- the SOQL query to send to Salesforce, e.g.
`SELECT Id FROM Lead WHERE Email = "[email protected]"`
"""
def get_all_results(previous_result, **kwargs):
"""Inner function for recursing until there are no more results.
Returns the full set of results that will be the return value for
`query_all(...)`
Arguments:
* previous_result -- the modified result of previous calls to
Salesforce for this query
"""
if previous_result['done']:
return previous_result
else:
result = self.query_more(previous_result['nextRecordsUrl'],
identifier_is_url=True, **kwargs)
result['totalSize'] += previous_result['totalSize']
# Include the new list of records with the previous list
previous_result['records'].extend(result['records'])
result['records'] = previous_result['records']
# Continue the recursion
return get_all_results(result, **kwargs)
# Make the initial query to Salesforce
result = self.query(query, **kwargs)
# The number of results might have exceeded the Salesforce batch limit
# so check whether there are more results and retrieve them if so.
return get_all_results(result, **kwargs)
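# Example (illustrative, not part of the upstream docstrings): a query that
# exceeds the batch size is transparently stitched together, e.g.
#   sf.query_all("SELECT Id FROM Lead")
# returns one dict whose 'records' list holds every page retrieved via
# query_more() and whose 'totalSize' reflects the full count.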
def apexecute(self, action, method='GET', data=None, **kwargs):
"""Makes an HTTP request to an APEX REST endpoint
Arguments:
* action -- The REST endpoint for the request.
* method -- HTTP method for the request (default GET)
* data -- A dict of parameters to send in a POST / PUT request
* kwargs -- Additional kwargs to pass to `requests.request`
"""
result = self._call_salesforce(method, self.apex_url + action,
data=json.dumps(data), **kwargs)
if result.status_code == 200:
try:
response_content = result.json()
except Exception:
response_content = result.text
return response_content
def _call_salesforce(self, method, url, **kwargs):
"""Utility method for performing HTTP call to Salesforce.
Returns a `requests.result` object.
"""
result = self.request.request(method, url, headers=self.headers, **kwargs)
if result.status_code >= 300:
_exception_handler(result)
return result
class SFType(object):
"""An interface to a specific type of SObject"""
def __init__(self, object_name, session_id, sf_instance, sf_version='27.0', proxies=None):
"""Initialize the instance with the given parameters.
Arguments:
* object_name -- the name of the type of SObject this represents,
e.g. `Lead` or `Contact`
* session_id -- the session ID for authenticating to Salesforce
* sf_instance -- the domain of the instance of Salesforce to use
* sf_version -- the version of the Salesforce API to use
* proxies -- the optional map of scheme to proxy server
"""
self.session_id = session_id
self.name = object_name
self.request = requests.Session()
self.request.proxies = proxies
self.base_url = (u'https://{instance}/services/data/v{sf_version}/sobjects/{object_name}/'
.format(instance=sf_instance,
object_name=object_name,
sf_version=sf_version))
def metadata(self):
"""Returns the result of a GET to `.../{object_name}/` as a dict
decoded from the JSON payload returned by Salesforce.
"""
result = self._call_salesforce('GET', self.base_url)
return result.json(object_pairs_hook=OrderedDict)
def describe(self):
"""Returns the result of a GET to `.../{object_name}/describe` as a
dict decoded from the JSON payload returned by Salesforce.
"""
result = self._call_salesforce('GET', self.base_url + 'describe')
return result.json(object_pairs_hook=OrderedDict)
def describe_layout(self, record_id):
"""Returns the result of a GET to `.../{object_name}/describe/layouts/<recordid>` as a
dict decoded from the JSON payload returned by Salesforce.
"""
result = self._call_salesforce('GET', self.base_url + 'describe/layouts/' + record_id)
return result.json(object_pairs_hook=OrderedDict)
def get(self, record_id):
"""Returns the result of a GET to `.../{object_name}/{record_id}` as a
dict decoded from the JSON payload returned by Salesforce.
Arguments:
* record_id -- the Id of the SObject to get
"""
result = self._call_salesforce('GET', self.base_url + record_id)
return result.json(object_pairs_hook=OrderedDict)
def get_by_custom_id(self, custom_id_field, custom_id):
"""Returns the result of a GET to `.../{object_name}/{custom_id_field}/{custom_id}` as a
dict decoded from the JSON payload returned by Salesforce.
Arguments:
* custom_id_field -- the API name of a custom field that was defined as an External ID
* custom_id - the External ID value of the SObject to get
"""
custom_url = self.base_url + '{custom_id_field}/{custom_id}'.format(
custom_id_field=custom_id_field, custom_id=custom_id)
result = self._call_salesforce('GET', custom_url)
return result.json(object_pairs_hook=OrderedDict)
def create(self, data):
"""Creates a new SObject using a POST to `.../{object_name}/`.
Returns a dict decoded from the JSON payload returned by Salesforce.
Arguments:
* data -- a dict of the data to create the SObject from. It will be
JSON-encoded before being transmitted.
"""
result = self._call_salesforce('POST', self.base_url,
data=json.dumps(data))
return result.json(object_pairs_hook=OrderedDict)
def upsert(self, record_id, data, raw_response=False):
"""Creates or updates an SObject using a PATCH to
`.../{object_name}/{record_id}`.
If `raw_response` is false (the default), returns the status code
returned by Salesforce. Otherwise, return the `requests.Response`
object.
Arguments:
* record_id -- an identifier for the SObject as described in the
Salesforce documentation
* data -- a dict of the data to create or update the SObject from. It
will be JSON-encoded before being transmitted.
* raw_response -- a boolean indicating whether to return the response
directly, instead of the status code.
"""
result = self._call_salesforce('PATCH', self.base_url + record_id,
data=json.dumps(data))
return self._raw_response(result, raw_response)
def update(self, record_id, data, raw_response=False):
"""Updates an SObject using a PATCH to
`.../{object_name}/{record_id}`.
If `raw_response` is false (the default), returns the status code
returned by Salesforce. Otherwise, return the `requests.Response`
object.
Arguments:
* record_id -- the Id of the SObject to update
* data -- a dict of the data to update the SObject from. It will be
JSON-encoded before being transmitted.
* raw_response -- a boolean indicating whether to return the response
directly, instead of the status code.
"""
result = self._call_salesforce('PATCH', self.base_url + record_id,
data=json.dumps(data))
return self._raw_response(result, raw_response)
def delete(self, record_id, raw_response=False):
"""Deletes an SObject using a DELETE to
`.../{object_name}/{record_id}`.
If `raw_response` is false (the default), returns the status code
returned by Salesforce. Otherwise, return the `requests.Response`
object.
Arguments:
* record_id -- the Id of the SObject to delete
* raw_response -- a boolean indicating whether to return the response
directly, instead of the status code.
"""
result = self._call_salesforce('DELETE', self.base_url + record_id)
return self._raw_response(result, raw_response)
def deleted(self, start, end):
"""Use the SObject Get Deleted resource to get a list of deleted records for the specified object.
.../deleted/?start=2013-05-05T00:00:00+00:00&end=2013-05-10T00:00:00+00:00
* start -- start datetime object
* end -- end datetime object
"""
url = self.base_url + 'deleted/?start={start}&end={end}'.format(
start=date_to_iso8601(start), end=date_to_iso8601(end))
result = self._call_salesforce('GET', url)
return result.json(object_pairs_hook=OrderedDict)
def updated(self, start, end):
"""Use the SObject Get Updated resource to get a list of updated (modified or added)
records for the specified object.
.../updated/?start=2014-03-20T00:00:00+00:00&end=2014-03-22T00:00:00+00:00
* start -- start datetime object
* end -- end datetime object
"""
url = self.base_url + 'updated/?start={start}&end={end}'.format(
start=date_to_iso8601(start), end=date_to_iso8601(end))
result = self._call_salesforce('GET', url)
return result.json(object_pairs_hook=OrderedDict)
def _call_salesforce(self, method, url, **kwargs):
"""Utility method for performing HTTP call to Salesforce.
Returns a `requests.result` object.
"""
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + self.session_id,
'X-PrettyPrint': '1'
}
result = self.request.request(method, url, headers=headers, **kwargs)
if result.status_code >= 300:
_exception_handler(result, self.name)
return result
def _raw_response(self, response, body_flag):
"""Utility method for processing the response and returning either the
status code or the response object.
Returns either an `int` or a `requests.Response` object.
"""
if not body_flag:
return response.status_code
else:
return response
class SalesforceAPI(Salesforce):
"""Depreciated SalesforceAPI Instance
This class implements the Username/Password Authentication Mechanism using Arguments
It has since been surpassed by the 'Salesforce' class, which relies on kwargs
"""
def __init__(self, username, password, security_token, sandbox=False,
sf_version='27.0'):
"""Initialize the instance with the given parameters.
Arguments:
* username -- the Salesforce username to use for authentication
* password -- the password for the username
* security_token -- the security token for the username
* sandbox -- True if you want to login to `test.salesforce.com`, False
if you want to login to `login.salesforce.com`.
* sf_version -- the version of the Salesforce API to use, for example
"27.0"
"""
import warnings
warnings.warn(
"Use of login arguments has been depreciated. Please use kwargs",
DeprecationWarning
)
super(SalesforceAPI, self).__init__(username=username,
password=password,
security_token=security_token,
sandbox=sandbox,
version=sf_version)
def _exception_handler(result, name=""):
"""Exception router. Determines which error to raise for bad results"""
try:
response_content = result.json()
except Exception:
response_content = result.text
exc_map = {
300: SalesforceMoreThanOneRecord,
400: SalesforceMalformedRequest,
401: SalesforceExpiredSession,
403: SalesforceRefusedRequest,
404: SalesforceResourceNotFound,
}
exc_cls = exc_map.get(result.status_code, SalesforceGeneralError)
raise exc_cls(result.url, result.status_code, name, response_content)
class SalesforceMoreThanOneRecord(SalesforceError):
"""
Error Code: 300
The value returned when an external ID exists in more than one record. The
response body contains the list of matching records.
"""
message = u"More than one record for {url}. Response content: {content}"
class SalesforceMalformedRequest(SalesforceError):
"""
Error Code: 400
The request couldn't be understood, usually because the JSON or XML body contains an error.
"""
message = u"Malformed request {url}. Response content: {content}"
class SalesforceExpiredSession(SalesforceError):
"""
Error Code: 401
The session ID or OAuth token used has expired or is invalid. The response
body contains the message and errorCode.
"""
message = u"Expired session for {url}. Response content: {content}"
class SalesforceRefusedRequest(SalesforceError):
"""
Error Code: 403
The request has been refused. Verify that the logged-in user has
appropriate permissions.
"""
message = u"Request refused for {url}. Response content: {content}"
class SalesforceResourceNotFound(SalesforceError):
"""
Error Code: 404
The requested resource couldn't be found. Check the URI for errors, and
verify that there are no sharing issues.
"""
message = u'Resource {name} Not Found. Response content: {content}'
def __str__(self):
return self.message.format(name=self.resource_name,
content=self.content)
class SalesforceGeneralError(SalesforceError):
"""
A non-specific Salesforce error.
"""
message = u'Error Code {status}. Response content: {content}'
def __str__(self):
return self.message.format(status=self.status, content=self.content)
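# Illustrative sketch (not part of this module): typical use of the classes
# defined above. The credentials, object names and field values are
# hypothetical placeholders.
def _example_usage():
    sf = Salesforce(username='user@example.com', password='password',
                    security_token='token')
    # SOQL query, automatically paged by query_all()
    leads = sf.query_all("SELECT Id, Email FROM Lead")
    # Dynamic SObject access via __getattr__, e.g. the Contact type
    sf.Contact.create({'LastName': 'Smith', 'Email': 'smith@example.com'})
    return leads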
|
"""
YumConf - file ``/etc/yum.conf``
================================
This module provides parsing for the ``/etc/yum.conf`` file.
The ``YumConf`` class parses the information in the file
``/etc/yum.conf``. See the ``IniConfigFile`` class for more
information on attributes and methods.
Sample input data looks like::
[main]
cachedir=/var/cache/yum/$basearch/$releasever
keepcache=0
debuglevel=2
logfile=/var/log/yum.log
exactarch=1
obsoletes=1
gpgcheck=1
plugins=1
installonly_limit=3
[rhel-7-server-rpms]
metadata_expire = 86400
baseurl = https://cdn.redhat.com/content/rhel/server/7/$basearch
name = Red Hat Enterprise Linux 7 Server (RPMs)
gpgkey = file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
enabled = 1
gpgcheck = 1
Examples:
>>> yconf = shared[YumConf]
>>> yconf.defaults()
{}
>>> 'main' in yconf
True
>>> 'rhel-7-server-rpms' in yconf
True
>>> yconf.has_option('main', 'gpgcheck')
True
>>> yconf.has_option('main', 'foo')
False
>>> yconf.get('rhel-7-server-rpms', 'enabled')
'1'
>>> yconf.items('main')
{'plugins': '1',
'keepcache': '0',
'cachedir': '/var/cache/yum/$basearch/$releasever',
'exactarch': '1',
'obsoletes': '1',
'installonly_limit': '3',
'debuglevel': '2',
'gpgcheck': '1',
'logfile': '/var/log/yum.log'}
"""
from insights.contrib.ConfigParser import NoOptionError
from .. import parser, IniConfigFile
from insights.specs import yum_conf
@parser(yum_conf)
class YumConf(IniConfigFile):
"""Parse contents of file ``/etc/yum.conf``."""
def parse_content(self, content):
super(YumConf, self).parse_content(content)
# File /etc/yum.conf may contain repos definitions.
# Keywords 'gpgkey' and 'baseurl' might contain multiple
# values separated by comma. Convert those values into a list.
for section in self.sections():
for key in ('gpgkey', 'baseurl'):
try:
value = self.get(section, key)
if value and isinstance(value, str):
self.data.set(section, key, value.split(','))
except NoOptionError:
pass
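# Illustrative note (not part of the parser): given a repo section such as
#
#   [epel]
#   gpgkey = https://example.com/RPM-GPG-KEY-1,https://example.com/RPM-GPG-KEY-2
#
# parse_content() rewrites the comma-separated 'gpgkey' (and 'baseurl') values
# so that yconf.get('epel', 'gpgkey') returns the list
# ['https://example.com/RPM-GPG-KEY-1', 'https://example.com/RPM-GPG-KEY-2'],
# while every other option remains a plain string.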
|
""" Cisco_IOS_XR_man_xml_ttyagent_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR man\-xml\-ttyagent package operational data.
This module contains definitions
for the following management objects\:
netconf\: NETCONF operational information
xr\-xml\: xr xml
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class XrXmlSessionAlarmRegisterEnum(Enum):
"""
XrXmlSessionAlarmRegisterEnum
AlarmNotify
.. data:: registered = 1
Registered
.. data:: not_registered = 2
NotRegistered
"""
registered = 1
not_registered = 2
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_man_xml_ttyagent_oper as meta
return meta._meta_table['XrXmlSessionAlarmRegisterEnum']
class XrXmlSessionStateEnum(Enum):
"""
XrXmlSessionStateEnum
SessionState
.. data:: idle = 1
Idle
.. data:: busy = 2
Busy
"""
idle = 1
busy = 2
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_man_xml_ttyagent_oper as meta
return meta._meta_table['XrXmlSessionStateEnum']
class Netconf(object):
"""
NETCONF operational information
.. attribute:: agent
NETCONF agent operational information
**type**\: :py:class:`Agent <ydk.models.cisco_ios_xr.Cisco_IOS_XR_man_xml_ttyagent_oper.Netconf.Agent>`
"""
_prefix = 'man-xml-ttyagent-oper'
_revision = '2015-07-30'
def __init__(self):
self.agent = Netconf.Agent()
self.agent.parent = self
class Agent(object):
"""
NETCONF agent operational information
.. attribute:: tty
NETCONF agent over TTY
**type**\: :py:class:`Tty <ydk.models.cisco_ios_xr.Cisco_IOS_XR_man_xml_ttyagent_oper.Netconf.Agent.Tty>`
"""
_prefix = 'man-xml-ttyagent-oper'
_revision = '2015-07-30'
def __init__(self):
self.parent = None
self.tty = Netconf.Agent.Tty()
self.tty.parent = self
class Tty(object):
"""
NETCONF agent over TTY
.. attribute:: sessions
Session information
**type**\: :py:class:`Sessions <ydk.models.cisco_ios_xr.Cisco_IOS_XR_man_xml_ttyagent_oper.Netconf.Agent.Tty.Sessions>`
"""
_prefix = 'man-xml-ttyagent-oper'
_revision = '2015-07-30'
def __init__(self):
self.parent = None
self.sessions = Netconf.Agent.Tty.Sessions()
self.sessions.parent = self
class Sessions(object):
"""
Session information
.. attribute:: session
Session information
**type**\: list of :py:class:`Session <ydk.models.cisco_ios_xr.Cisco_IOS_XR_man_xml_ttyagent_oper.Netconf.Agent.Tty.Sessions.Session>`
"""
_prefix = 'man-xml-ttyagent-oper'
_revision = '2015-07-30'
def __init__(self):
self.parent = None
self.session = YList()
self.session.parent = self
self.session.name = 'session'
class Session(object):
"""
Session information
.. attribute:: session_id <key>
Session ID
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: admin_config_session_id
Admin config session ID
**type**\: str
.. attribute:: alarm_notification
is the session registered for alarm notifications
**type**\: :py:class:`XrXmlSessionAlarmRegisterEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_man_xml_ttyagent_oper.XrXmlSessionAlarmRegisterEnum>`
.. attribute:: client_address
ip address of the client
**type**\: str
.. attribute:: client_port
client's port
**type**\: int
**range:** 0..4294967295
.. attribute:: config_session_id
Config session ID
**type**\: str
.. attribute:: elapsed_time
Elapsed time(seconds) since a session is created
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: last_state_change
Time(seconds) since last session state change happened
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: start_time
session start time in seconds since the Unix Epoch
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: state
state of the session idle/busy
**type**\: :py:class:`XrXmlSessionStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_man_xml_ttyagent_oper.XrXmlSessionStateEnum>`
.. attribute:: username
Username
**type**\: str
.. attribute:: vrf_name
VRF name
**type**\: str
"""
_prefix = 'man-xml-ttyagent-oper'
_revision = '2015-07-30'
def __init__(self):
self.parent = None
self.session_id = None
self.admin_config_session_id = None
self.alarm_notification = None
self.client_address = None
self.client_port = None
self.config_session_id = None
self.elapsed_time = None
self.last_state_change = None
self.start_time = None
self.state = None
self.username = None
self.vrf_name = None
@property
def _common_path(self):
if self.session_id is None:
raise YPYModelError('Key property session_id is None')
return '/Cisco-IOS-XR-man-xml-ttyagent-oper:netconf/Cisco-IOS-XR-man-xml-ttyagent-oper:agent/Cisco-IOS-XR-man-xml-ttyagent-oper:tty/Cisco-IOS-XR-man-xml-ttyagent-oper:sessions/Cisco-IOS-XR-man-xml-ttyagent-oper:session[Cisco-IOS-XR-man-xml-ttyagent-oper:session-id = ' + str(self.session_id) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.session_id is not None:
return True
if self.admin_config_session_id is not None:
return True
if self.alarm_notification is not None:
return True
if self.client_address is not None:
return True
if self.client_port is not None:
return True
if self.config_session_id is not None:
return True
if self.elapsed_time is not None:
return True
if self.last_state_change is not None:
return True
if self.start_time is not None:
return True
if self.state is not None:
return True
if self.username is not None:
return True
if self.vrf_name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_man_xml_ttyagent_oper as meta
return meta._meta_table['Netconf.Agent.Tty.Sessions.Session']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-man-xml-ttyagent-oper:netconf/Cisco-IOS-XR-man-xml-ttyagent-oper:agent/Cisco-IOS-XR-man-xml-ttyagent-oper:tty/Cisco-IOS-XR-man-xml-ttyagent-oper:sessions'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.session is not None:
for child_ref in self.session:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_man_xml_ttyagent_oper as meta
return meta._meta_table['Netconf.Agent.Tty.Sessions']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-man-xml-ttyagent-oper:netconf/Cisco-IOS-XR-man-xml-ttyagent-oper:agent/Cisco-IOS-XR-man-xml-ttyagent-oper:tty'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.sessions is not None and self.sessions._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_man_xml_ttyagent_oper as meta
return meta._meta_table['Netconf.Agent.Tty']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-man-xml-ttyagent-oper:netconf/Cisco-IOS-XR-man-xml-ttyagent-oper:agent'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.tty is not None and self.tty._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_man_xml_ttyagent_oper as meta
return meta._meta_table['Netconf.Agent']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-man-xml-ttyagent-oper:netconf'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.agent is not None and self.agent._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_man_xml_ttyagent_oper as meta
return meta._meta_table['Netconf']['meta_info']
class XrXml(object):
"""
xr xml
.. attribute:: agent
XML agents
**type**\: :py:class:`Agent <ydk.models.cisco_ios_xr.Cisco_IOS_XR_man_xml_ttyagent_oper.XrXml.Agent>`
"""
_prefix = 'man-xml-ttyagent-oper'
_revision = '2015-07-30'
def __init__(self):
self.agent = XrXml.Agent()
self.agent.parent = self
class Agent(object):
"""
XML agents
.. attribute:: default
Default sessions information
**type**\: :py:class:`Default <ydk.models.cisco_ios_xr.Cisco_IOS_XR_man_xml_ttyagent_oper.XrXml.Agent.Default>`
.. attribute:: ssl
SSL sessions information
**type**\: :py:class:`Ssl <ydk.models.cisco_ios_xr.Cisco_IOS_XR_man_xml_ttyagent_oper.XrXml.Agent.Ssl>`
.. attribute:: tty
TTY sessions information
**type**\: :py:class:`Tty <ydk.models.cisco_ios_xr.Cisco_IOS_XR_man_xml_ttyagent_oper.XrXml.Agent.Tty>`
"""
_prefix = 'man-xml-ttyagent-oper'
_revision = '2015-07-30'
def __init__(self):
self.parent = None
self.default = XrXml.Agent.Default()
self.default.parent = self
self.ssl = XrXml.Agent.Ssl()
self.ssl.parent = self
self.tty = XrXml.Agent.Tty()
self.tty.parent = self
class Tty(object):
"""
TTY sessions information
.. attribute:: sessions
sessions information
**type**\: :py:class:`Sessions <ydk.models.cisco_ios_xr.Cisco_IOS_XR_man_xml_ttyagent_oper.XrXml.Agent.Tty.Sessions>`
"""
_prefix = 'man-xml-ttyagent-oper'
_revision = '2015-07-30'
def __init__(self):
self.parent = None
self.sessions = XrXml.Agent.Tty.Sessions()
self.sessions.parent = self
class Sessions(object):
"""
sessions information
.. attribute:: session
xml sessions information
**type**\: list of :py:class:`Session <ydk.models.cisco_ios_xr.Cisco_IOS_XR_man_xml_ttyagent_oper.XrXml.Agent.Tty.Sessions.Session>`
"""
_prefix = 'man-xml-ttyagent-oper'
_revision = '2015-07-30'
def __init__(self):
self.parent = None
self.session = YList()
self.session.parent = self
self.session.name = 'session'
class Session(object):
"""
xml sessions information
.. attribute:: session_id <key>
Session Id
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: admin_config_session_id
Admin config session ID
**type**\: str
.. attribute:: alarm_notification
is the session registered for alarm notifications
**type**\: :py:class:`XrXmlSessionAlarmRegisterEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_man_xml_ttyagent_oper.XrXmlSessionAlarmRegisterEnum>`
.. attribute:: client_address
ip address of the client
**type**\: str
.. attribute:: client_port
client's port
**type**\: int
**range:** 0..4294967295
.. attribute:: config_session_id
Config session ID
**type**\: str
.. attribute:: elapsed_time
Elapsed time(seconds) since a session is created
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: last_state_change
Time(seconds) since last session state change happened
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: start_time
session start time in seconds since the Unix Epoch
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: state
state of the session idle/busy
**type**\: :py:class:`XrXmlSessionStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_man_xml_ttyagent_oper.XrXmlSessionStateEnum>`
.. attribute:: username
Username
**type**\: str
.. attribute:: vrf_name
VRF name
**type**\: str
"""
_prefix = 'man-xml-ttyagent-oper'
_revision = '2015-07-30'
def __init__(self):
self.parent = None
self.session_id = None
self.admin_config_session_id = None
self.alarm_notification = None
self.client_address = None
self.client_port = None
self.config_session_id = None
self.elapsed_time = None
self.last_state_change = None
self.start_time = None
self.state = None
self.username = None
self.vrf_name = None
@property
def _common_path(self):
if self.session_id is None:
raise YPYModelError('Key property session_id is None')
return '/Cisco-IOS-XR-man-xml-ttyagent-oper:xr-xml/Cisco-IOS-XR-man-xml-ttyagent-oper:agent/Cisco-IOS-XR-man-xml-ttyagent-oper:tty/Cisco-IOS-XR-man-xml-ttyagent-oper:sessions/Cisco-IOS-XR-man-xml-ttyagent-oper:session[Cisco-IOS-XR-man-xml-ttyagent-oper:session-id = ' + str(self.session_id) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.session_id is not None:
return True
if self.admin_config_session_id is not None:
return True
if self.alarm_notification is not None:
return True
if self.client_address is not None:
return True
if self.client_port is not None:
return True
if self.config_session_id is not None:
return True
if self.elapsed_time is not None:
return True
if self.last_state_change is not None:
return True
if self.start_time is not None:
return True
if self.state is not None:
return True
if self.username is not None:
return True
if self.vrf_name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_man_xml_ttyagent_oper as meta
return meta._meta_table['XrXml.Agent.Tty.Sessions.Session']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-man-xml-ttyagent-oper:xr-xml/Cisco-IOS-XR-man-xml-ttyagent-oper:agent/Cisco-IOS-XR-man-xml-ttyagent-oper:tty/Cisco-IOS-XR-man-xml-ttyagent-oper:sessions'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.session is not None:
for child_ref in self.session:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_man_xml_ttyagent_oper as meta
return meta._meta_table['XrXml.Agent.Tty.Sessions']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-man-xml-ttyagent-oper:xr-xml/Cisco-IOS-XR-man-xml-ttyagent-oper:agent/Cisco-IOS-XR-man-xml-ttyagent-oper:tty'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.sessions is not None and self.sessions._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_man_xml_ttyagent_oper as meta
return meta._meta_table['XrXml.Agent.Tty']['meta_info']
class Default(object):
"""
Default sessions information
.. attribute:: sessions
sessions information
**type**\: :py:class:`Sessions <ydk.models.cisco_ios_xr.Cisco_IOS_XR_man_xml_ttyagent_oper.XrXml.Agent.Default.Sessions>`
"""
_prefix = 'man-xml-ttyagent-oper'
_revision = '2015-07-30'
def __init__(self):
self.parent = None
self.sessions = XrXml.Agent.Default.Sessions()
self.sessions.parent = self
class Sessions(object):
"""
sessions information
.. attribute:: session
xml sessions information
**type**\: list of :py:class:`Session <ydk.models.cisco_ios_xr.Cisco_IOS_XR_man_xml_ttyagent_oper.XrXml.Agent.Default.Sessions.Session>`
"""
_prefix = 'man-xml-ttyagent-oper'
_revision = '2015-07-30'
def __init__(self):
self.parent = None
self.session = YList()
self.session.parent = self
self.session.name = 'session'
class Session(object):
"""
xml sessions information
.. attribute:: session_id <key>
Session Id
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: admin_config_session_id
Admin config session ID
**type**\: str
.. attribute:: alarm_notification
is the session registered for alarm notifications
**type**\: :py:class:`XrXmlSessionAlarmRegisterEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_man_xml_ttyagent_oper.XrXmlSessionAlarmRegisterEnum>`
.. attribute:: client_address
ip address of the client
**type**\: str
.. attribute:: client_port
client's port
**type**\: int
**range:** 0..4294967295
.. attribute:: config_session_id
Config session ID
**type**\: str
.. attribute:: elapsed_time
Elapsed time(seconds) since a session is created
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: last_state_change
Time(seconds) since last session state change happened
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: start_time
session start time in seconds since the Unix Epoch
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: state
state of the session idle/busy
**type**\: :py:class:`XrXmlSessionStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_man_xml_ttyagent_oper.XrXmlSessionStateEnum>`
.. attribute:: username
Username
**type**\: str
.. attribute:: vrf_name
VRF name
**type**\: str
"""
_prefix = 'man-xml-ttyagent-oper'
_revision = '2015-07-30'
def __init__(self):
self.parent = None
self.session_id = None
self.admin_config_session_id = None
self.alarm_notification = None
self.client_address = None
self.client_port = None
self.config_session_id = None
self.elapsed_time = None
self.last_state_change = None
self.start_time = None
self.state = None
self.username = None
self.vrf_name = None
@property
def _common_path(self):
if self.session_id is None:
raise YPYModelError('Key property session_id is None')
return '/Cisco-IOS-XR-man-xml-ttyagent-oper:xr-xml/Cisco-IOS-XR-man-xml-ttyagent-oper:agent/Cisco-IOS-XR-man-xml-ttyagent-oper:default/Cisco-IOS-XR-man-xml-ttyagent-oper:sessions/Cisco-IOS-XR-man-xml-ttyagent-oper:session[Cisco-IOS-XR-man-xml-ttyagent-oper:session-id = ' + str(self.session_id) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.session_id is not None:
return True
if self.admin_config_session_id is not None:
return True
if self.alarm_notification is not None:
return True
if self.client_address is not None:
return True
if self.client_port is not None:
return True
if self.config_session_id is not None:
return True
if self.elapsed_time is not None:
return True
if self.last_state_change is not None:
return True
if self.start_time is not None:
return True
if self.state is not None:
return True
if self.username is not None:
return True
if self.vrf_name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_man_xml_ttyagent_oper as meta
return meta._meta_table['XrXml.Agent.Default.Sessions.Session']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-man-xml-ttyagent-oper:xr-xml/Cisco-IOS-XR-man-xml-ttyagent-oper:agent/Cisco-IOS-XR-man-xml-ttyagent-oper:default/Cisco-IOS-XR-man-xml-ttyagent-oper:sessions'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.session is not None:
for child_ref in self.session:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_man_xml_ttyagent_oper as meta
return meta._meta_table['XrXml.Agent.Default.Sessions']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-man-xml-ttyagent-oper:xr-xml/Cisco-IOS-XR-man-xml-ttyagent-oper:agent/Cisco-IOS-XR-man-xml-ttyagent-oper:default'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.sessions is not None and self.sessions._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_man_xml_ttyagent_oper as meta
return meta._meta_table['XrXml.Agent.Default']['meta_info']
class Ssl(object):
"""
SSL sessions information
.. attribute:: sessions
sessions information
**type**\: :py:class:`Sessions <ydk.models.cisco_ios_xr.Cisco_IOS_XR_man_xml_ttyagent_oper.XrXml.Agent.Ssl.Sessions>`
"""
_prefix = 'man-xml-ttyagent-oper'
_revision = '2015-07-30'
def __init__(self):
self.parent = None
self.sessions = XrXml.Agent.Ssl.Sessions()
self.sessions.parent = self
class Sessions(object):
"""
sessions information
.. attribute:: session
xml sessions information
**type**\: list of :py:class:`Session <ydk.models.cisco_ios_xr.Cisco_IOS_XR_man_xml_ttyagent_oper.XrXml.Agent.Ssl.Sessions.Session>`
"""
_prefix = 'man-xml-ttyagent-oper'
_revision = '2015-07-30'
def __init__(self):
self.parent = None
self.session = YList()
self.session.parent = self
self.session.name = 'session'
class Session(object):
"""
xml sessions information
.. attribute:: session_id <key>
Session Id
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: admin_config_session_id
Admin config session ID
**type**\: str
.. attribute:: alarm_notification
is the session registered for alarm notifications
**type**\: :py:class:`XrXmlSessionAlarmRegisterEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_man_xml_ttyagent_oper.XrXmlSessionAlarmRegisterEnum>`
.. attribute:: client_address
ip address of the client
**type**\: str
.. attribute:: client_port
client's port
**type**\: int
**range:** 0..4294967295
.. attribute:: config_session_id
Config session ID
**type**\: str
.. attribute:: elapsed_time
Elapsed time(seconds) since a session is created
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: last_state_change
Time(seconds) since last session state change happened
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: start_time
session start time in seconds since the Unix Epoch
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: state
state of the session idle/busy
**type**\: :py:class:`XrXmlSessionStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_man_xml_ttyagent_oper.XrXmlSessionStateEnum>`
.. attribute:: username
Username
**type**\: str
.. attribute:: vrf_name
VRF name
**type**\: str
"""
_prefix = 'man-xml-ttyagent-oper'
_revision = '2015-07-30'
def __init__(self):
self.parent = None
self.session_id = None
self.admin_config_session_id = None
self.alarm_notification = None
self.client_address = None
self.client_port = None
self.config_session_id = None
self.elapsed_time = None
self.last_state_change = None
self.start_time = None
self.state = None
self.username = None
self.vrf_name = None
@property
def _common_path(self):
if self.session_id is None:
raise YPYModelError('Key property session_id is None')
return '/Cisco-IOS-XR-man-xml-ttyagent-oper:xr-xml/Cisco-IOS-XR-man-xml-ttyagent-oper:agent/Cisco-IOS-XR-man-xml-ttyagent-oper:ssl/Cisco-IOS-XR-man-xml-ttyagent-oper:sessions/Cisco-IOS-XR-man-xml-ttyagent-oper:session[Cisco-IOS-XR-man-xml-ttyagent-oper:session-id = ' + str(self.session_id) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.session_id is not None:
return True
if self.admin_config_session_id is not None:
return True
if self.alarm_notification is not None:
return True
if self.client_address is not None:
return True
if self.client_port is not None:
return True
if self.config_session_id is not None:
return True
if self.elapsed_time is not None:
return True
if self.last_state_change is not None:
return True
if self.start_time is not None:
return True
if self.state is not None:
return True
if self.username is not None:
return True
if self.vrf_name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_man_xml_ttyagent_oper as meta
return meta._meta_table['XrXml.Agent.Ssl.Sessions.Session']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-man-xml-ttyagent-oper:xr-xml/Cisco-IOS-XR-man-xml-ttyagent-oper:agent/Cisco-IOS-XR-man-xml-ttyagent-oper:ssl/Cisco-IOS-XR-man-xml-ttyagent-oper:sessions'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.session is not None:
for child_ref in self.session:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_man_xml_ttyagent_oper as meta
return meta._meta_table['XrXml.Agent.Ssl.Sessions']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-man-xml-ttyagent-oper:xr-xml/Cisco-IOS-XR-man-xml-ttyagent-oper:agent/Cisco-IOS-XR-man-xml-ttyagent-oper:ssl'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.sessions is not None and self.sessions._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_man_xml_ttyagent_oper as meta
return meta._meta_table['XrXml.Agent.Ssl']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-man-xml-ttyagent-oper:xr-xml/Cisco-IOS-XR-man-xml-ttyagent-oper:agent'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.default is not None and self.default._has_data():
return True
if self.ssl is not None and self.ssl._has_data():
return True
if self.tty is not None and self.tty._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_man_xml_ttyagent_oper as meta
return meta._meta_table['XrXml.Agent']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-man-xml-ttyagent-oper:xr-xml'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.agent is not None and self.agent._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_man_xml_ttyagent_oper as meta
return meta._meta_table['XrXml']['meta_info']
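# Illustrative sketch (not generated code): reading the NETCONF TTY session
# table with ydk-py's CRUD service. The device address and credentials are
# placeholders, and the NetconfServiceProvider/CRUDService API is assumed to
# match the ydk-py release this module was generated for.
def _example_read_netconf_sessions():
    from ydk.providers import NetconfServiceProvider
    from ydk.services import CRUDService
    provider = NetconfServiceProvider(address='192.0.2.1', port=830,
                                      username='admin', password='admin',
                                      protocol='ssh')
    netconf = CRUDService().read(provider, Netconf())
    for s in netconf.agent.tty.sessions.session:
        print('{0} {1} {2}'.format(s.session_id, s.username, s.state))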
|
'''
Takes in a list of values from the database and creates a facesheet.
'''
import os
from docx import Document
from docx.enum.text import WD_ALIGN_PARAGRAPH
def assemble_address(street, apartment, city, state, zip_code):
address = street.title()
if apartment:
address += f' APT: {apartment.title()}'
address += f' {city.title()}, '
address += state.upper()
address += ' ' + zip_code
return address
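# For example (values are made up):
#   assemble_address('123 main st', '4b', 'springfield', 'il', '62704')
# returns '123 Main St APT: 4B Springfield, IL 62704'.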
def parse_row(row_list):
info = {'case_number': row_list[1],
'occurred_date': row_list[2],
'incident_type': row_list[3].title(),
'age': row_list[5],
'name': row_list[7].title(),
'address': assemble_address(row_list[8], row_list[9],
row_list[10], row_list[11],
row_list[12],
),
'DOB': row_list[13],
'phone': row_list[14],
'race': row_list[15].title(),
'sex': row_list[16].title(),
'district': row_list[18].title()}
return info
def district_line(document, district):
p = document.add_paragraph()
p.alignment = WD_ALIGN_PARAGRAPH.RIGHT
p.add_run(f'District: {district}').bold = True
def approval_line(document):
p = document.add_paragraph()
p.alignment = WD_ALIGN_PARAGRAPH.RIGHT
p.add_run('Selection: ').bold = True
p.add_run('Pass').bold = True
p.add_run(' | ').bold = True
p.add_run('Fail').bold = True
p.add_run().add_break()
p.add_run('Background: ').bold = True
p.add_run('Pass').bold = True
p.add_run(' | ').bold = True
p.add_run('Fail').bold = True
p.add_run().add_break()
def case_number_line(document, case_number):
p = document.add_paragraph()
p.add_run(f'Case Number: {case_number}')
def name_line(document, name):
p = document.add_paragraph()
p.add_run(f'Name: {name}')
def bio_line(document, sex, race, dob, age):
lines = ['Sex:\t', 'Race:\t', 'DOB:\t', 'Age:\t']
bio_list = [sex, race, dob, age]
p = document.add_paragraph()
for line, bio in zip(lines, bio_list):
p.add_run(f'{line}{bio}')
p.add_run().add_break()
def charge_line(document):
lines = ['Charge Type: State | Municipal',
'Description:', 'Court Date:', 'Citation#:']
p = document.add_paragraph()
for line in lines:
p.add_run(line)
p.add_run().add_break()
def address_line(document, address):
p = document.add_paragraph()
p.add_run(f'Address: {address}')
def phone_line(document, phone):
p = document.add_paragraph()
p.add_run(f'Phone: {phone}')
p.add_run().add_break()
p.add_run('Email:')
def background_line(document):
lines = ['Court Records:', 'Out of State Records:',
'Local Records:', 'Notes:']
for line in lines:
p = document.add_paragraph()
p.add_run(line).bold = True
def last_name_first(name):
suffix = ['II', 'IV', 'JR', 'SR']
name_list = name.split()
name_list.insert(0, name_list.pop())
if name_list[0][:2].upper() in suffix:
name_list.insert(0, name_list.pop())
name = "_".join(name_list)
return name
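# Worked example (illustrative values): last_name_first('John Smith Jr') returns
# 'Smith_Jr_John', and last_name_first('John Smith') returns 'Smith_John', so the
# saved facesheet files sort by last name.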
def save_facesheet(document, directory, name, district, district_folders):
name = last_name_first(name)
if district_folders:
path = f'{directory}/results/{district}/{name}/{name}.docx'
if not os.path.isdir(f'{directory}/results/{district}/{name}'):
os.makedirs(f'{directory}/results/{district}/{name}')
else:
path = f'{directory}/results/{name}/{name}.docx'
if not os.path.isdir(f'{directory}/results/{name}'):
os.makedirs(f'{directory}/results/{name}')
document.save(path)
def assemble_sheet(row_list, directory, district_folders):
info_dict = parse_row(row_list)
document = Document()
district_line(document, info_dict['district'])
approval_line(document)
case_number_line(document, info_dict['case_number'])
name_line(document, info_dict['name'])
bio_line(document, info_dict['sex'], info_dict['race'], info_dict['DOB'], info_dict['age'])
charge_line(document)
address_line(document, info_dict['address'])
phone_line(document, info_dict['phone'])
background_line(document)
save_facesheet(document, directory, info_dict['name'], info_dict['district'], district_folders)
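# Hedged usage sketch, not part of the original module: shows how a single
# database row might be fed to assemble_sheet(). The row layout below is an
# assumption reverse-engineered from the indices parse_row reads; unused
# positions are left blank and the values are placeholders.
def _example_assemble_sheet():
    example_row = [''] * 19
    example_row[1] = '19-001234'      # case number
    example_row[2] = '2019-01-15'     # occurred date
    example_row[3] = 'theft'          # incident type
    example_row[5] = '27'             # age
    example_row[7] = 'john doe'       # name
    example_row[8] = '123 main st'    # street
    example_row[10] = 'springfield'   # city
    example_row[11] = 'il'            # state
    example_row[12] = '62704'         # zip code
    example_row[13] = '1992-03-01'    # DOB
    example_row[14] = '555-0100'      # phone
    example_row[15] = 'white'         # race
    example_row[16] = 'male'          # sex
    example_row[18] = 'central'       # district
    assemble_sheet(example_row, directory='.', district_folders=False)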
def main():
pass
if __name__ == '__main__':
main()
|
"""
-----------------------------------------------------------------------------
This source file is part of OSTIS (Open Semantic Technology for Intelligent Systems)
For the latest info, see http://www.ostis.net
Copyright (c) 2010 OSTIS
OSTIS is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OSTIS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with OSTIS. If not, see <http://www.gnu.org/licenses/>.
-----------------------------------------------------------------------------
"""
'''
Created on 26.01.11
@author: Zhitko V.A.
'''
import pm
import msession
import time
session = msession.MThreadSession(pm.get_session())
segment = session.open_segment(u"/etc/nsm")
class nsm:
info = "NSM Keynodes"
goals = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/goals")
attr_confirmed = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/confirmed_")
attr_active = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/active_")
attr_confirm_ = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/confirm_")
attr_search = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/search_")
attr_searched = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/searched_")
attr_generate = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/generate_")
attr_generated = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/generated_")
result = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/Result")
nsm_command = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/nsm_command")
attr_nsm_command = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/nsm_command_")
attr_nsm_command_pattern = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/nsm_command_pattern_")
attr_nsm_command_elem = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/nsm_command_elem_")
attr_nsm_command_comment = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/nsm_command_comment_")
attr_nsm_command_shortname = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/nsm_command_shortname_")
n_1 = session.find_keynode_full_uri(u"/proc/keynode/1_")
n_2 = session.find_keynode_full_uri(u"/proc/keynode/2_")
attr = {
0:session.find_keynode_full_uri(u"/proc/keynode/1_"),
1:session.find_keynode_full_uri(u"/proc/keynode/2_"),
2:session.find_keynode_full_uri(u"/proc/keynode/3_"),
3:session.find_keynode_full_uri(u"/proc/keynode/4_"),
4:session.find_keynode_full_uri(u"/proc/keynode/5_"),
5:session.find_keynode_full_uri(u"/proc/keynode/6_"),
6:session.find_keynode_full_uri(u"/proc/keynode/7_"),
7:session.find_keynode_full_uri(u"/proc/keynode/8_"),
8:session.find_keynode_full_uri(u"/proc/keynode/9_"),
9:session.find_keynode_full_uri(u"/proc/keynode/10_")
}
def initNSM(ses):
global session
session = ses
global segment
segment = session.open_segment(u"/etc/nsm")
global nsm
nsm.goals = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/goals")
nsm.attr_confirmed = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/confirmed_")
nsm.attr_active = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/active_")
nsm.attr_confirm_ = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/confirm_")
nsm.attr_search = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/search_")
nsm.attr_searched = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/searched_")
nsm.attr_generate = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/generate_")
nsm.attr_generated = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/generated_")
nsm.result = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/Result")
nsm.nsm_command = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/nsm_command")
nsm.attr_nsm_command = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/nsm_command_")
nsm.attr_nsm_command_pattern = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/nsm_command_pattern_")
nsm.attr_nsm_command_elem = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/nsm_command_elem_")
nsm.attr_nsm_command_comment = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/nsm_command_comment_")
nsm.attr_nsm_command_shortname = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/nsm_command_shortname_")
nsm.n_1 = session.find_keynode_full_uri(u"/proc/keynode/1_")
nsm.n_2 = session.find_keynode_full_uri(u"/proc/keynode/2_")
nsm.attr = {
0:session.find_keynode_full_uri(u"/proc/keynode/1_"),
1:session.find_keynode_full_uri(u"/proc/keynode/2_"),
2:session.find_keynode_full_uri(u"/proc/keynode/3_"),
3:session.find_keynode_full_uri(u"/proc/keynode/4_"),
4:session.find_keynode_full_uri(u"/proc/keynode/5_"),
5:session.find_keynode_full_uri(u"/proc/keynode/6_"),
6:session.find_keynode_full_uri(u"/proc/keynode/7_"),
7:session.find_keynode_full_uri(u"/proc/keynode/8_"),
8:session.find_keynode_full_uri(u"/proc/keynode/9_"),
9:session.find_keynode_full_uri(u"/proc/keynode/10_")
}
def madeNewNSMCommand(sc_pattern_set,
command_elem_list = [],
str_command_short_name = "",
str_command_comment = ""):
print "[NSM] Register new NSM command"
# create the NSM command node
sc_nsm_command = session.create_el(segment, pm.SC_N_CONST)
session.gen3_f_a_f(segment,nsm.nsm_command,sc_nsm_command, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
# create the NSM command sheaf node
sc_nsm_command_sheaf = session.create_el(segment, pm.SC_N_CONST)
# connect the nodes under the attr_nsm_command_ attribute
arc = session.gen3_f_a_f(segment, sc_nsm_command, sc_nsm_command_sheaf, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)[1]
session.gen3_f_a_f(segment, nsm.attr_nsm_command, arc, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
# create the search pattern node of the NSM command
sc_nsm_pattern = session.create_el(segment, pm.SC_N_CONST)
# add the node to the NSM command under the attribute
arc = session.gen3_f_a_f(segment, sc_nsm_command_sheaf, sc_nsm_pattern, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)[1]
session.gen3_f_a_f(segment, nsm.attr_nsm_command_pattern, arc, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
# copy the search pattern into the NSM command
q = session.copySetToSet(segment,sc_pattern_set,sc_nsm_pattern)
# create the NSM command parameters node
sc_nsm_command_elem = session.create_el(segment, pm.SC_N_CONST)
# add the node to the NSM command under the attribute
arc = session.gen3_f_a_f(segment, sc_nsm_command_sheaf,
sc_nsm_command_elem, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)[1]
session.gen3_f_a_f(segment, nsm.attr_nsm_command_elem, arc, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
# copy the command attributes
for i, el in enumerate(command_elem_list):
if i < 10:
arc = session.gen3_f_a_f(segment, sc_nsm_command_elem, el, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)[1]
session.gen3_f_a_f(segment, nsm.attr[i], arc, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
# create the NSM command short-name node and attach it
sc_nsm_command_short_name = session.create_el(segment, pm.SC_N_CONST)
session.set_content_str(sc_nsm_command_short_name, str_command_short_name)
arc = session.gen3_f_a_f(segment, sc_nsm_command_sheaf,
sc_nsm_command_short_name, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)[1]
session.gen3_f_a_f(segment, nsm.attr_nsm_command_shortname, arc, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
# create the NSM command comment node and attach it
sc_nsm_command_comment = session.create_el(segment, pm.SC_N_CONST)
session.set_content_str(sc_nsm_command_comment, str_command_comment)
arc = session.gen3_f_a_f(segment, sc_nsm_command_sheaf,
sc_nsm_command_comment, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)[1]
session.gen3_f_a_f(segment, nsm.attr_nsm_command_comment, arc, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
return sc_nsm_command
def runNSMCommandWithParams(sc_nsm_command,
command_elem_list = [],
search = True):
#print "[NSM] run NSM command with params"
sc_nsm_request = session.create_el(segment, pm.SC_N_CONST)
session.gen3_f_a_f(segment, nsm.goals, sc_nsm_request, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
sc_nsm_request_sheaf = session.create_el(segment, pm.SC_N_CONST)
arc_sheaf = session.gen3_f_a_f(segment, sc_nsm_request, sc_nsm_request_sheaf, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)[1]
for i, el in enumerate(command_elem_list):
arc = session.gen3_f_a_f(segment, sc_nsm_request_sheaf, el, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)[2]
session.gen3_f_a_f(segment, nsm.attr[i], arc, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
session.gen3_f_a_f(segment, sc_nsm_command, arc_sheaf, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
session.gen3_f_a_f(segment, nsm.attr_active, arc_sheaf, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
if search:
session.gen3_f_a_f(segment, nsm.attr_search, arc_sheaf, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
else:
session.gen3_f_a_f(segment, nsm.attr_generate, arc_sheaf, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
return sc_nsm_request
def runNSMwithPattern(sc_pattern,
search = True, patternName = None):
#print "[NSM] run NSM with pattern"
sc_nsm_request = session.create_el(segment, pm.SC_N_CONST)
if patternName is not None:
session.set_content_str(sc_nsm_request, patternName)
session.gen3_f_a_f(segment, nsm.goals, sc_nsm_request, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
sc_nsm_request_sheaf = session.create_el(segment, pm.SC_N_CONST)
arc_sheaf = session.gen3_f_a_f(segment, sc_nsm_request, sc_nsm_request_sheaf, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)[1]
pat_els = session.search3_f_a_a(sc_pattern, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL, pm.SC_EMPTY)
for el in pat_els:
session.gen3_f_a_f(segment, sc_nsm_request_sheaf, el[2], pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
session.gen3_f_a_f(segment, nsm.attr_active, arc_sheaf, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
session.gen3_f_a_f(segment, nsm.attr_confirm_, arc_sheaf, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
if search:
session.gen3_f_a_f(segment, nsm.attr_search, arc_sheaf, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
else:
session.gen3_f_a_f(segment, nsm.attr_generate, arc_sheaf, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
return sc_nsm_request
def getNSMRequestScResult(sc_nsm_request, wait_for_result = True, wait_time = 0.1):
print "[NSM] search for NSM request result"
# wait for searched_
res = session.search5_f_a_a_a_f(sc_nsm_request,
pm.SC_A_CONST|pm.SC_POS,
pm.SC_N_CONST,
pm.SC_A_CONST|pm.SC_POS,
nsm.attr_searched)
while not res:
if wait_for_result:
print "[NSM] wait for result"
time.sleep(wait_time)
else:
return None
res = session.search5_f_a_a_a_f(sc_nsm_request,
pm.SC_A_CONST|pm.SC_POS,
pm.SC_N_CONST,
pm.SC_A_CONST|pm.SC_POS,
nsm.attr_searched)
# search for confirmed_
sc_nsm_arc_sheaf = res[0][1]
res = session.search3_f_a_f(nsm.attr_confirmed,
pm.SC_A_CONST|pm.SC_POS,
sc_nsm_arc_sheaf)
if not res:
print "[nsm] no any results found"
return None
res = session.search3_a_a_f(pm.SC_N_CONST,
pm.SC_A_CONST|pm.SC_POS,
sc_nsm_arc_sheaf)
for set in res:
if session.search3_f_a_f(nsm.result,pm.SC_A_CONST|pm.SC_POS,set[0]):
print "[NSM] find result"
return set[0]
print "[nsm] no any results found"
return None
def convertNsmResult2SimpleSet(sc_nsm_result):
res = []
result_variants = session.search3_f_a_a(sc_nsm_result, pm.SC_A_CONST|pm.SC_POS, pm.SC_N_CONST)
if result_variants is None: return None
for res_variant in result_variants:
cur_element_sheafs = session.search3_f_a_a(res_variant[2], pm.SC_A_CONST|pm.SC_POS, pm.SC_N_CONST)
if not cur_element_sheafs: continue
#print cur_element_sheafs
for cur_element_sheaf in cur_element_sheafs:
#print cur_element_sheaf
cur_find_element = session.search5_f_a_a_a_f(cur_element_sheaf[2],
pm.SC_A_CONST|pm.SC_POS,
pm.SC_EMPTY,
pm.SC_A_CONST|pm.SC_POS,
nsm.n_2)
if not cur_find_element: continue
res.append(cur_find_element[0][2])
const_elements = getConstPatternElsByScResult(sc_nsm_result)
if const_elements:
res = res + const_elements
return res
def convertNsmResult2Sets(sc_nsm_result):
res = []
result_variants = session.search3_f_a_a(sc_nsm_result, pm.SC_A_CONST|pm.SC_POS, pm.SC_N_CONST)
if result_variants is None: return None
for res_variant in result_variants:
v_res = []
cur_element_sheafs = session.search3_f_a_a(res_variant[2], pm.SC_A_CONST|pm.SC_POS, pm.SC_N_CONST)
if not cur_element_sheafs: continue
for cur_element_sheaf in cur_element_sheafs:
s_res = []
cur_find_element = session.search5_f_a_a_a_f(cur_element_sheaf[2],
pm.SC_A_CONST|pm.SC_POS,
pm.SC_EMPTY,
pm.SC_A_CONST|pm.SC_POS,
nsm.n_1)
if not cur_find_element: continue
s_res.append(cur_find_element[0][2])
cur_find_element = session.search5_f_a_a_a_f(cur_element_sheaf[2],
pm.SC_A_CONST|pm.SC_POS,
pm.SC_EMPTY,
pm.SC_A_CONST|pm.SC_POS,
nsm.n_2)
if not cur_find_element: continue
s_res.append(cur_find_element[0][2])
v_res.append(s_res)
res.append(v_res)
return res
def getConstPatternElsByScResult(sc_nsm_result):
temp = session.search3_f_a_a(sc_nsm_result, pm.SC_A_CONST|pm.SC_POS, pm.SC_A_CONST|pm.SC_POS)
if not temp: return None
print temp
sheafArc = temp[0][2]
print sheafArc
sc_pattern = session.search3_a_f_a(pm.SC_N_CONST, sheafArc, pm.SC_N_CONST)[0][2]
consts = session.search3_f_a_a(sc_pattern, pm.SC_A_CONST|pm.SC_POS, pm.SC_CONST)
res = []
for els in consts:
res.append(els[2])
if len(res) == 0: return None
return res
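# Hedged usage sketch (an assumption, not part of the original module): a typical
# flow is to submit a pre-built pattern set to NSM as a search request, wait for
# the result node and flatten it. sc_pattern below is a hypothetical pattern set.
def _example_nsm_search(sc_pattern):
    request = runNSMwithPattern(sc_pattern, search=True, patternName=u"example")
    result = getNSMRequestScResult(request, wait_for_result=True)
    if result is None:
        return None
    return convertNsmResult2SimpleSet(result)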
|
__author__ = 'Viktor Kerkez <[email protected]>'
__contact__ = '[email protected]'
__date__ = '20 April 2010'
__copyright__ = 'Copyright (c) 2010 Viktor Kerkez'
import logging
from django import forms
from django.conf import settings
from google.appengine.api import mail
# perart imports
from perart import models
class PerArtForm(forms.ModelForm):
tinymce = True
class ProgramForm(PerArtForm):
class Meta:
model = models.Program
exclude = ['url']
class ProjectForm(PerArtForm):
class Meta:
model = models.Project
exclude = ['url']
class NewsForm(PerArtForm):
class Meta:
model = models.News
exclude = ['url']
class MenuForm(PerArtForm):
tinymce = False
class Meta:
model = models.Menu
exclude = ['url']
class GalleryForm(PerArtForm):
class Meta:
model = models.Gallery
exclude = ['url']
class NewsletterForm(forms.Form):
name = forms.CharField(required=True)
email = forms.EmailField(required=True)
def send_email(self):
try:
mail.send_mail(sender='[email protected]',
to=settings.PERART_EMAIL,
subject='"%(name)s" se prijavio za newsletter' % self.cleaned_data,
body='Ime: %(name)s\nEmail: %(email)s' % self.cleaned_data)
return True
except:
logging.exception('sending message failed')
return False
|
# -*- coding: utf-8 -*-
from kivy.uix.screenmanager import Screen
#from kivy.lang import Builder
from models.senhas import Senha, Collection
from kivy.uix.button import Button
from kivy.uix.gridlayout import GridLayout
from telas.utilities import Confirma, JanelaSettings
import sys
class JanelaCollect (Screen):
def __init__(self, smanager=None, last_window=None, **kwargs):
super(JanelaCollect, self).__init__(**kwargs)
self.last_window = last_window
self.ids.area_collects.bind(minimum_height=self.ids.area_collects.setter('height'))
self.smanager = smanager
def recarrega (self):
self.ids.area_collects.clear_widgets()
cols = Collection.select()
for c in cols:
b = ItemColecao (c, smanager=self.smanager)
self.ids.area_collects.add_widget(b)
def on_pre_enter(self):
self.recarrega()
def on_leave(self):
self.smanager.remove_widget (self)
def call_settings (self):
from telas.collect import JanelaSettings
janela = JanelaSettings(smanager=self.smanager, name='janela_settings')
self.smanager.add_widget( janela )
#janela = self.smanager.get_screen('janela_add_collect')
self.smanager.transition.direction = 'left'
self.smanager.current = 'janela_settings'
def add (self):
from telas.collect import JanelaAddCollect
janela = JanelaAddCollect(smanager=self.smanager, name='janela_add_collect')
self.smanager.add_widget( janela )
#janela = self.smanager.get_screen('janela_add_collect')
self.smanager.transition.direction = 'left'
self.smanager.current = 'janela_add_collect'
def voltar (self):
sys.exit(0)
class ItemColecao (Button):
def __init__ (self, col, smanager=None, **kwargs):
super(ItemColecao, self).__init__(**kwargs)
self.collection = col
self.smanager = smanager
self.text = self.smanager.encrypter.decripta (col.nome)
def on_release (self, **kwargs):
super(ItemColecao, self).on_release(**kwargs)
from telas.passwd import JanelaPassList
janela = JanelaPassList( smanager=self.smanager, name='janela_pass_list')
self.smanager.add_widget( janela )
#janela = self.smanager.get_screen('janela_pass_list')
janela.setup (col=self.collection)
self.smanager.transition.direction = 'left'
self.smanager.current = 'janela_pass_list'
class JanelaAddCollect (Screen):
def __init__(self, smanager=None, last_window=None, **kwargs):
super(JanelaAddCollect, self).__init__(**kwargs)
self.last_window = last_window
self.smanager = smanager
def on_pre_enter(self):
self.ids.espaco_superior.remove_widget (self.ids.button_deleta)
self.ids.tx_nome.text = ''
def on_leave (self):
self.smanager.remove_widget(self)
def salvar (self):
c = Collection()
c.nome = self.smanager.encrypter.encripta (self.ids.tx_nome.text )
c.save()
# Go to the view
#janela = self.smanager.get_screen('janela_pass_list')
from telas.passwd import JanelaPassList
janela = JanelaPassList( smanager=self.smanager, name='janela_pass_list')
self.smanager.add_widget( janela )
janela.setup (col=c)
self.smanager.transition.direction = 'right'
self.smanager.current = 'janela_pass_list'
def voltar (self):
from telas.collect import JanelaCollect
janela = JanelaCollect(smanager=self.smanager, name='janela_collect')
self.smanager.add_widget( janela )
janela.recarrega()
self.smanager.transition.direction = 'right'
self.smanager.current = 'janela_collect'
class JanelaEditCollect (JanelaAddCollect):
def setup (self, col):
self.collect = col
def on_pre_enter(self):
self.ids.tx_nome.text = self.smanager.encrypter.decripta (self.collect.nome)
def on_leave (self):
self.smanager.remove_widget(self)
def _really_delete(self, really):
if really:
self.collect.delete_instance(recursive=True)
self.voltar()
def delete (self):
p = Confirma (callback=self._really_delete, text='Remover Colecao?')
p.open()
def salvar (self):
c = self.collect
c.nome = self.smanager.encrypter.encripta (self.ids.tx_nome.text)
c.save()
# Go to the view
#janela = self.smanager.get_screen('janela_pass_list')
from telas.passwd import JanelaPassList
janela = JanelaPassList( smanager=self.smanager, name='janela_pass_list')
self.smanager.add_widget( janela )
janela.setup (col=c)
self.smanager.transition.direction = 'right'
self.smanager.current = 'janela_pass_list'
#self.smanager.switch_to = 'janela_pass_list'
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
##############################################################################
#
# Pedro Arroyo M <[email protected]>
# Copyright (C) 2015 Mall Connection(<http://www.mallconnection.org>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv
from osv import fields
class hr_family_responsibilities(osv.osv):
'''
OpenERP model for HR family responsibilities.
'''
_name = 'hr.family.responsibilities'
_description = 'openerpmodel'
_columns = {
'name':fields.char('Name', size=64, required=True, readonly=False),
'type':fields.selection([
('simple','simple responsibility'),
('maternal','maternal responsibility'),
('invalid','invalid responsibility'),
], 'State', select=True),
'relationship':fields.selection([
('father','father'),
('son','son / daughter'),
('spouse','spouse'),
('Father in law','Father in law / mother in law'),
('second','second'),
('Grandfather','Grandfather / Grandmother'),
('grandchild','grandchild / granddaughter'),
('sister','sister / brother'),
('brother in law','brother in law / sister in law'),
], 'Relationship', select=True, readonly=False),
'vat': fields.char('TIN', size=32, help="Tax Identification Number. Check the box if this contact is subjected to taxes. Used by some of the legal statements."),
'employee_id': fields.many2one('hr.employee', string='Employee'),
}
hr_family_responsibilities()
|
# -*- coding: utf-8 -*-
#
# diffoscope: in-depth comparison of files, archives, and directories
#
# Copyright © 2018 Chris Lamb <[email protected]>
#
# diffoscope is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# diffoscope is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with diffoscope. If not, see <https://www.gnu.org/licenses/>.
import pytest
from diffoscope.comparators.gnumeric import GnumericFile
from ..utils.data import load_fixture, get_data
from ..utils.tools import skip_unless_tools_exist
from ..utils.nonexisting import assert_non_existing
gnumeric1 = load_fixture('test1.gnumeric')
gnumeric2 = load_fixture('test2.gnumeric')
def test_identification(gnumeric1):
assert isinstance(gnumeric1, GnumericFile)
def test_no_differences(gnumeric1):
difference = gnumeric1.compare(gnumeric1)
assert difference is None
@pytest.fixture
def differences(gnumeric1, gnumeric2):
return gnumeric1.compare(gnumeric2).details
@skip_unless_tools_exist('ssconvert')
def test_diff(differences):
expected_diff = get_data('gnumeric_expected_diff')
assert differences[0].unified_diff == expected_diff
@skip_unless_tools_exist('ssconvert')
def test_compare_non_existing(monkeypatch, gnumeric1):
assert_non_existing(monkeypatch, gnumeric1, has_null_source=False)
|
from Module import AbstractModule
class Module(AbstractModule):
def __init__(self):
AbstractModule.__init__(self)
def run(
self, network, in_data, out_attributes, user_options, num_cores,
out_filename):
from genomicode import filelib
from genomicode import SimpleVariantMatrix
from genomicode import AnnotationMatrix
simple_file = in_data.identifier
metadata = {}
# Read all in memory. Hopefully, not too big.
ds = []
for d in filelib.read_row(simple_file, header=-1):
ds.append(d)
#if len(ds) > 50000: # DEBUG
# break
# MuSE sometimes has alternates.
# Alt A,C
# Num_Alt 13,0
# VAF 0.19,0.0
# Detect this and fix it. Take the alternate with the highest VAF.
for d in ds:
if d.Num_Alt.find(",") < 0:
continue
x1 = d.Num_Alt.split(",")
x2 = d.VAF.split(",")
assert len(x1) == len(x2)
x1 = map(int, x1)
x2 = map(float, x2)
max_vaf = max_i = None
for i in range(len(x2)):
if max_vaf is None or x2[i] > max_vaf:
max_vaf = x2[i]
max_i = i
assert max_i is not None
d.Num_Alt = str(x1[max_i])
d.VAF = str(x2[max_i])
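# Illustrative effect of the fix above (values from the comment): a row with
# Num_Alt "13,0" and VAF "0.19,0.0" is collapsed to Num_Alt "13" and VAF "0.19";
# the Alt column itself is left unchanged.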
# Make a list of all the positions.
positions = {} # (Chrom, Pos) -> 1
for d in ds:
positions[(d.Chrom, int(d.Pos))] = 1
positions = sorted(positions)
# Make a list of all the callers.
callers = {}
for d in ds:
callers[d.Caller] = 1
callers = sorted(callers)
# Make a list of all the samples.
samples = {}
for d in ds:
samples[d.Sample] = 1
samples = sorted(samples)
# Make a list of the coordinates.
coord_data = {}
for d in ds:
x = d.Chrom, int(d.Pos), d.Ref, d.Alt
coord_data[x] = 1
coord_data = sorted(coord_data)
# Make a list of all DNA calls.
call_data = []
for d in ds:
assert d.Source in ["DNA", "RNA"]
if d.Source != "DNA":
continue
num_ref = num_alt = vaf = None
if d.Num_Ref:
num_ref = int(d.Num_Ref)
if d.Num_Alt:
num_alt = int(d.Num_Alt)
if d.VAF:
vaf = float(d.VAF)
if num_ref is None and num_alt is None and vaf is None:
continue
call = SimpleVariantMatrix.Call(num_ref, num_alt, vaf)
x = d.Chrom, int(d.Pos), d.Ref, d.Alt, d.Sample, d.Caller, call
call_data.append(x)
# sample -> caller -> chrom, pos, ref, alt -> call
samp2caller2coord2call = {}
for x in call_data:
chrom, pos, ref, alt, sample, caller, call = x
coord = chrom, pos, ref, alt
if sample not in samp2caller2coord2call:
samp2caller2coord2call[sample] = {}
caller2coord2call = samp2caller2coord2call[sample]
if caller not in caller2coord2call:
caller2coord2call[caller] = {}
coord2call = caller2coord2call[caller]
# A (sample, caller, coord) may have multiple calls, e.g. for
# germline samples that are called once per tumor sample. In
# that case, keep the call with the highest coverage.
if coord in coord2call:
old_call = coord2call[coord]
cov = old_cov = None
if call.num_ref is not None and call.num_alt is not None:
cov = call.num_ref + call.num_alt
if old_call.num_ref is not None and \
old_call.num_alt is not None:
old_cov = old_call.num_ref + old_call.num_alt
if cov is None and old_cov is not None:
call = old_call
elif cov is not None and old_cov is not None and cov < old_cov:
call = old_call
coord2call[coord] = call
# Count the number of callers that called a variant at each
# position for each sample.
samp2coord2caller = {} # sample -> chrom, pos, ref, alt -> caller -> 1
# Need to do this first, to make sure each caller is counted
# at most once. This is to account for germline samples that
# are called by each caller multiple times.
for x in call_data:
chrom, pos, ref, alt, sample, caller, call = x
coord = chrom, pos, ref, alt
if sample not in samp2coord2caller:
samp2coord2caller[sample] = {}
if coord not in samp2coord2caller[sample]:
samp2coord2caller[sample][coord] = {}
samp2coord2caller[sample][coord][caller] = 1
samp2coord2nc = {} # sample -> chrom, pos, ref, alt -> num_callers
for sample in samp2coord2caller:
samp2coord2nc[sample] = {}
for coord in samp2coord2caller[sample]:
samp2coord2nc[sample][coord] = len(
samp2coord2caller[sample][coord])
#for x in call_data:
# chrom, pos, ref, alt, sample, caller, call = x
# coord = chrom, pos, ref, alt
# if sample not in samp2coord2nc:
# samp2coord2nc[sample] = {}
# nc = samp2coord2nc[sample].get(coord, 0) + 1
# samp2coord2nc[sample][coord] = nc
# Format everything into an annotation matrix.
headers0 = []
headers1 = []
headers2 = []
all_annots = []
# Add the positions.
headers0 += ["", "", "", ""]
headers1 += ["", "", "", ""]
headers2 += ["Chrom", "Pos", "Ref", "Alt"]
for i in range(4):
x = [x[i] for x in coord_data]
x = [str(x) for x in x]
all_annots.append(x)
# Add the number of callers information.
headers0 += ["Num Callers"] * len(samples)
headers1 += [""] * len(samples)
headers2 += samples
for sample in samples:
annots = []
for coord in coord_data:
nc = samp2coord2nc.get(sample, {}).get(coord, "")
annots.append(nc)
all_annots.append(annots)
# Add information about calls.
for sample in samples:
caller2coord2call = samp2caller2coord2call.get(sample, {})
for i, caller in enumerate(callers):
h0 = ""
if not i:
h0 = sample
h1 = caller
h2 = "Ref/Alt/VAF"
headers0.append(h0)
headers1.append(h1)
headers2.append(h2)
coord2call = caller2coord2call.get(caller, {})
annots = []
for coord in coord_data:
x = ""
call = coord2call.get(coord)
if call:
x = SimpleVariantMatrix._format_call(call)
annots.append(x)
all_annots.append(annots)
# Set the headers.
assert len(headers0) == len(headers1)
assert len(headers0) == len(headers2)
assert len(headers0) == len(all_annots)
headers = [None] * len(headers0)
for i, x in enumerate(zip(headers0, headers1, headers2)):
x = "___".join(x)
headers[i] = x
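# Illustrative note (assumption based on the join above): each final header has
# the form "<header0>___<header1>___<header2>", e.g. "______Chrom" for the
# position columns, "Num Callers______<sample>" for the caller counts, and
# "<sample>___<caller>___Ref/Alt/VAF" for the per-caller call columns (with an
# empty sample part after a sample's first caller).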
matrix = AnnotationMatrix.create_from_annotations(headers, all_annots)
SimpleVariantMatrix.write_from_am(out_filename, matrix)
#annot_header = ["Chrom", "Pos", "Ref", "Alt"]
#matrix = SimpleVariantMatrix.make_matrix(
# samples, callers, annot_header, coord_data, named_data,
# call_data)
#SimpleVariantMatrix.write(out_filename, matrix)
return metadata
def name_outfile(self, antecedents, user_options):
return "calls.txt"
|
'''
xfilesharing XBMC Plugin
Copyright (C) 2013-2014 ddurdle
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import cloudservice
import os
import re
import urllib, urllib2
import cookielib
import xbmc, xbmcaddon, xbmcgui, xbmcplugin
# global variables
PLUGIN_NAME = 'plugin.video.cloudstream'
PLUGIN_URL = 'plugin://'+PLUGIN_NAME+'/'
ADDON = xbmcaddon.Addon(id=PLUGIN_NAME)
# helper methods
def log(msg, err=False):
if err:
xbmc.log(ADDON.getAddonInfo('name') + ': ' + msg, xbmc.LOGERROR)
else:
xbmc.log(ADDON.getAddonInfo('name') + ': ' + msg, xbmc.LOGDEBUG)
#
#
#
class xfilesharing(cloudservice.cloudservice):
# magic numbers
MEDIA_TYPE_VIDEO = 1
MEDIA_TYPE_FOLDER = 0
##
# initialize with 1) service name, 2) domain, 3) username, 4) password, 5) authorization token, 6) user agent string
##
def __init__(self, name, domain, user, password, auth, user_agent):
return super(xfilesharing,self).__init__(name, domain, user, password, auth, user_agent)
#return cloudservice.__init__(self,domain, user, password, auth, user_agent)
##
# perform login
##
def login(self):
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookiejar))
# default User-Agent ('Python-urllib/2.6') will *not* work
opener.addheaders = [('User-Agent', self.user_agent)]
if self.domain == 'uptostream.com':
self.domain = 'uptobox.com'
if 'http://' in self.domain:
url = self.domain
else:
url = 'http://' + self.domain + '/'
values = {
'op' : 'login',
'login' : self.user,
'redirect' : url,
'password' : self.password
}
# try login
try:
response = opener.open(url,urllib.urlencode(values))
except urllib2.URLError, e:
if e.code == 403:
#login denied
xbmcgui.Dialog().ok(ADDON.getLocalizedString(30000), ADDON.getLocalizedString(30017))
log(str(e), True)
return
response_data = response.read()
response.close()
loginResult = False
#validate successful login
for r in re.finditer('my_account',
response_data, re.DOTALL):
loginResult = True
#validate successful login
for r in re.finditer('logout',
response_data, re.DOTALL):
loginResult = True
if (loginResult == False):
xbmcgui.Dialog().ok(ADDON.getLocalizedString(30000), ADDON.getLocalizedString(30017))
log('login failed', True)
return
for cookie in self.cookiejar:
for r in re.finditer(' ([^\=]+)\=([^\s]+)\s',
str(cookie), re.DOTALL):
cookieType,cookieValue = r.groups()
if cookieType == 'xfss':
self.auth = cookieValue
if cookieType == 'xfsts':
self.auth = cookieValue
return
##
# return the appropriate "headers" for xfilesharing requests that include 1) user agent, 2) authorization cookie
# returns: list containing the header
##
def getHeadersList(self,referer=''):
if ((self.auth != '' or self.auth != 0) and referer == ''):
return { 'User-Agent' : self.user_agent, 'Cookie' : 'lang=english; login='+self.user+'; xfsts='+self.auth+'; xfss='+self.auth+';' }
elif (self.auth != '' or self.auth != 0):
return { 'User-Agent' : self.user_agent, 'Referer': referer, 'Cookie' : 'lang=english; login='+self.user+'; xfsts='+self.auth+'; xfss='+self.auth+';' }
else:
return { 'User-Agent' : self.user_agent }
##
# return the appropriate "headers" for xfilesharing requests that include 1) user agent, 2) authorization cookie
# returns: URL-encoded header string
##
def getHeadersEncoded(self, referer=''):
return urllib.urlencode(self.getHeadersList(referer))
##
# retrieve a list of videos, using playback type stream
# parameters: folder ID (optional), cache type (optional)
# returns: list of videos
##
def getVideosList(self, folderID=0, cacheType=0):
if 'http://' in self.domain:
url = self.domain
else:
url = 'http://' + self.domain
if 'streamcloud.eu' in self.domain:
url = url + '/'
# retrieve all documents
if folderID == 0:
url = url+'?op=my_files'
else:
url = url+'?op=my_files&fld_id='+folderID
videos = {}
if True:
req = urllib2.Request(url, None, self.getHeadersList())
# if action fails, validate login
try:
response = urllib2.urlopen(req)
except urllib2.URLError, e:
if e.code == 403 or e.code == 401:
self.login()
req = urllib2.Request(url, None, self.getHeadersList())
try:
response = urllib2.urlopen(req)
except urllib2.URLError, e:
log(str(e), True)
return
else:
log(str(e), True)
return
response_data = response.read()
response.close()
for r in re.finditer('placeholder\=\"(Username)\" id\=\"(inputLoginEmail)\" name\=\"login\"' ,
response_data, re.DOTALL):
loginUsername,loginUsernameName = r.groups()
self.login()
req = urllib2.Request(url, None, self.getHeadersList())
try:
response = urllib2.urlopen(req)
except urllib2.URLError, e:
log(str(e), True)
return
response_data = response.read()
response.close()
# parsing page for videos
# video-entry
for r in re.finditer('<a id="([^\"]+)" href="([^\"]+)">([^\<]+)</a>' ,
response_data, re.DOTALL):
fileID,url,fileName = r.groups()
# streaming
videos[fileName] = {'url': 'plugin://plugin.video.cloudstream?mode=streamURL&instance='+self.instanceName+'&url=' + url, 'mediaType' : self.MEDIA_TYPE_VIDEO}
for r in re.finditer('<input type="checkbox" name="file_id".*?<a href="([^\"]+)">([^\<]+)</a>' ,
response_data, re.DOTALL):
url,fileName = r.groups()
# streaming
videos[fileName] = {'url': 'plugin://plugin.video.cloudstream?mode=streamURL&instance='+self.instanceName+'&url=' + url, 'mediaType' : self.MEDIA_TYPE_VIDEO}
# video-entry - bestream
for r in re.finditer('<TD align=left>[^\<]+<a href="([^\"]+)">([^\<]+)</a>' ,
response_data, re.DOTALL):
url,fileName = r.groups()
# streaming
videos[fileName] = {'url': 'plugin://plugin.video.cloudstream?mode=streamURL&instance='+self.instanceName+'&url=' + url, 'mediaType' : self.MEDIA_TYPE_VIDEO}
# video-entry - uptobox
for r in re.finditer('<td style="[^\"]+"><a href="([^\"]+)".*?>([^\<]+)</a></td>' ,
response_data, re.DOTALL):
url,fileName = r.groups()
# streaming
videos[fileName] = {'url': 'plugin://plugin.video.cloudstream?mode=streamURL&instance='+self.instanceName+'&url=' + url, 'mediaType' : self.MEDIA_TYPE_VIDEO}
if 'realvid.net' in self.domain:
for r in re.finditer('<a href="[^\"]+">([^\<]+)</a>\s+</TD>' ,
response_data, re.DOTALL):
url,fileName = r.groups()
#flatten folders (no clean way of handling subfolders, so just make the root list all folders & subfolders
#therefore, skip listing folders if we're not in root
# if folderID == 0:
# folder-entry
# for r in re.finditer('<a href=".*?fld_id=([^\"]+)"><b>([^\<]+)</b></a>' ,
# folderID = 0
# for r in re.finditer('<option value="(\d\d+)">([^\<]+)</option>' ,
# response_data, re.DOTALL):
# folderID,folderName = r.groups()
#remove from folderName
# folderName = re.sub('\ \;', '', folderName)
# folder
# if int(folderID) != 0:
# videos[folderName] = {'url': 'plugin://plugin.video.cloudstream?mode=folder&instance='+self.instanceName+'&folderID=' + folderID, 'mediaType' : self.MEDIA_TYPE_FOLDER}
# if folderID == 0:
for r in re.finditer('<a href=".*?fld_id=([^\"]+)"><b>([^\<]+)</b></a>' ,
response_data, re.DOTALL):
folderID,folderName = r.groups()
# folder
if int(folderID) != 0 and folderName != ' . . ':
videos[folderName] = {'url': 'plugin://plugin.video.cloudstream?mode=folder&instance='+self.instanceName+'&folderID=' + folderID, 'mediaType' : self.MEDIA_TYPE_FOLDER}
return videos
##
# retrieve a video link
# parameters: URL of the video page, cache type (optional)
# returns: list of URLs for the video or single URL of video (if not prompting for quality)
##
def getPublicLink(self,url,cacheType=0):
fname = ''
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookiejar))
opener.addheaders = [ ('User-Agent' , self.user_agent)]
req = urllib2.Request(url)
try:
response = opener.open(req)
except urllib2.URLError, e:
pass
response.close()
url = response.url
# opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookiejar), MyHTTPErrorProcessor)
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookiejar))
opener.addheaders = [ ('User-Agent' , self.user_agent), ('Referer', url), ('Cookie', 'lang=english; login='+self.user+'; xfsts='+self.auth+'; xfss='+self.auth+';')]
req = urllib2.Request(url)
# if action fails, validate login
try:
response = opener.open(req)
except urllib2.URLError, e:
if e.code == 403 or e.code == 401:
self.login()
req = urllib2.Request(url, None, self.getHeadersList())
try:
response = opener.open(req)
except urllib2.URLError, e:
log(str(e), True)
return ('','')
else:
log(str(e), True)
return ('','')
response_data = response.read()
response.close()
for r in re.finditer('\<title\>([^\<]+)\<',
response_data, re.DOTALL | re.I):
title = r.group(1)
if fname == '':
fname = title
url = response.url
req = urllib2.Request(url)
for r in re.finditer('name\=\"(code)\" class\=\"(captcha_code)' ,
response_data, re.DOTALL):
loginUsername,loginUsernameName = r.groups()
self.login()
req = urllib2.Request(url, None, self.getHeadersList())
try:
response = urllib2.urlopen(req)
except urllib2.URLError, e:
log(str(e), True)
return ('','')
response_data = response.read()
response.close()
if self.domain == 'vidzi.tv':
for r in re.finditer('(file)\: \"([^\"]+)\.mp4\"' ,response_data, re.DOTALL):
streamType,streamURL = r.groups()
return (streamURL + '.mp4', fname)
confirmID = 0
values = {}
# fetch video title, download URL and docid for stream link
for r in re.finditer('<input type="hidden" name="op" value="([^\"]+)">.*?<input type="hidden" name="usr_login" value="([^\"]*)">.*?<input type="hidden" name="id" value="([^\"]+)">.*?<input type="hidden" name="fname" value="([^\"]*)">.*?<input type="hidden" name="referer" value="([^\"]*)">' ,response_data, re.DOTALL):
op,usr_login,id,fname,referer = r.groups()
values = {
'op' : op,
'usr_login' : usr_login,
'id' : id,
'fname' : fname,
'referer' : referer,
'method_free' : 'Free Download'
}
for r in re.finditer('<input type="hidden" name="op" value="([^\"]+)">.*?<input type="hidden" name="usr_login" value="([^\"]*)">.*?<input type="hidden" name="id" value="([^\"]+)">.*?<input type="hidden" name="fname" value="([^\"]*)">.*?<input type="hidden" name="referer" value="([^\"]*)">.*?<input type="hidden" name="hash" value="([^\"]*)">.*?<input type="submit" name="imhuman" value="([^\"]*)" id="btn_download">' ,response_data, re.DOTALL):
op,usr_login,id,fname,referer,hash,submit = r.groups()
values = {
'op' : op,
'usr_login' : usr_login,
'id' : id,
'fname' : fname,
'referer' : referer,
'hash' : hash,
'imhuman' : submit
}
for r in re.finditer('<input type="hidden" name="op" value="([^\"]+)">.*?<input type="hidden" name="usr_login" value="([^\"]*)">.*?<input type="hidden" name="id" value="([^\"]+)">.*?<input type="hidden" name="fname" value="([^\"]*)">.*?<input type="hidden" name="referer" value="([^\"]*)">.*?<input type="hidden" name="hash" value="([^\"]*)">.*?<input type="hidden" name="inhu" value="([^\"]*)">.*?<input type="submit" name="imhuman" value="([^\"]*)" id="btn_download">' ,response_data, re.DOTALL):
op,usr_login,id,fname,referer,hash,inhu,submit = r.groups()
values = {
'_vhash' : 'i1102394cE',
'gfk' : 'i22abd2449',
'op' : op,
'usr_login' : usr_login,
'id' : id,
'fname' : fname,
'referer' : referer,
'hash' : hash,
'inhu' : inhu,
'imhuman' : submit
}
for r in re.finditer('<input type="hidden" name="op" value="([^\"]+)">.*?<input type="hidden" name="id" value="([^\"]+)">.*?<input type="hidden" name="referer" value="([^\"]*)">.*?<input type="hidden" name="method_free" value="([^\"]*)">' ,response_data, re.DOTALL):
op,id,referer,submit = r.groups()
values = {
'op' : op,
'id' : id,
'referer' : referer,
'method_free' : submit,
'download_direct' : 1
}
for r in re.finditer('<input type="hidden" name="op" value="([^\"]+)">.*?<input type="hidden" name="id" value="([^\"]+)">.*?<input type="hidden" name="rand" value="([^\"]*)">.*?<input type="hidden" name="referer" value="([^\"]*)">.*?<input type="hidden" name="method_free" value="([^\"]*)">' ,response_data, re.DOTALL):
op,id,rand,referer,submit = r.groups()
values = {
'op' : op,
'id' : id,
'rand' : rand,
'referer' : referer,
'method_free' : submit,
'download_direct' : 1
}
for r in re.finditer('<input type="hidden" name="ipcount_val" id="ipcount_val" value="([^\"]+)">.*?<input type="hidden" name="op" value="([^\"]+)">.*? <input type="hidden" name="usr_login" value="([^\"]*)">.*?<input type="hidden" name="id" value="([^\"]+)">.*?<input type="hidden" name="fname" value="([^\"]*)">.*?<input type="hidden" name="referer" value="([^\"]*)">' ,response_data, re.DOTALL):
ipcount,op,usr_login,id,fname,referer = r.groups()
values = {
'ipcount_val' : ipcount,
'op' : op,
'usr_login' : usr_login,
'id' : id,
'fname' : fname,
'referer' : referer,
'method_free' : 'Slow access'
}
values = {}
variable = 'op'
for r in re.finditer('<input type="(hidden)" name="'+variable+'" value="([^\"]*)">' ,response_data, re.DOTALL):
hidden,value = r.groups()
values[variable] = value
variable = 'usr_login'
for r in re.finditer('<input type="(hidden)" name="'+variable+'" value="([^\"]*)">' ,response_data, re.DOTALL):
hidden,value = r.groups()
values[variable] = value
variable = 'id'
for r in re.finditer('<input type="(hidden)" name="'+variable+'" value="([^\"]*)">' ,response_data, re.DOTALL):
hidden,value = r.groups()
values[variable] = value
variable = 'fname'
for r in re.finditer('<input type="(hidden)" name="'+variable+'" value="([^\"]*)">' ,response_data, re.DOTALL):
hidden,value = r.groups()
values[variable] = value
variable = 'referer'
for r in re.finditer('<input type="(hidden)" name="'+variable+'" value="([^\"]*)">' ,response_data, re.DOTALL):
hidden,value = r.groups()
values[variable] = value
variable = 'hash'
for r in re.finditer('<input type="(hidden)" name="'+variable+'" value="([^\"]*)">' ,response_data, re.DOTALL):
hidden,value = r.groups()
values[variable] = value
variable = 'inhu'
for r in re.finditer('<input type="(hidden)" name="'+variable+'" value="([^\"]*)">' ,response_data, re.DOTALL):
hidden,value = r.groups()
values[variable] = value
variable = 'method_free'
for r in re.finditer('<input type="(hidden)" name="'+variable+'" value="([^\"]*)">' ,response_data, re.DOTALL):
hidden,value = r.groups()
values[variable] = value
variable = 'method_premium'
for r in re.finditer('<input type="(hidden)" name="'+variable+'" value="([^\"]*)">' ,response_data, re.DOTALL):
hidden,value = r.groups()
values[variable] = value
variable = 'rand'
for r in re.finditer('<input type="(hidden)" name="'+variable+'" value="([^\"]*)">' ,response_data, re.DOTALL):
hidden,value = r.groups()
values[variable] = value
variable = 'down_direct'
for r in re.finditer('<input type="(hidden)" name="'+variable+'" value="([^\"]*)">' ,response_data, re.DOTALL):
hidden,value = r.groups()
values[variable] = value
variable = 'file_size_real'
for r in re.finditer('<input type="(hidden)" name="'+variable+'" value="([^\"]*)">' ,response_data, re.DOTALL):
hidden,value = r.groups()
values[variable] = value
variable = 'imhuman'
for r in re.finditer('<input type="(submit)" name="'+variable+'" value="([^\"]*)" id="btn_download">' ,response_data, re.DOTALL):
hidden,value = r.groups()
values[variable] = value
variable = 'gfk'
for r in re.finditer('(name): \''+variable+'\', value: \'([^\']*)\'' ,response_data, re.DOTALL):
hidden,value = r.groups()
values[variable] = value
variable = '_vhash'
for r in re.finditer('(name): \''+variable+'\', value: \'([^\']*)\'' ,response_data, re.DOTALL):
hidden,value = r.groups()
values[variable] = value
# values['referer'] = ''
for r in re.finditer('<input type="hidden" name="op" value="([^\"]+)">.*?<input type="hidden" name="id" value="([^\"]+)">.*?<input type="hidden" name="rand" value="([^\"]*)">.*?<input type="hidden" name="referer" value="([^\"]*)">.*?<input type="hidden" name="plugins_are_not_allowed" value="([^\"]+)"/>.*?<input type="hidden" name="method_free" value="([^\"]*)">' ,response_data, re.DOTALL):
op,id,rand,referer,plugins,submit = r.groups()
values = {
'op' : op,
'id' : id,
'rand' : rand,
'referer' : referer,
'plugins_are_not_allowed' : plugins,
'method_free' : submit,
'download_direct' : 1
}
# req = urllib2.Request(url, urllib.urlencode(values), self.getHeadersList(url))
req = urllib2.Request(url)
if self.domain == 'thefile.me':
values['method_free'] = 'Free Download'
elif self.domain == 'sharesix.com':
values['method_free'] = 'Free'
elif 'streamcloud.eu' in self.domain:
xbmcgui.Dialog().ok(ADDON.getLocalizedString(30000), ADDON.getLocalizedString(30037) + str(10))
xbmc.sleep((int(10)+1)*1000)
elif self.domain == 'vidhog.com':
xbmcgui.Dialog().ok(ADDON.getLocalizedString(30000), ADDON.getLocalizedString(30037) + str(15))
xbmc.sleep((int(15)+1)*1000)
elif self.domain == 'vidto.me':
xbmcgui.Dialog().ok(ADDON.getLocalizedString(30000), ADDON.getLocalizedString(30037) + str(6))
xbmc.sleep((int(6)+1)*1000)
elif self.domain == 'vodlocker.com':
xbmcgui.Dialog().ok(ADDON.getLocalizedString(30000), ADDON.getLocalizedString(30037) + str(3))
xbmc.sleep((int(3)+1)*1000)
elif self.domain == 'hcbit.com':
try:
# response = urllib2.urlopen(req)
response = opener.open(req, urllib.urlencode(values))
except urllib2.URLError, e:
if e.code == 403 or e.code == 401:
self.login()
try:
response = opener.open(req, urllib.urlencode(values))
except urllib2.URLError, e:
log(str(e), True)
return ('', '')
else:
log(str(e), True)
return ('', '')
try:
if response.info().getheader('Location') != '':
return (response.info().getheader('Location') + '|' + self.getHeadersEncoded(url), fname)
except:
for r in re.finditer('\'(file)\'\,\'([^\']+)\'' ,response_data, re.DOTALL):
streamType,streamURL = r.groups()
return (streamURL + '|' + self.getHeadersEncoded(url), fname)
for r in re.finditer('\<td (nowrap)\>([^\<]+)\<\/td\>' ,response_data, re.DOTALL):
deliminator,fileName = r.groups()
for r in re.finditer('(\|)([^\|]{42})\|' ,response_data, re.DOTALL):
deliminator,fileID = r.groups()
streamURL = 'http://cloud1.hcbit.com/cgi-bin/dl.cgi/'+fileID+'/'+fileName
return (streamURL + '|' + self.getHeadersEncoded(url), fname)
if self.domain == 'bestreams.net':
file_id = ''
aff = ''
variable = 'file_id'
for r in re.finditer('\''+variable+'\', (\')([^\']*)\'' ,response_data, re.DOTALL):
hidden,value = r.groups()
file_id = value
variable = 'aff'
for r in re.finditer('\''+variable+'\', (\')([^\']*)\'' ,response_data, re.DOTALL):
hidden,value = r.groups()
aff = value
xbmc.sleep((int(2)+1)*1000)
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookiejar))
opener.addheaders = [ ('User-Agent' , self.user_agent), ('Referer', url), ('Cookie', 'lang=1; file_id='+file_id+'; aff='+aff+';')]
elif self.domain == 'thevideo.me':
for r in re.finditer('\,\s+\'file\'\s+\:\s+\'([^\']+)\'',
response_data, re.DOTALL):
streamURL = r.group(1)
return (streamURL,fname)
elif self.domain == 'vidzi.tv':
for r in re.finditer('\s+file:\s+\"([^\"]+)\"',
response_data, re.DOTALL):
streamURL = r.group(1)
return (streamURL,fname)
# if action fails, validate login
try:
# response = urllib2.urlopen(req)
response = opener.open(req, urllib.urlencode(values))
except urllib2.URLError, e:
if e.code == 403 or e.code == 401:
self.login()
try:
response = opener.open(req, urllib.urlencode(values))
except urllib2.URLError, e:
log(str(e), True)
return ('','')
else:
log(str(e), True)
return ('','')
response_data = response.read()
response.close()
op=''
for r in re.finditer('<input type="hidden" name="op" value="([^\"]+)">.*?<input type="hidden" name="id" value="([^\"]+)">.*?<input type="hidden" name="rand" value="([^\"]*)">.*?<input type="hidden" name="referer" value="([^\"]*)">.*?<input type="hidden" name="method_free" value="([^\"]*)">' ,response_data, re.DOTALL):
op,id,rand,referer,submit = r.groups()
values = {
'op' : op,
'id' : id,
'rand' : rand,
'referer' : referer,
'method_free' : submit,
'download_direct' : 1
}
streamURL=''
title = ''
for r in re.finditer('\<(title)\>([^\>]*)\<\/title\>' ,response_data, re.DOTALL):
titleID,title = r.groups()
# for thefile
if self.domain == 'thefile.me':
downloadAddress = ''
for r in re.finditer('\<(img) src\=\"http\:\/\/([^\/]+)\/[^\"]+\" style' ,response_data, re.DOTALL):
downloadTag,downloadAddress = r.groups()
for r in re.finditer('(\|)([^\|]{56})\|' ,response_data, re.DOTALL):
deliminator,fileID = r.groups()
streamURL = 'http://'+str(downloadAddress)+'/d/'+fileID+'/video.mp4'
elif self.domain == 'sharerepo.com':
for r in re.finditer('(file)\: \'([^\']+)\'\,' ,response_data, re.DOTALL):
streamType,streamURL = r.groups()
for r in re.finditer('(\|)([^\|]{60})\|' ,response_data, re.DOTALL):
deliminator,fileID = r.groups()
streamURL = 'http://37.48.80.43/d/'+fileID+'/video.mp4?start=0'
elif self.domain == 'filenuke.com':
for r in re.finditer('(\|)([^\|]{56})\|' ,response_data, re.DOTALL):
deliminator,fileID = r.groups()
streamURL = 'http://37.252.3.244/d/'+fileID+'/video.flv?start=0'
elif self.domain == 'sharerepo.com':
for r in re.finditer('(file)\: \'([^\']+)\'\,' ,response_data, re.DOTALL):
streamType,streamURL = r.groups()
elif self.domain == 'letwatch.us':
for r in re.finditer('\[IMG\]http://([^\/]+)\/',
response_data, re.DOTALL):
IP = r.group(1)
for r in re.finditer('\|([^\|]{60})\|',
response_data, re.DOTALL):
fileID = r.group(1)
streamURL = 'http://'+IP+'/'+fileID+'/v.flv'
elif self.domain == 'thevideo.me':
for r in re.finditer('\,\s+\'file\'\s+\:\s+\'([^\']+)\'',
response_data, re.DOTALL):
streamURL = r.group(1)
elif self.domain == 'vidto.me':
for r in re.finditer('var file_link = \'([^\']+)\'',
response_data, re.DOTALL):
streamURL = r.group(1)
elif self.domain == 'allmyvideos.net':
for r in re.finditer('\"file\" : \"([^\"]+)\"',
response_data, re.DOTALL):
streamURL = r.group(1)
elif self.domain == 'realvid.net':
for r in re.finditer('file:\s?\'([^\']+)\'',
response_data, re.DOTALL):
streamURL = r.group(1)
elif self.domain == 'uptobox.com' or self.domain == 'uptostream.com':
for r in re.finditer('\<a href\=\"([^\"]+)\"\>\s+\<span class\=\"button_upload green\"\>',
response_data, re.DOTALL):
streamURL = r.group(1)
return (streamURL, fname)
for r in re.finditer('\<source src=\'([^\']+)\'',
response_data, re.DOTALL):
streamURL = r.group(1)
return (streamURL, fname)
timeout = 0
if op != "" and streamURL == '':
for r in re.finditer('Wait<strong><span id="(.*?)">(\d+)</span> seconds</strong>' ,response_data, re.DOTALL):
id,timeout = r.groups()
for r in re.finditer('<p class="(err)"><center><b>(.*?)</b>' ,response_data, re.DOTALL):
id,error = r.groups()
xbmcgui.Dialog().ok(ADDON.getLocalizedString(30000), error)
return ('','')
req = urllib2.Request(url)
if timeout > 0:
xbmcgui.Dialog().ok(ADDON.getLocalizedString(30000), ADDON.getLocalizedString(30037) + str(timeout))
xbmc.sleep((int(timeout)+1)*1000)
# if action fails, validate login
try:
response = opener.open(req, urllib.urlencode(values))
except urllib2.URLError, e:
if e.code == 403 or e.code == 401:
self.login()
try:
response = opener.open(req, urllib.urlencode(values))
except urllib2.URLError, e:
log(str(e), True)
return ('','')
else:
log(str(e), True)
return ('','')
response_data = response.read()
response.close()
for r in re.finditer('<a href="([^\"]+)">(Click here to start your download)</a>' ,response_data, re.DOTALL):
streamURL,downloadlink = r.groups()
#vodlocker.com
if streamURL == '':
# fetch video title, download URL and docid for stream link
for r in re.finditer('(file)\: \"([^\"]+)"\,' ,response_data, re.DOTALL):
streamType,streamURL = r.groups()
if 'mp4' in streamURL:
break
# mightyupload.com
if streamURL == '':
# fetch video title, download URL and docid for stream link
for r in re.finditer('var (file_link) = \'([^\']+)\'' ,response_data, re.DOTALL):
streamType,streamURL = r.groups()
# vidhog.com
if streamURL == '':
# fetch video title, download URL and docid for stream link
for r in re.finditer('(product_download_url)=([^\']+)\'' ,response_data, re.DOTALL):
streamType,streamURL = r.groups()
# vidspot.net
if streamURL == '':
# fetch video title, download URL and docid for stream link
for r in re.finditer('"(file)" : "([^\"]+)"\,' ,response_data, re.DOTALL):
streamType,streamURL = r.groups()
# uploadc.com
if streamURL == '':
# fetch video title, download URL and docid for stream link
for r in re.finditer('\'(file)\',\'([^\']+)\'\)\;' ,response_data, re.DOTALL):
streamType,streamURL = r.groups()
streamURL = streamURL + '|' + self.getHeadersEncoded(url)
# return 'http://93.120.27.101:8777/pgjtbhuu6coammfvg5gfae6xogigs5cw6gsx3ey7yt6hmihwhpcixuiaqmza/v.mp4'
return (streamURL, fname)
class MyHTTPErrorProcessor(urllib2.HTTPErrorProcessor):
def http_response(self, request, response):
code, msg, hdrs = response.code, response.msg, response.info()
# only add this line to stop 302 redirection.
if code == 302: return response
if not (200 <= code < 300):
response = self.parent.error(
'http', request, response, code, msg, hdrs)
return response
https_response = http_response
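# Hedged usage sketch (the host name, credentials and user agent below are
# placeholders, and the parent cloudservice class is assumed to set up the
# cookie jar and instance name):
#
#   service = xfilesharing('myinstance', 'uptobox.com', 'user', 'secret', '',
#                          'Mozilla/5.0')
#   service.login()
#   videos = service.getVideosList()   # {title: {'url': ..., 'mediaType': ...}}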
|
from __future__ import absolute_import
__author__ = 'noe'
def load_2well_discrete():
from .double_well_discrete import DoubleWell_Discrete_Data
return DoubleWell_Discrete_Data()
def get_bpti_test_data():
""" Returns a dictionary containing C-alpha coordinates of a truncated
BPTI trajectory.
Notes
-----
You will have to load the data from disc yourself. See e.g.
:py:func:`pyemma.coordinates.load`.
Returns
-------
res : {trajs : list, top: str}
trajs is a list of filenames
top is str pointing to the path of the topology file.
"""
import os
from glob import glob
import pkg_resources
path = pkg_resources.resource_filename('pyemma.coordinates.tests', 'data/')
top = pkg_resources.resource_filename('pyemma.coordinates.tests', 'data/bpti_ca.pdb')
trajs = glob(path + os.sep + "*.xtc")
trajs = filter(lambda f: not f.endswith("bpti_mini.xtc"), trajs)
trajs = sorted(trajs)
assert len(trajs) == 3
return {'trajs': trajs, 'top': top}
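# Hedged usage sketch (an assumption, following the docstring's pointer to
# pyemma.coordinates.load):
#
#   data = get_bpti_test_data()
#   import pyemma.coordinates
#   trajs = pyemma.coordinates.load(data['trajs'], top=data['top'])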
|
#!/usr/bin/env python3
import pickle
import math
import numpy as np
import re
import sys
model_file = sys.argv[1]
trim_trailing_zeros = re.compile('0+p')
def small_hex(f):
hf = float(f).hex()
return trim_trailing_zeros.sub('p', hf)
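# Illustrative examples (not in the original script): small_hex(1.0) yields
# '0x1.p+0' and small_hex(0.0) yields '0x0.p+0'; stripping the trailing zeros
# before the exponent keeps the generated C tables compact while staying exact.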
def process_column(v, pad):
""" process and pad """
return [small_hex(f) for f in v] + [small_hex(0.0)] * pad
def cformatM(fh, name, X):
nrq = int(math.ceil(X.shape[1] / 4.0))
pad = nrq * 4 - X.shape[1]
lines = map(lambda v: ', '.join(process_column(v, pad)), X)
fh.write('float {}[] = {}\n'.format('__' + name, '{'))
fh.write('\t' + ',\n\t'.join(lines))
fh.write('};\n')
fh.write('_Mat {} = {}\n\t.nr = {},\n\t.nrq = {},\n\t.nc = {},\n\t.stride = {},\n\t.data.f = {}\n{};\n'.format('_' + name, '{', X.shape[1], nrq, X.shape[0], nrq * 4, '__' + name, '}'))
fh.write('const scrappie_matrix {} = &{};\n\n'.format(name, '_' + name))
def cformatV(fh, name, X):
nrq = int(math.ceil(X.shape[0] / 4.0))
pad = nrq * 4 - X.shape[0]
lines = ', '.join(list(map(lambda f: small_hex(f), X)) + [small_hex(0.0)] * pad)
fh.write('float {}[] = {}\n'.format('__' + name, '{'))
fh.write('\t' + lines)
fh.write('};\n')
fh.write('_Mat {} = {}\n\t.nr = {},\n\t.nrq = {},\n\t.nc = {},\n\t.stride = {},\n\t.data.f = {}\n{};\n'.format('_' + name, '{', X.shape[0], nrq, 1, nrq * 4, '__' + name, '}'))
fh.write('const scrappie_matrix {} = &{};\n\n'.format(name, '_' + name))
def reshape_lstmM(mat):
_, isize = mat.shape
return mat.reshape((-1, 4, isize)).transpose([1, 0, 2]).reshape((-1, isize))
def reshape_lstmV(mat):
return mat.reshape((-1, 4)).transpose().reshape(-1)
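# Toy example of the reshuffling (illustrative; the layout interpretation --
# Sloika stores the four LSTM gate blocks per unit, scrappie expects them
# gate-major -- is an assumption):
#
#   m = np.arange(8).reshape(8, 1)          # two units x four gate rows
#   reshape_lstmM(m).ravel().tolist()       # -> [0, 4, 1, 5, 2, 6, 3, 7]
#   reshape_lstmV(np.arange(8)).tolist()    # -> [0, 4, 1, 5, 2, 6, 3, 7]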
with open(model_file, 'rb') as fh:
network = pickle.load(fh, encoding='latin1')
assert network.version == 1, "Sloika model must be version 1. Perhaps you need to run Sloika's model_upgrade.py"
sys.stdout.write("""#pragma once
#ifndef NANONET_EVENTS_MODEL_H
#define NANONET_EVENTS_MODEL_H
#include "../util.h"
""")
""" First LSTM layer
"""
bilstm1 = network.sublayers[1]
lstm = bilstm1.sublayers[0]
cformatM(sys.stdout, 'lstmF1_iW', reshape_lstmM(lstm.iW.get_value()))
cformatM(sys.stdout, 'lstmF1_sW', reshape_lstmM(lstm.sW.get_value()))
cformatV(sys.stdout, 'lstmF1_b', reshape_lstmV(lstm.b.get_value().reshape(-1)))
cformatV(sys.stdout, 'lstmF1_p', lstm.p.get_value().reshape(-1))
lstm = bilstm1.sublayers[1].sublayers[0]
cformatM(sys.stdout, 'lstmB1_iW', reshape_lstmM(lstm.iW.get_value()))
cformatM(sys.stdout, 'lstmB1_sW', reshape_lstmM(lstm.sW.get_value()))
cformatV(sys.stdout, 'lstmB1_b', reshape_lstmV(lstm.b.get_value().reshape(-1)))
cformatV(sys.stdout, 'lstmB1_p', lstm.p.get_value().reshape(-1))
""" First feed forward layer
"""
size = network.sublayers[2].insize // 2
cformatM(sys.stdout, 'FF1_Wf', network.sublayers[2].W.get_value()[:, : size])
cformatM(sys.stdout, 'FF1_Wb', network.sublayers[2].W.get_value()[:, size : 2 * size])
cformatV(sys.stdout, 'FF1_b', network.sublayers[2].b.get_value())
""" Second LSTM layer
"""
bilstm2 = network.sublayers[3]
lstm = bilstm2.sublayers[0]
cformatM(sys.stdout, 'lstmF2_iW', reshape_lstmM(lstm.iW.get_value()))
cformatM(sys.stdout, 'lstmF2_sW', reshape_lstmM(lstm.sW.get_value()))
cformatV(sys.stdout, 'lstmF2_b', reshape_lstmV(lstm.b.get_value().reshape(-1)))
cformatV(sys.stdout, 'lstmF2_p', lstm.p.get_value().reshape(-1))
lstm = bilstm2.sublayers[1].sublayers[0]
cformatM(sys.stdout, 'lstmB2_iW', reshape_lstmM(lstm.iW.get_value()))
cformatM(sys.stdout, 'lstmB2_sW', reshape_lstmM(lstm.sW.get_value()))
cformatV(sys.stdout, 'lstmB2_b', reshape_lstmV(lstm.b.get_value().reshape(-1)))
cformatV(sys.stdout, 'lstmB2_p', lstm.p.get_value().reshape(-1))
""" Second feed forward layer
"""
size = network.sublayers[4].insize // 2
cformatM(sys.stdout, 'FF2_Wf', network.sublayers[4].W.get_value()[:, : size])
cformatM(sys.stdout, 'FF2_Wb', network.sublayers[4].W.get_value()[:, size : 2 * size])
cformatV(sys.stdout, 'FF2_b', network.sublayers[4].b.get_value())
""" Softmax layer
"""
nstate = network.sublayers[5].W.get_value().shape[0]
shuffle = np.append(np.arange(nstate - 1) + 1, 0)
cformatM(sys.stdout, 'FF3_W', network.sublayers[5].W.get_value()[shuffle])
cformatV(sys.stdout, 'FF3_b', network.sublayers[5].b.get_value()[shuffle])
sys.stdout.write('#endif /* NANONET_EVENTS_MODEL_H */')
|
# -*- coding: utf-8 -*-
'''
Performance Testing
Requirements:
pip install pyahocorasick
'''
import random
import string
import time
from py_aho_corasick import py_aho_corasick
import ahocorasick
rand_str = lambda n: ''.join([random.choice(string.ascii_lowercase) for i in range(n)])
if __name__ == '__main__':
N = 1000000
text = rand_str(N)
keywords = list()
NW = 50000
for i in range(NW):
nw = random.randint(5,10)
kw = rand_str(nw)
keywords.append(kw)
# pyahocorasick
start_t = time.time()
A = ahocorasick.Automaton()
for idx, key in enumerate(keywords):
A.add_word(key, (idx, key))
A.make_automaton()
delta_build1 = time.time() - start_t
start_t = time.time()
cnt1 = 0
for end_index, (insert_order, original_value) in A.iter(text):
start_index = end_index - len(original_value) + 1
assert text[start_index:start_index + len(original_value)] == original_value
cnt1 += 1
delta_search1 = time.time() - start_t
# py_aho_corasick
start_t = time.time()
A = py_aho_corasick.Automaton(keywords)
delta_build2 = time.time() - start_t
start_t = time.time()
kv = A.get_keywords_found(text)
cnt2 = 0
for idx,k,v in kv:
assert text[idx:idx+len(k)] == k
cnt2 += 1
delta_search2 = time.time() - start_t
# brute force
start_t = time.time()
cnt3 = 0
for kw in keywords:
beg = 0
while beg < len(text):
idx = text.find(kw, beg)
if idx == -1:
break
else:
assert text[idx:idx+len(kw)] == kw
beg = idx + 1
cnt3 += 1
delta_search3 = time.time() - start_t
print(cnt1)
assert cnt1 == cnt2
assert cnt1 == cnt3
# output
print('pyahocorasick: text of {0} length, {1} keywords, building time {2} and searching time cost {3}'.format(N,NW,delta_build1,delta_search1))
print('py_aho_corasick: text of {0} length, {1} keywords, building time {2} and searching time cost {3}'.format(N,NW,delta_build2,delta_search2))
print('brute force: text of {0} length, {1} keywords, building time {2} and searching time cost {3}'.format(N,NW,0,delta_search3))
|
from pyparsing import *
import re
ParserElement.enablePackrat()
from .tree import Node, Operator
import pdb
def rparser():
expr = Forward()
lparen = Literal("(").suppress()
rparen = Literal(")").suppress()
double = Word(nums + ".").setParseAction(lambda t:float(t[0]))
integer = pyparsing_common.signed_integer
number = pyparsing_common.number
ident = Word(initChars = alphas + "_", bodyChars = alphanums + "_" + ".")
string = dblQuotedString
funccall = Group(ident + lparen + Group(Optional(delimitedList(expr))) +
rparen + Optional(integer)).setResultsName("funccall")
operand = number | string | funccall | ident
expop = Literal('^')
multop = oneOf('* /')
plusop = oneOf('+ -')
introp = oneOf('| :')
expr << infixNotation(operand,
[(expop, 2, opAssoc.RIGHT),
(introp, 2, opAssoc.LEFT),
(multop, 2, opAssoc.LEFT),
(plusop, 2, opAssoc.LEFT),]).setResultsName('expr')
return expr
PARSER = rparser()
def parse(text):
def walk(l):
## ['log', [['cropland', '+', 1]]]
## ['poly', [['log', [['cropland', '+', 1]]], 3], 3]
## [[['factor', ['unSub'], 21], ':', ['poly', [['log', [['cropland', '+', 1]]], 3], 3], ':', ['poly', [['log', [['hpd', '+', 1]]], 3], 2]]]
if type(l) in (int, float):
return l
if isinstance(l, str):
if l == 'Intercept' or l == '"Intercept"':
return 1
elif l[0] == '"' and l[-1] == '"':
return l[1:-1]
else:
return l
if len(l) == 1 and type(l[0]) in (int, str, float, ParseResults):
return walk(l[0])
if l[0] == 'factor':
assert len(l) == 3, "unexpected number of arguments to factor"
assert len(l[1]) == 1, "argument to factor is an expression"
assert type(l[2]) == int, "second argument to factor is not an int"
return Node(Operator('=='), (Node(Operator('in'),
(l[1][0], 'float32[:]')), l[2]))
if l[0] == 'poly':
assert len(l) in (2, 3), "unexpected number of arguments to poly"
assert isinstance(l[1][1], int), "degree argument to poly is not an int"
inner = walk(l[1][0])
degree = l[1][1]
if len(l) == 2:
pwr = 1
else:
assert type(l[2]) == int, "power argument to poly is not an int"
pwr = l[2]
return Node(Operator('sel'), (Node(Operator('poly'), (inner, degree)),
pwr))
if l[0] == 'log':
assert len(l) == 2, "unexpected number of arguments to log"
args = walk(l[1])
return Node(Operator('log'), [args])
if l[0] == 'scale':
assert len(l[1]) in (3, 5), "unexpected number of arguments to scale"
args = walk(l[1][0])
return Node(Operator('scale'), [args] + l[1][1:])
if l[0] == 'I':
assert len(l) == 2, "unexpected number of arguments to I"
args = walk(l[1])
return Node(Operator('I'), [args])
# Only used for testing
if l[0] in ('sin', 'tan'):
assert len(l) == 2, "unexpected number of arguments to %s" % l[0]
args = walk(l[1])
return Node(Operator(l[0]), [args])
if l[0] in ('max', 'min', 'pow'):
assert len(l) == 2, "unexpected number of arguments to %s" % l[0]
assert len(l[1]) == 2, "unexpected number of arguments to %s" % l[0]
left = walk(l[1][0])
right = walk(l[1][1])
return Node(Operator(l[0]), (left, right))
if l[0] == 'exp':
assert len(l) == 2, "unexpected number of arguments to exp"
args = walk(l[1])
return Node(Operator('exp'), [args])
if l[0] == 'clip':
assert len(l) == 2, "unexpected number of arguments to %s" % l[0]
assert len(l[1]) == 3, "unexpected number of arguments to %s" % l[0]
left = walk(l[1][0])
low = walk(l[1][1])
high = walk(l[1][2])
return Node(Operator(l[0]), (left, low, high))
if l[0] == 'inv_logit':
assert len(l) == 2, "unexpected number of arguments to inv_logit"
args = walk(l[1])
return Node(Operator('inv_logit'), [args])
## Only binary operators left
if len(l) == 1:
pdb.set_trace()
pass
assert len(l) % 2 == 1, "unexpected number of arguments for binary operator"
assert len(l) != 1, "unexpected number of arguments for binary operator"
## FIXME: this only works for associative operators. Need to either
## special-case division or include an attribute that specifies
## whether the op is associative.
left = walk(l.pop(0))
op = l.pop(0)
right = walk(l)
if type(right) != Node:
return Node(Operator(op), (left, right))
elif right.type.type == op:
return Node(Operator(op), (left, ) + right.args)
return Node(Operator(op), (left, right))
### FIXME: hack
if not isinstance(text, str):
text = str(text)
new_text = re.sub('newrange = c\((\d), (\d+)\)', '\\1, \\2', text)
new_text = new_text.replace('rescale(', 'scale(')
nodes = PARSER.parseString(new_text, parseAll=True)
tree = walk(nodes)
if isinstance(tree, (str, int, float)):
tree = Node(Operator('I'), [tree])
return tree
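# Illustrative call (an assumption, not part of the original module; Node's
# repr comes from .tree, so only the structure is sketched here):
#
#   tree = parse('poly(log(cropland + 1), 3)')
#   # -> Node(sel, (Node(poly, (Node(log, [Node(+, ('cropland', 1))]), 3)), 1))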
|
# Version: 0.15+dev
"""The Versioneer - like a rocketeer, but for versions.
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, and pypy
* [![Latest Version]
(https://pypip.in/version/versioneer/badge.svg?style=flat)
](https://pypi.python.org/pypi/versioneer/)
* [![Build Status]
(https://travis-ci.org/warner/python-versioneer.png?branch=master)
](https://travis-ci.org/warner/python-versioneer)
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere in your $PATH
* add a `[versioneer]` section to your setup.cfg (see below)
* run `versioneer install` in your source tree, commit the results
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
To allow `setup.py` to compute a version too, a `versioneer.py` is added to
the top level of your source tree, next to `setup.py` and the `setup.cfg`
that configures it. This overrides several distutils/setuptools commands to
compute the version when invoked, and changes `setup.py build` and `setup.py
sdist` to replace `_version.py` with a small static file that contains just
the generated version data.
## Installation
First, decide on values for the following configuration variables:
* `VCS`: the version control system you use. Currently accepts "git".
* `style`: the style of version string to be produced. See "Styles" below for
details. Defaults to "pep440", which looks like
`TAG[+DISTANCE.gSHORTHASH[.dirty]]`.
* `versionfile_source`:
A project-relative pathname into which the generated version strings should
be written. This is usually a `_version.py` next to your project's main
`__init__.py` file, so it can be imported at runtime. If your project uses
`src/myproject/__init__.py`, this should be `src/myproject/_version.py`.
This file should be checked in to your VCS as usual: the copy created below
by `setup.py setup_versioneer` will include code that parses expanded VCS
keywords in generated tarballs. The 'build' and 'sdist' commands will
replace it with a copy that has just the calculated version string.
This must be set even if your project does not have any modules (and will
therefore never import `_version.py`), since "setup.py sdist" -based trees
still need somewhere to record the pre-calculated version strings. Anywhere
in the source tree should do. If there is a `__init__.py` next to your
`_version.py`, the `setup.py setup_versioneer` command (described below)
will append some `__version__`-setting assignments, if they aren't already
present.
* `versionfile_build`:
Like `versionfile_source`, but relative to the build directory instead of
the source directory. These will differ when your setup.py uses
'package_dir='. If you have `package_dir={'myproject': 'src/myproject'}`,
then you will probably have `versionfile_build='myproject/_version.py'` and
`versionfile_source='src/myproject/_version.py'`.
If this is set to None, then `setup.py build` will not attempt to rewrite
any `_version.py` in the built tree. If your project does not have any
libraries (e.g. if it only builds a script), then you should use
`versionfile_build = None`. To actually use the computed version string,
your `setup.py` will need to override `distutils.command.build_scripts`
with a subclass that explicitly inserts a copy of
`versioneer.get_version()` into your script file. See
`test/demoapp-script-only/setup.py` for an example.
* `tag_prefix`:
a string, like 'PROJECTNAME-', which appears at the start of all VCS tags.
If your tags look like 'myproject-1.2.0', then you should use
tag_prefix='myproject-'. If you use unprefixed tags like '1.2.0', this
should be an empty string, using either `tag_prefix=` or `tag_prefix=''`.
* `parentdir_prefix`:
  an optional string, frequently the same as tag_prefix, which appears at the
start of all unpacked tarball filenames. If your tarball unpacks into
'myproject-1.2.0', this should be 'myproject-'. To disable this feature,
just omit the field from your `setup.cfg`.
This tool provides one script, named `versioneer`. That script has one mode,
"install", which writes a copy of `versioneer.py` into the current directory
and runs `versioneer.py setup` to finish the installation.
To versioneer-enable your project:
* 1: Modify your `setup.cfg`, adding a section named `[versioneer]` and
populating it with the configuration values you decided earlier (note that
the option names are not case-sensitive):
````
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
````
* 2: Run `versioneer install`. This will do the following:
* copy `versioneer.py` into the top of your source tree
* create `_version.py` in the right place (`versionfile_source`)
* modify your `__init__.py` (if one exists next to `_version.py`) to define
`__version__` (by calling a function from `_version.py`)
* modify your `MANIFEST.in` to include both `versioneer.py` and the
generated `_version.py` in sdist tarballs
`versioneer install` will complain about any problems it finds with your
`setup.py` or `setup.cfg`. Run it multiple times until you have fixed all
the problems.
* 3: add a `import versioneer` to your setup.py, and add the following
arguments to the setup() call:
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
* 4: commit these changes to your VCS. To make sure you won't forget,
`versioneer install` will mark everything it touched for addition using
`git add`. Don't forget to add `setup.py` and `setup.cfg` too.
## Post-Installation Usage
Once established, all uses of your tree from a VCS checkout should get the
current version string. All generated tarballs should include an embedded
version string (so users who unpack them will not need a VCS tool installed).
If you distribute your project through PyPI, then the release process should
boil down to two steps:
* 1: git tag 1.0
* 2: python setup.py register sdist upload
If you distribute it through github (i.e. users use github to generate
tarballs with `git archive`), the process is:
* 1: git tag 1.0
* 2: git push; git push --tags
Versioneer will report "0+untagged.NUMCOMMITS.gHASH" until your tree has at
least one tag in its history.
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different flavors of version
information:
* `['version']`: A condensed version string, rendered using the selected
style. This is the most commonly used value for the project's version
string. The default "pep440" style yields strings like `0.11`,
`0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
below for alternative styles.
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None
* `['error']`: if the version string could not be computed, this will be set
to a string describing the problem, otherwise it will be None. It may be
useful to throw an exception in setup.py if this is set, to avoid e.g.
creating tarballs with a version string of "unknown".
Some variants are more useful than others. Including `full-revisionid` in a
bug report should allow developers to reconstruct the exact code being tested
(or indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
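As an illustrative sketch (not something Versioneer installs for you), a
`setup.py` could refuse to build when the version could not be computed:
    import versioneer
    versions = versioneer.get_versions()
    if versions['error']:
        raise SystemExit("version lookup failed: %s" % versions['error'])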
The installer adds the following text to your `__init__.py` to place a basic
version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Styles
The setup.cfg `style=` configuration controls how the VCS information is
rendered into a version string.
The default style, "pep440", produces a PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional "local
version" section with more detail for in-between builds. For Git, this is
TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
that this commit is two revisions ("+2") beyond the "0.11" tag. For released
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
Other styles are available. See details.md in the Versioneer source tree for
descriptions.
## Debugging
Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
to return a version of "0+unknown". To investigate the problem, run `setup.py
version`, which will run the version-lookup code in a verbose mode, and will
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
indicated by the release notes
* re-run `versioneer install` in your source tree, to replace
`SRC/_version.py`
* commit any changed files
### Upgrading to 0.15
Starting with this version, Versioneer is configured with a `[versioneer]`
section in your `setup.cfg` file. Earlier versions required the `setup.py` to
set attributes on the `versioneer` module immediately after import. The new
version will refuse to run (raising an exception during import) until you
have provided the necessary `setup.cfg` section.
In addition, the Versioneer package provides an executable named
`versioneer`, and the installation process is driven by running `versioneer
install`. In 0.14 and earlier, the executable was named
`versioneer-installer` and was run without an argument.
### Upgrading to 0.14
0.14 changes the format of the version string. 0.13 and earlier used
hyphen-separated strings like "0.11-2-g1076c97-dirty". 0.14 and beyond use a
plus-separated "local version" section, with dot-separated
components, like "0.11+2.g1076c97". PEP440-strict tools did not like the old
format, but should be ok with the new one.
### Upgrading from 0.11 to 0.12
Nothing special.
### Upgrading from 0.10 to 0.11
You must add a `versioneer.VCS = "git"` to your `setup.py` before re-running
`setup.py setup_versioneer`. This will enable the use of additional
version-control systems (SVN, etc) in the future.
## Future Directions
This tool is designed to make it easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## License
To make Versioneer easier to embed, all its code is dedicated to the public
domain. The `_version.py` that it creates is also in the public domain.
Specifically, both are released under the Creative Commons "Public Domain
Dedication" license (CC0-1.0), as described in
https://creativecommons.org/publicdomain/zero/1.0/ .
"""
from __future__ import print_function
try:
import configparser
except ImportError:
import ConfigParser as configparser
import errno
import json
import os
import re
import subprocess
import sys
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_root():
"""Get the project root directory.
We require that all commands are run from the project root, i.e. the
directory that contains setup.py, setup.cfg, and versioneer.py .
"""
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
# allow 'python path/to/setup.py COMMAND'
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = ("Versioneer was unable to run the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND').")
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
# tree) execute all dependencies in a single python process, so
# "versioneer" may be imported multiple times, and python's shared
# module-import table will cache the first one. So we can't use
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
if os.path.splitext(me)[0] != os.path.splitext(versioneer_py)[0]:
print("Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py))
except NameError:
pass
return root
def get_config_from_root(root):
"""Read the project setup.cfg file to determine Versioneer config."""
# This might raise EnvironmentError (if setup.cfg is missing), or
# configparser.NoSectionError (if it lacks a [versioneer] section), or
# configparser.NoOptionError (if it lacks "VCS="). See the docstring at
# the top of versioneer.py for instructions on writing your setup.cfg .
setup_cfg = os.path.join(root, "setup.cfg")
parser = configparser.SafeConfigParser()
with open(setup_cfg, "r") as f:
parser.readfp(f)
VCS = parser.get("versioneer", "VCS") # mandatory
def get(parser, name):
if parser.has_option("versioneer", name):
return parser.get("versioneer", name)
return None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, "style") or ""
cfg.versionfile_source = get(parser, "versionfile_source")
cfg.versionfile_build = get(parser, "versionfile_build")
cfg.tag_prefix = get(parser, "tag_prefix")
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ""
cfg.parentdir_prefix = get(parser, "parentdir_prefix")
cfg.verbose = get(parser, "verbose")
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
return None
return stdout
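# Illustrative call (an assumption, not an invocation Versioneer itself makes
# in this exact form): run_command tries each candidate executable in turn and
# returns the command's stdout (decoded on Python 3), or None if the command
# is missing or exits non-zero, e.g.
#   run_command(["git"], ["describe", "--tags"], cwd=".", verbose=True)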
LONG_VERSION_PY['git'] = r'''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.15+dev (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes
both the project name and a version string.
"""
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%%s', but '%%s' doesn't start with "
"prefix '%%s'" %% (root, dirname, parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = [r.strip() for r in refnames.strip("()").split(",")]
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(set(refs) - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None, "branch": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags",
"branch": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %%s" %% root)
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM). Note, for git v1.7
# and below, it is necessary to run "git update-index --refresh" first.
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# abbrev-ref available with git >= 1.7
branch_name = run_command(GITS, ["rev-parse", "--abbrev-ref", "HEAD"],
cwd=root).strip()
if branch_name == 'HEAD':
branches = run_command(GITS, ["branch", "--contains"],
cwd=root).split('\n')
branches = [branch[2:] for branch in branches if branch[4:5] != '(']
if 'master' in branches:
branch_name = 'master'
elif not branches:
branch_name = None
else:
# Pick the first branch that is returned. Good or bad.
branch_name = branches[0]
pieces['branch'] = branch_name
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
# Default matches v1.2.x, maint/1.2.x, 1.2.x, 1.x etc.
default_maint_branch_regexp = ".*([0-9]+\.)+x$"
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def add_one_to_version(version_string, number_index_to_increment=-1):
"""
Add one to a version string at the given numeric indices.
>>> add_one_to_version('v1.2.3')
'v1.2.4'
"""
# Break up the tag by number groups (preserving multi-digit
# numbers as multidigit)
parts = re.split("([0-9]+)", version_string)
digit_parts = [(i, part) for i, part in enumerate(parts)
if part.isdigit()]
# Deal with negative indexing.
increment_at_index = ((number_index_to_increment + len(digit_parts))
%% len(digit_parts))
for n_seen, (i, part) in enumerate(digit_parts):
if n_seen == increment_at_index:
parts[i] = str(int(part) + 1)
elif n_seen > increment_at_index:
parts[i] = '0'
return ''.join(parts)
def render_pep440_branch_based(pieces):
# [TAG+1 of minor number][.devDISTANCE][+gHEX]. The git short is
# included for dirty.
# exceptions:
# 1: no tags. 0.0.0.devDISTANCE[+gHEX]
replacements = {' ': '.', '(': '', ')': ''}
    branch_name = pieces.get('branch')
    for old, new in replacements.items():
        branch_name = branch_name.replace(old, new)
master = branch_name == 'master'
maint = re.match(default_maint_branch_regexp,
branch_name or '')
# If we are on a tag, just pep440-pre it.
if pieces["closest-tag"] and not (pieces["distance"] or
pieces["dirty"]):
rendered = pieces["closest-tag"]
else:
# Put a default closest-tag in.
if not pieces["closest-tag"]:
pieces["closest-tag"] = '0.0.0'
if pieces["distance"] or pieces["dirty"]:
if maint:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post%%d" %% pieces["distance"]
else:
rendered = add_one_to_version(pieces["closest-tag"])
if pieces["distance"]:
rendered += ".dev%%d" %% pieces["distance"]
suffix = []
# Put the branch name in if it isn't master nor a
# maintenance branch.
if not (master or maint):
suffix.append('%%s' %% (branch_name or 'unknown_branch'))
if pieces["dirty"]:
suffix.append('g%%s' %% pieces["short"])
rendered += '+%%s' %% ''.join(suffix)
else:
rendered = pieces["closest-tag"]
return rendered
STYLES = {'default': render_pep440,
'pep440': render_pep440,
'pep440-pre': render_pep440_pre,
'pep440-post': render_pep440_post,
'pep440-old': render_pep440_old,
'git-describe': render_git_describe,
'git-describe-long': render_git_describe_long,
'pep440-branch-based': render_pep440_branch_based,
}
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style:
style = 'default'
renderer = STYLES.get(style)
if not renderer:
raise ValueError("unknown style '%%s'" %% style)
rendered = renderer(pieces)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree"}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version"}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = [r.strip() for r in refnames.strip("()").split(",")]
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(set(refs) - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None, "branch": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags",
"branch": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM). Note, for git v1.7
# and below, it is necessary to run "git update-index --refresh" first.
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# abbrev-ref available with git >= 1.7
branch_name = run_command(GITS, ["rev-parse", "--abbrev-ref", "HEAD"],
cwd=root).strip()
if branch_name == 'HEAD':
branches = run_command(GITS, ["branch", "--contains"],
cwd=root).split('\n')
branches = [branch[2:] for branch in branches if branch[4:5] != '(']
if 'master' in branches:
branch_name = 'master'
elif not branches:
branch_name = None
else:
# Pick the first branch that is returned. Good or bad.
branch_name = branches[0]
pieces['branch'] = branch_name
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
"""Git-specific installation logic for Versioneer.
For Git, this means creating/changing .gitattributes to mark _version.py
for export-time keyword substitution.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source]
if ipy:
files.append(ipy)
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os.path.relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes
both the project name and a version string.
"""
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with "
"prefix '%s'" % (root, dirname, parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.15+dev) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
import sys
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
"""Try to determine the version from _version.py if present."""
try:
with open(filename) as f:
contents = f.read()
except EnvironmentError:
raise NotThisMethod("unable to read _version.py")
mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
raise NotThisMethod("no version_json in _version.py")
return json.loads(mo.group(1))
def write_to_version_file(filename, versions):
"""Write the given version number to the given _version.py file."""
os.unlink(filename)
contents = json.dumps(versions, sort_keys=True,
indent=1, separators=(",", ": "))
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (filename, versions["version"]))
# Default matches v1.2.x, maint/1.2.x, 1.2.x, 1.x etc.
default_maint_branch_regexp = ".*([0-9]+\.)+x$"
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
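# Worked example (illustrative): pieces = {"closest-tag": "1.2", "distance": 3,
# "short": "abc1234", "dirty": True} renders as "1.2+3.gabc1234.dirty"; with
# no tag at all the same pieces render as "0+untagged.3.gabc1234.dirty".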
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def add_one_to_version(version_string, number_index_to_increment=-1):
"""
Add one to a version string at the given numeric indices.
>>> add_one_to_version('v1.2.3')
    'v1.2.4'
    """
"""
# Break up the tag by number groups (preserving multi-digit
# numbers as multidigit)
parts = re.split("([0-9]+)", version_string)
digit_parts = [(i, part) for i, part in enumerate(parts)
if part.isdigit()]
# Deal with negative indexing.
increment_at_index = ((number_index_to_increment + len(digit_parts))
% len(digit_parts))
for n_seen, (i, part) in enumerate(digit_parts):
if n_seen == increment_at_index:
parts[i] = str(int(part) + 1)
elif n_seen > increment_at_index:
parts[i] = '0'
return ''.join(parts)
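# Additional illustrative calls (hedged; derived from the implementation
# above, the docstring only shows the default index):
#     add_one_to_version('v1.2.3', -1)  # -> 'v1.2.4'
#     add_one_to_version('v1.2.3', 0)   # -> 'v2.0.0' (later numbers reset to 0)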
def render_pep440_branch_based(pieces):
    """[TAG+1 of minor number][.devDISTANCE][+gHEX].
    The git short hash is included for dirty trees.
    Exceptions:
    1: no tags. 0.0.0.devDISTANCE[+gHEX]
    """
replacements = {' ': '.', '(': '', ')': ''}
    branch_name = pieces.get('branch') or ''
for old, new in replacements.items():
branch_name = branch_name.replace(old, new)
master = branch_name == 'master'
maint = re.match(default_maint_branch_regexp,
branch_name or '')
# If we are on a tag, just pep440-pre it.
if pieces["closest-tag"] and not (pieces["distance"] or
pieces["dirty"]):
rendered = pieces["closest-tag"]
else:
# Put a default closest-tag in.
if not pieces["closest-tag"]:
pieces["closest-tag"] = '0.0.0'
if pieces["distance"] or pieces["dirty"]:
if maint:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post%d" % pieces["distance"]
else:
rendered = add_one_to_version(pieces["closest-tag"])
if pieces["distance"]:
rendered += ".dev%d" % pieces["distance"]
suffix = []
# Put the branch name in if it isn't master nor a
# maintenance branch.
if not (master or maint):
suffix.append('%s' % (branch_name or 'unknown_branch'))
if pieces["dirty"]:
suffix.append('g%s' % pieces["short"])
rendered += '+%s' % ''.join(suffix)
else:
rendered = pieces["closest-tag"]
return rendered
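# Illustrative (hedged) example of render_pep440_branch_based with a
# hypothetical feature branch, 3 commits past the "1.2.0" tag, clean tree:
#     pieces = {"closest-tag": "1.2.0", "distance": 3, "short": "abc1234",
#               "dirty": False, "branch": "feature-x"}
#     render_pep440_branch_based(pieces)  # -> '1.2.1.dev3+feature-x'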
STYLES = {'default': render_pep440,
'pep440': render_pep440,
'pep440-pre': render_pep440_pre,
'pep440-post': render_pep440_post,
'pep440-old': render_pep440_old,
'git-describe': render_git_describe,
'git-describe-long': render_git_describe_long,
'pep440-branch-based': render_pep440_branch_based,
}
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style:
style = 'default'
renderer = STYLES.get(style)
if not renderer:
raise ValueError("unknown style '%s'" % style)
rendered = renderer(pieces)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
class VersioneerBadRootError(Exception):
"""The project root directory is unknown or missing key files."""
def get_versions(verbose=False):
"""Get the project version from whatever source is available.
Returns dict with two keys: 'version' and 'full'.
"""
if "versioneer" in sys.modules:
# see the discussion in cmdclass.py:get_cmdclass()
del sys.modules["versioneer"]
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, \
"please set versioneer.versionfile_source"
assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
versionfile_abs = os.path.join(root, cfg.versionfile_source)
# extract version from first of: _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = handlers.get("get_keywords")
from_keywords_f = handlers.get("keywords")
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print("got version from expanded keyword %s" % ver)
return ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print("got version from file %s %s" % (versionfile_abs, ver))
return ver
except NotThisMethod:
pass
from_vcs_f = handlers.get("pieces_from_vcs")
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print("got version from VCS %s" % ver)
return ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print("got version from parentdir %s" % ver)
return ver
except NotThisMethod:
pass
if verbose:
print("unable to compute version")
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None, "error": "unable to compute version"}
def get_version():
"""Get the short version string for this project."""
return get_versions()["version"]
def get_cmdclass():
"""Get the custom setuptools/distutils subclasses used by Versioneer."""
if "versioneer" in sys.modules:
del sys.modules["versioneer"]
# this fixes the "python setup.py develop" case (also 'install' and
# 'easy_install .'), in which subdependencies of the main project are
# built (using setup.py bdist_egg) in the same python process. Assume
# a main project A and a dependency B, which use different versions
# of Versioneer. A's setup.py imports A's Versioneer, leaving it in
# sys.modules by the time B's setup.py is executed, causing B to run
# with the wrong versioneer. Setuptools wraps the sub-dep builds in a
    # sandbox that restores sys.modules to its pre-build state, so the
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
# Also see https://github.com/warner/python-versioneer/issues/52
cmds = {}
# we add "version" to both distutils and setuptools
from distutils.core import Command
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
vers = get_versions(verbose=True)
print("Version: %s" % vers["version"])
print(" full-revisionid: %s" % vers.get("full-revisionid"))
print(" dirty: %s" % vers.get("dirty"))
if vers["error"]:
print(" error: %s" % vers["error"])
cmds["version"] = cmd_version
# we override "build_py" in both distutils and setuptools
#
# most invocation pathways end up running build_py:
# distutils/build -> build_py
# distutils/install -> distutils/build ->..
# setuptools/bdist_wheel -> distutils/install ->..
# setuptools/bdist_egg -> distutils/install_lib -> build_py
# setuptools/install -> bdist_egg ->..
# setuptools/develop -> ?
# we override different "build_py" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.build_py import build_py as _build_py
else:
from distutils.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_py.run(self)
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib,
cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_py"] = cmd_build_py
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
class cmd_build_exe(_build_exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
# we override different "sdist" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.sdist import sdist as _sdist
else:
from distutils.command.sdist import sdist as _sdist
class cmd_sdist(_sdist):
def run(self):
versions = get_versions()
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old
# version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
root = get_root()
cfg = get_config_from_root(root)
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory
# (remembering that it may be a hardlink) and replace it with an
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile,
self._versioneer_generated_versions)
cmds["sdist"] = cmd_sdist
return cmds
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
"""Main VCS-independent setup function for installing Versioneer."""
root = get_root()
try:
cfg = get_config_from_root(root)
except (EnvironmentError, configparser.NoSectionError,
configparser.NoOptionError) as e:
if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg",
file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
return 1
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG % {"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
"__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(root, "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if cfg.versionfile_source not in simple_includes:
print(" appending versionfile_source ('%s') to MANIFEST.in" %
cfg.versionfile_source)
with open(manifest_in, "a") as f:
f.write("include %s\n" % cfg.versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-time keyword
# substitution.
do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
return 0
def scan_setup_py():
"""Validate the contents of setup.py against Versioneer's expectations."""
found = set()
setters = False
errors = 0
with open("setup.py", "r") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")
if "versioneer.get_cmdclass()" in line:
found.add("cmdclass")
if "versioneer.get_version()" in line:
found.add("get_version")
if "versioneer.VCS" in line:
setters = True
if "versioneer.versionfile_source" in line:
setters = True
if len(found) != 3:
print("")
print("Your setup.py appears to be missing some important items")
print("(but I might be wrong). Please make sure it has something")
print("roughly like the following:")
print("")
print(" import versioneer")
print(" setup( version=versioneer.get_version(),")
print(" cmdclass=versioneer.get_cmdclass(), ...)")
print("")
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print("now lives in setup.cfg, and should be removed from setup.py")
print("")
errors += 1
return errors
if __name__ == "__main__":
cmd = sys.argv[1]
if cmd == "setup":
errors = do_setup()
errors += scan_setup_py()
if errors:
sys.exit(1)
|
# -*- coding: UTF-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
r"""Project dashboard for Apache(TM) Bloodhound
WikiMacros and WikiProcessors related to dashboard system.
"""
from ast import literal_eval
from genshi.builder import tag
from trac.web.chrome import Chrome
from trac.wiki.api import parse_args
from trac.wiki.macros import WikiMacroBase
from bhdashboard.web_ui import DashboardChrome, DashboardModule
GUIDE_NAME = 'Guide'
RENAME_MAP = {'TracGuide': GUIDE_NAME + '/Index',}
def new_name(name, force=False):
if name.startswith('Trac'):
return RENAME_MAP.get(name, GUIDE_NAME + '/' + name[4:])
else:
return name
class WidgetMacro(WikiMacroBase):
"""Embed Bloodhound widgets using WikiFormatting.
"""
#: A gettext domain to translate the macro description
_domain = None
#: A macro description
_description = """Embed Bloodhound widgets using WikiFormatting."""
def expand_macro(self, formatter, name, content):
"""Render widget contents by re-using wiki markup implementation
"""
if self.env[DashboardModule] is None:
return DashboardModule(self.env).alert_disabled()
largs, kwargs = parse_args(content, strict=True)
try:
            (widget_name,) = largs
except ValueError:
template = 'widget_alert.html'
data = {
'msgtype' : 'error',
'msglabel' : 'Error',
'msgbody' : tag('Expected ', tag.code(1),
' positional argument (i.e. widget name), but got ',
tag.code(len(largs)), ' instead'),
'msgdetails' : [
('Macro name', tag.code('WidgetMacro')),
('Arguments', ', '.join(largs) if largs \
else tag.span('None', class_='label')),
],
}
else:
widget_name = widget_name.strip()
            wopts, wargs = {}, {}
def parse_literal(value):
try:
return literal_eval(value)
except (SyntaxError, ValueError):
return value
for argnm, value in kwargs.iteritems():
if argnm.startswith('wo_'):
wopts[argnm[3:]] = value
else :
wargs[argnm] = parse_literal(value)
template = 'widget.html'
data = {
'args' : wargs,
'bhdb' : DashboardChrome(self.env),
'id' : None,
'opts' : wopts,
'widget' : widget_name
}
return Chrome(self.env).render_template(
formatter.req, template, data, fragment=True)
|
#!/usr/bin/env python
''' Test harness for validating MrMangler binary
Checks that Itanium manglings generated by MrMangler match the
counterpart demanglings from c++filt.
'''
import argparse
import os
import subprocess
import sys
def run_mangler(func_signature, exe):
''' Runs MrMangler executable
Args:
func_signature signature to mangle
exe MrMangler executable
Returns:
(return code, output) - return code and stdout from MrMangler execution
'''
child_echo = subprocess.Popen(
['echo', '-n', func_signature], stdout=subprocess.PIPE)
child_mangler = subprocess.Popen(
exe, stdin=child_echo.stdout, stdout=subprocess.PIPE)
child_echo.stdout.close()
output = child_mangler.communicate()[0].rstrip(b'\n')
return (child_mangler.returncode, output)
def run_filt(mangled):
''' Runs c++filt executable
Args:
mangled mangled symbol to demangle
Returns:
(return code, output) - return code and stdout from c++filt execution
'''
child_echo = subprocess.Popen(
['echo', '-n', mangled], stdout=subprocess.PIPE)
    child_filt = subprocess.Popen('c++filt', stdin=child_echo.stdout,
                                  stdout=subprocess.PIPE)
child_echo.stdout.close()
output = child_filt.communicate()[0].rstrip(b'\n')
return (child_filt.returncode, output)
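# Illustrative (hedged) round trip, assuming c++filt is on PATH and a
# hypothetical MrMangler binary at './MrMangler'; the mangling shown is
# only an example of what such a binary might emit:
#     run_mangler('foo(int)', './MrMangler')  # -> (0, b'_Z3fooi')
#     run_filt(b'_Z3fooi')                    # -> (0, b'foo(int)')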
def validate_environment(filename, exe):
'''Checks script arguments and platform, exiting if not suitable
Args:
filename - Input file containing func decls to test
exe - MrMangler executable file to test
'''
def is_exe(path):
''' Returns True if @path exists and has executable permissions '''
return os.path.isfile(path) and os.access(path, os.X_OK)
if os.name != 'posix':
print('Test script only supports *nix systems')
sys.exit()
if not os.path.isfile(filename):
print('Could not find input file ' + filename)
sys.exit()
if not is_exe(exe):
print('Could not find test executable ' + exe)
sys.exit()
# check c++filt exist
found = False
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, 'c++filt')
if is_exe(exe_file):
found = True
if not found:
print('Could not locate c++filt in PATH')
sys.exit()
def main():
'''Script entry point
Returns(int): number of fails
'''
parser = argparse.ArgumentParser(
description='Test runner for MrMangler using Linux c++filt '
'to verify manglings.')
parser.add_argument(
'filename', help='Input file containing function signatures to test. '
'One signature per line.')
parser.add_argument('binary', help='MrMangler binary executable to test.')
args = parser.parse_args()
# Exit script if input files don't exist or not running on supported OS
validate_environment(args.filename, args.binary)
with open(args.filename) as test_script_fd:
passes = [] # list containing passing inputs
fails = [] # list containing tuple of fails '(input, error)'
for line in test_script_fd:
line = line.rstrip('\n')
# Mangle function decl
(return_code, mangled) = run_mangler(line, args.binary)
if return_code != 0:
fails.append((line, mangled))
continue
# Demangle our mangling
(return_code, demangled) = run_filt(mangled)
if return_code != 0:
fails.append((line, mangled))
continue
# Check if demangling matches original decl
if demangled == line:
passes.append(line)
continue
# When demangling and original differ then mangle the demangling,
# if this matches the original mangling then our mangling was
# correct.
(return_code, fallback) = run_mangler(demangled, args.binary)
if (mangled == fallback) and (return_code == 0):
passes.append(line)
else:
fails.append((line, mangled))
# Print test results
print("Total tests run: {0}".format((len(passes) + len(fails))))
print("Passes: {0}".format(len(passes)))
print("Fails: {0}".format(len(fails)))
for (expected, actual) in fails:
print('\tExpected "{0}", was "{1}"'.format(expected, actual))
return len(fails)
if __name__ == '__main__':
ret_code = main()
sys.exit(ret_code)
|
import os
from tempfile import mktemp
from numpy import *
from libtiff import TIFF
def test_write_read():
for itype in [uint8, uint16, uint32, uint64,
int8, int16, int32, int64,
float32, float64,
complex64, complex128]:
image = array([[1, 2, 3], [4, 5, 6]], itype)
fn = mktemp('.tif')
tif = TIFF.open(fn, 'w')
tif.write_image(image)
tif.close()
tif = TIFF.open(fn, 'r')
image2 = tif.read_image()
tif.close()
os.remove(fn)
assert image.dtype == image2.dtype
assert (image == image2).all()
def test_slicing():
shape = (16, 16)
image = random.randint(255, size=shape)
for i in range(shape[0]):
for j in range(shape[1]):
image1 = image[:i + 1, :j + 1]
fn = mktemp('.tif')
tif = TIFF.open(fn, 'w')
tif.write_image(image1)
tif.close()
tif = TIFF.open(fn, 'r')
image2 = tif.read_image()
tif.close()
assert (image1 == image2).all(), repr((i, j))
os.remove(fn)
|
from setuptools import setup
from setuptools import find_packages
from distutils.extension import Extension
try:
from Cython.Build import cythonize
except ImportError:
def cythonize(extensions): return extensions
sources = ['rocksdb/_rocksdb.cpp']
else:
sources = ['rocksdb/_rocksdb.pyx']
mod1 = Extension(
'rocksdb._rocksdb',
sources,
extra_compile_args=[
'-std=c++11',
'-O3',
'-Wall',
'-Wextra',
'-Wconversion',
'-fno-strict-aliasing'
],
language='c++',
libraries=[
'rocksdb',
'snappy',
'bz2',
'z'
]
)
setup(
name="pyrocksdb",
version='0.5',
description="Python bindings for RocksDB",
keywords='rocksdb',
author='Stephan Hofmockel',
author_email="Use the github issues",
url="https://github.com/stephan-hof/pyrocksdb",
license='BSD License',
install_requires=['setuptools'],
package_dir={'rocksdb': 'rocksdb'},
packages=find_packages('.'),
ext_modules=cythonize([mod1]),
test_suite='rocksdb.tests',
include_package_data=True
)
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2017-2021 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Dialogs shown when there was a problem with a backend choice."""
import os
import sys
import functools
import html
import enum
import shutil
import argparse
import dataclasses
from typing import Any, List, Sequence, Tuple, Optional
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (QDialog, QPushButton, QHBoxLayout, QVBoxLayout, QLabel,
QMessageBox, QWidget)
from PyQt5.QtNetwork import QSslSocket
from qutebrowser.config import config, configfiles
from qutebrowser.utils import (usertypes, version, qtutils, log, utils,
standarddir)
from qutebrowser.misc import objects, msgbox, savemanager, quitter
class _Result(enum.IntEnum):
"""The result code returned by the backend problem dialog."""
quit = QDialog.Accepted + 1
restart = QDialog.Accepted + 2
restart_webkit = QDialog.Accepted + 3
restart_webengine = QDialog.Accepted + 4
@dataclasses.dataclass
class _Button:
"""A button passed to BackendProblemDialog."""
text: str
setting: str
value: Any
default: bool = False
def _other_backend(backend: usertypes.Backend) -> Tuple[usertypes.Backend, str]:
"""Get the other backend enum/setting for a given backend."""
other_backend = {
usertypes.Backend.QtWebKit: usertypes.Backend.QtWebEngine,
usertypes.Backend.QtWebEngine: usertypes.Backend.QtWebKit,
}[backend]
other_setting = other_backend.name.lower()[2:]
return (other_backend, other_setting)
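# Illustrative (hedged) example: _other_backend(usertypes.Backend.QtWebEngine)
# returns (usertypes.Backend.QtWebKit, 'webkit'), i.e. the other enum member
# plus the lowercase setting value with the leading "qt" stripped.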
def _error_text(because: str, text: str, backend: usertypes.Backend) -> str:
"""Get an error text for the given information."""
other_backend, other_setting = _other_backend(backend)
if other_backend == usertypes.Backend.QtWebKit:
warning = ("<i>Note that QtWebKit hasn't been updated since "
"July 2017 (including security updates).</i>")
suffix = " (not recommended)"
else:
warning = ""
suffix = ""
return ("<b>Failed to start with the {backend} backend!</b>"
"<p>qutebrowser tried to start with the {backend} backend but "
"failed because {because}.</p>{text}"
"<p><b>Forcing the {other_backend.name} backend{suffix}</b></p>"
"<p>This forces usage of the {other_backend.name} backend by "
"setting the <i>backend = '{other_setting}'</i> option "
"(if you have a <i>config.py</i> file, you'll need to set "
"this manually). {warning}</p>".format(
backend=backend.name, because=because, text=text,
other_backend=other_backend, other_setting=other_setting,
warning=warning, suffix=suffix))
class _Dialog(QDialog):
"""A dialog which gets shown if there are issues with the backend."""
def __init__(self, *, because: str,
text: str,
backend: usertypes.Backend,
buttons: Sequence[_Button] = None,
parent: QWidget = None) -> None:
super().__init__(parent)
vbox = QVBoxLayout(self)
other_backend, other_setting = _other_backend(backend)
text = _error_text(because, text, backend)
label = QLabel(text)
label.setWordWrap(True)
label.setTextFormat(Qt.RichText)
vbox.addWidget(label)
hbox = QHBoxLayout()
buttons = [] if buttons is None else buttons
quit_button = QPushButton("Quit")
quit_button.clicked.connect(lambda: self.done(_Result.quit))
hbox.addWidget(quit_button)
backend_text = "Force {} backend".format(other_backend.name)
if other_backend == usertypes.Backend.QtWebKit:
backend_text += ' (not recommended)'
backend_button = QPushButton(backend_text)
backend_button.clicked.connect(functools.partial(
self._change_setting, 'backend', other_setting))
hbox.addWidget(backend_button)
for button in buttons:
btn = QPushButton(button.text)
btn.setDefault(button.default)
btn.clicked.connect(functools.partial(
self._change_setting, button.setting, button.value))
hbox.addWidget(btn)
vbox.addLayout(hbox)
def _change_setting(self, setting: str, value: str) -> None:
"""Change the given setting and restart."""
config.instance.set_obj(setting, value, save_yaml=True)
if setting == 'backend' and value == 'webkit':
self.done(_Result.restart_webkit)
elif setting == 'backend' and value == 'webengine':
self.done(_Result.restart_webengine)
else:
self.done(_Result.restart)
@dataclasses.dataclass
class _BackendImports:
"""Whether backend modules could be imported."""
webkit_error: Optional[str] = None
webengine_error: Optional[str] = None
class _BackendProblemChecker:
"""Check for various backend-specific issues."""
def __init__(self, *,
no_err_windows: bool,
save_manager: savemanager.SaveManager) -> None:
self._save_manager = save_manager
self._no_err_windows = no_err_windows
def _show_dialog(self, *args: Any, **kwargs: Any) -> None:
"""Show a dialog for a backend problem."""
if self._no_err_windows:
text = _error_text(*args, **kwargs)
print(text, file=sys.stderr)
sys.exit(usertypes.Exit.err_init)
dialog = _Dialog(*args, **kwargs)
status = dialog.exec()
self._save_manager.save_all(is_exit=True)
if status in [_Result.quit, QDialog.Rejected]:
pass
elif status == _Result.restart_webkit:
quitter.instance.restart(override_args={'backend': 'webkit'})
elif status == _Result.restart_webengine:
quitter.instance.restart(override_args={'backend': 'webengine'})
elif status == _Result.restart:
quitter.instance.restart()
else:
raise utils.Unreachable(status)
sys.exit(usertypes.Exit.err_init)
def _nvidia_shader_workaround(self) -> None:
"""Work around QOpenGLShaderProgram issues.
See https://bugs.launchpad.net/ubuntu/+source/python-qt4/+bug/941826
"""
self._assert_backend(usertypes.Backend.QtWebEngine)
utils.libgl_workaround()
def _xwayland_options(self) -> Tuple[str, List[_Button]]:
"""Get buttons/text for a possible XWayland solution."""
buttons = []
text = "<p>You can work around this in one of the following ways:</p>"
if 'DISPLAY' in os.environ:
# XWayland is available, but QT_QPA_PLATFORM=wayland is set
buttons.append(
_Button("Force XWayland", 'qt.force_platform', 'xcb'))
text += ("<p><b>Force Qt to use XWayland</b></p>"
"<p>This allows you to use the newer QtWebEngine backend "
"(based on Chromium). "
"This sets the <i>qt.force_platform = 'xcb'</i> option "
"(if you have a <i>config.py</i> file, you'll need to "
"set this manually).</p>")
else:
text += ("<p><b>Set up XWayland</b></p>"
"<p>This allows you to use the newer QtWebEngine backend "
"(based on Chromium). ")
return text, buttons
def _handle_wayland_webgl(self) -> None:
"""On older graphic hardware, WebGL on Wayland causes segfaults.
See https://github.com/qutebrowser/qutebrowser/issues/5313
"""
self._assert_backend(usertypes.Backend.QtWebEngine)
if os.environ.get('QUTE_SKIP_WAYLAND_WEBGL_CHECK'):
return
platform = objects.qapp.platformName()
if platform not in ['wayland', 'wayland-egl']:
return
# Only Qt 5.14 should be affected
if not qtutils.version_check('5.14', compiled=False):
return
if qtutils.version_check('5.15', compiled=False):
return
# Newer graphic hardware isn't affected
opengl_info = version.opengl_info()
if (opengl_info is None or
opengl_info.gles or
opengl_info.version is None or
opengl_info.version >= (4, 3)):
return
# If WebGL is turned off, we're fine
if not config.val.content.webgl:
return
text, buttons = self._xwayland_options()
buttons.append(_Button("Turn off WebGL (recommended)",
'content.webgl',
False))
text += ("<p><b>Disable WebGL (recommended)</b></p>"
"This sets the <i>content.webgl = False</i> option "
"(if you have a <i>config.py</i> file, you'll need to "
"set this manually).</p>")
self._show_dialog(backend=usertypes.Backend.QtWebEngine,
because=("of frequent crashes with Qt 5.14 on "
"Wayland with older graphics hardware"),
text=text,
buttons=buttons)
def _try_import_backends(self) -> _BackendImports:
"""Check whether backends can be imported and return BackendImports."""
# pylint: disable=unused-import
results = _BackendImports()
try:
from PyQt5 import QtWebKit
from PyQt5.QtWebKit import qWebKitVersion
from PyQt5 import QtWebKitWidgets
except (ImportError, ValueError) as e:
results.webkit_error = str(e)
else:
if not qtutils.is_new_qtwebkit():
results.webkit_error = "Unsupported legacy QtWebKit found"
try:
from PyQt5 import QtWebEngineWidgets
except (ImportError, ValueError) as e:
results.webengine_error = str(e)
return results
def _handle_ssl_support(self, fatal: bool = False) -> None:
"""Check for full SSL availability.
If "fatal" is given, show an error and exit.
"""
if QSslSocket.supportsSsl():
return
if qtutils.version_check('5.12.4'):
version_text = ("If you use OpenSSL 1.0 with a PyQt package from "
"PyPI (e.g. on Ubuntu 16.04), you will need to "
"build OpenSSL 1.1 from sources and set "
"LD_LIBRARY_PATH accordingly.")
else:
version_text = ("If you use OpenSSL 1.1 with a PyQt package from "
"PyPI (e.g. on Archlinux or Debian Stretch), you "
"need to set LD_LIBRARY_PATH to the path of "
"OpenSSL 1.0 or use Qt >= 5.12.4.")
text = ("Could not initialize QtNetwork SSL support. {} This only "
"affects downloads and :adblock-update.".format(version_text))
if fatal:
errbox = msgbox.msgbox(parent=None,
title="SSL error",
text="Could not initialize SSL support.",
icon=QMessageBox.Critical,
plain_text=False)
errbox.exec()
sys.exit(usertypes.Exit.err_init)
assert not fatal
log.init.warning(text)
def _check_backend_modules(self) -> None:
"""Check for the modules needed for QtWebKit/QtWebEngine."""
imports = self._try_import_backends()
if not imports.webkit_error and not imports.webengine_error:
return
elif imports.webkit_error and imports.webengine_error:
text = ("<p>qutebrowser needs QtWebKit or QtWebEngine, but "
"neither could be imported!</p>"
"<p>The errors encountered were:<ul>"
"<li><b>QtWebKit:</b> {webkit_error}"
"<li><b>QtWebEngine:</b> {webengine_error}"
"</ul></p>".format(
webkit_error=html.escape(imports.webkit_error),
webengine_error=html.escape(imports.webengine_error)))
errbox = msgbox.msgbox(parent=None,
title="No backend library found!",
text=text,
icon=QMessageBox.Critical,
plain_text=False)
errbox.exec()
sys.exit(usertypes.Exit.err_init)
elif objects.backend == usertypes.Backend.QtWebKit:
if not imports.webkit_error:
return
self._show_dialog(
backend=usertypes.Backend.QtWebKit,
because="QtWebKit could not be imported",
text="<p><b>The error encountered was:</b><br/>{}</p>".format(
html.escape(imports.webkit_error))
)
elif objects.backend == usertypes.Backend.QtWebEngine:
if not imports.webengine_error:
return
self._show_dialog(
backend=usertypes.Backend.QtWebEngine,
because="QtWebEngine could not be imported",
text="<p><b>The error encountered was:</b><br/>{}</p>".format(
html.escape(imports.webengine_error))
)
raise utils.Unreachable
def _handle_cache_nuking(self) -> None:
"""Nuke the QtWebEngine cache if the Qt version changed.
WORKAROUND for https://bugreports.qt.io/browse/QTBUG-72532
"""
if not configfiles.state.qt_version_changed:
return
# Only nuke the cache in cases where we know there are problems.
# It seems these issues started with Qt 5.12.
# They should be fixed with Qt 5.12.5:
# https://codereview.qt-project.org/c/qt/qtwebengine-chromium/+/265408
if qtutils.version_check('5.12.5', compiled=False):
return
log.init.info("Qt version changed, nuking QtWebEngine cache")
cache_dir = os.path.join(standarddir.cache(), 'webengine')
if os.path.exists(cache_dir):
shutil.rmtree(cache_dir)
def _handle_serviceworker_nuking(self) -> None:
"""Nuke the service workers directory if the Qt version changed.
WORKAROUND for:
https://bugreports.qt.io/browse/QTBUG-72532
https://bugreports.qt.io/browse/QTBUG-82105
"""
if ('serviceworker_workaround' not in configfiles.state['general'] and
qtutils.version_check('5.14', compiled=False)):
# Nuke the service worker directory once for every install with Qt
# 5.14, given that it seems to cause a variety of segfaults.
configfiles.state['general']['serviceworker_workaround'] = '514'
affected = True
else:
# Otherwise, just nuke it when the Qt version changed.
affected = configfiles.state.qt_version_changed
if not affected:
return
service_worker_dir = os.path.join(standarddir.data(), 'webengine',
'Service Worker')
bak_dir = service_worker_dir + '-bak'
if not os.path.exists(service_worker_dir):
return
log.init.info("Qt version changed, removing service workers")
# Keep one backup around - we're not 100% sure what persistent data
# could be in there, but this folder can grow to ~300 MB.
if os.path.exists(bak_dir):
shutil.rmtree(bak_dir)
shutil.move(service_worker_dir, bak_dir)
def _assert_backend(self, backend: usertypes.Backend) -> None:
assert objects.backend == backend, objects.backend
def check(self) -> None:
"""Run all checks."""
self._check_backend_modules()
if objects.backend == usertypes.Backend.QtWebEngine:
self._handle_ssl_support()
self._nvidia_shader_workaround()
self._handle_wayland_webgl()
self._handle_cache_nuking()
self._handle_serviceworker_nuking()
else:
self._assert_backend(usertypes.Backend.QtWebKit)
self._handle_ssl_support(fatal=True)
def init(*, args: argparse.Namespace,
save_manager: savemanager.SaveManager) -> None:
"""Run all checks."""
checker = _BackendProblemChecker(no_err_windows=args.no_err_windows,
save_manager=save_manager)
checker.check()
|
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
import datetime
from random import randint
import urllib2
from django.core.cache import cache
from django.test import Client
from mock import patch, MagicMock
from rapidsms.contrib.locations.models import Location, LocationType
from survey.investigator_configs import COUNTRY_PHONE_CODE
from survey.models import Investigator, Backend, Household, HouseholdHead, Batch, HouseholdMemberGroup, NumericalAnswer, \
Question, TextAnswer, QuestionOption, MultiChoiceAnswer, AnswerRule, BatchQuestionOrder, GroupCondition, Survey, \
RandomHouseHoldSelection, EnumerationArea
from survey.models.households import HouseholdMember
from survey.tests.ussd.ussd_base_test import USSDBaseTest, FakeRequest
from survey.ussd.ussd import USSD
from survey.ussd.ussd_survey import USSDSurvey
class USSDTest(USSDBaseTest):
def setUp(self):
self.client = Client()
self.ussd_params = {
'transactionId': "123344" + str(randint(1, 99999)),
'transactionTime': datetime.datetime.now().strftime('%Y%m%dT%H:%M:%S'),
'msisdn': '2567765' + str(randint(1, 99999)),
'ussdServiceCode': '130',
'ussdRequestString': '',
'response': "false"
}
self.open_survey = Survey.objects.create(name="open survey", description="open survey", has_sampling=True)
city = LocationType.objects.create(name="City")
self.mbarara = Location.objects.create(name="Mbarara", type=city)
self.ea = EnumerationArea.objects.create(name="EA2", survey=self.open_survey)
self.ea.locations.add(self.mbarara)
self.investigator = Investigator.objects.create(name="investigator name",
mobile_number=self.ussd_params['msisdn'].replace(
COUNTRY_PHONE_CODE, '', 1),
ea=self.ea,
backend=Backend.objects.create(name='something'))
self.household = Household.objects.create(investigator=self.investigator, ea=self.investigator.ea,
survey=self.open_survey, uid=0)
self.household_head = HouseholdHead.objects.create(household=self.household, surname="Surname",
date_of_birth=datetime.date(1980, 9, 1))
self.household_1 = Household.objects.create(investigator=self.investigator, ea=self.investigator.ea,
survey=self.open_survey, uid=1)
self.household_head_1 = HouseholdHead.objects.create(household=self.household_1,
surname="Name " + str(randint(1, 9999)),
date_of_birth=datetime.date(1980, 9, 1))
self.household_member = HouseholdMember.objects.create(surname="Name 2", household=self.household_1,
date_of_birth=datetime.date(2000, 2, 3))
self.batch = Batch.objects.create(order=1, name="batch test", survey=self.open_survey)
self.batch.open_for_location(self.investigator.location)
self.member_group = HouseholdMemberGroup.objects.create(name="5 to 6 years", order=0)
def test_knows_can_resume_survey_if_investigator_has_open_batches_or_is_registering_households(self):
ussd_survey = USSDSurvey(self.investigator, FakeRequest())
self.assertTrue(ussd_survey.can_resume_survey(is_registering=False))
self.assertTrue(ussd_survey.can_resume_survey(is_registering=True))
self.batch.close_for_location(self.investigator.location)
self.assertFalse(ussd_survey.can_resume_survey(is_registering=False))
self.assertTrue(ussd_survey.can_resume_survey(is_registering=True))
def test_list_household_members_after_selecting_household(self):
household_member1 = HouseholdMember.objects.create(household=self.household, surname="abcd", male=False,
date_of_birth='1989-02-02')
household_member2 = HouseholdMember.objects.create(household=self.household, surname="xyz", male=False,
date_of_birth='1989-02-02')
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
self.choose_menu_to_take_survey()
response = self.select_household()
response_string = "responseString=%s&action=request" % USSD.MESSAGES['RETAKE_SURVEY']
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond('1')
members_list = "%s\n1: %s - (respondent)*\n2: %s*\n3: %s*" % (
USSD.MESSAGES['MEMBERS_LIST'], self.household_head.surname, household_member1.surname,
household_member2.surname)
response_string = "responseString=%s&action=request" % members_list
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_goes_back_to_household_list_if_investigator_selects_household_and_chooses_not_to_retake_survey(self):
HouseholdMember.objects.filter(householdhead=None).delete()
head_group = HouseholdMemberGroup.objects.create(name="General", order=1)
condition = GroupCondition.objects.create(value='HEAD', attribute="GENERAL", condition="EQUALS")
condition.groups.add(head_group)
question_1 = Question.objects.create(text="How many members are there in this household?",
answer_type=Question.NUMBER, order=1, group=head_group)
question_1.batches.add(self.batch)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_1, order=1)
self.batch.open_for_location(self.investigator.location)
self.investigator.member_answered(question_1, self.household_head, 1, self.batch)
self.investigator.member_answered(question_1, self.household_head_1, 1, self.batch)
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
self.choose_menu_to_take_survey()
response = self.select_household('2')
response_string = "responseString=%s&action=request" % USSD.MESSAGES['RETAKE_SURVEY']
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond('2')
households_list = "%s\n1: HH-%s-%s*\n2: HH-%s-%s*" % (
USSD.MESSAGES['HOUSEHOLD_LIST'], self.household_head.household.random_sample_number,
self.household_head.surname,
self.household_head_1.household.random_sample_number, self.household_head_1.surname)
response_string = "responseString=%s&action=request" % households_list
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond('1')
response_string = "responseString=%s&action=request" % USSD.MESSAGES['RETAKE_SURVEY']
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond('1')
members_list = "%s\n1: %s - (respondent)*" % (
USSD.MESSAGES['MEMBERS_LIST'], self.household_head.surname)
response_string = "responseString=%s&action=request" % members_list
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond('1')
response_string = "responseString=%s&action=request" % question_1.text
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_goes_back_to_household_list_if_investigator_selects_household_with_no_members_and_chooses_retake(self):
Household.objects.all().delete()
HouseholdMember.objects.all().delete()
self.household1_without_members = Household.objects.create(investigator=self.investigator,
ea=self.investigator.ea,
survey=self.open_survey, uid=1)
self.household2_without_members = Household.objects.create(investigator=self.investigator,
ea=self.investigator.ea,
survey=self.open_survey, uid=2)
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
self.choose_menu_to_take_survey()
response = self.select_household('2')
response_string = "responseString=%s&action=request" % USSD.MESSAGES['RETAKE_SURVEY']
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond('1')
households_list = "%s\n1: HH-%s*\n2: HH-%s*" % (USSD.MESSAGES['HOUSEHOLD_LIST'], 0, 0)
response_string = "responseString=%s&action=request" % households_list
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_ussd_restart_survey_option_yes_with_household_set(self):
request = FakeRequest()
request['ussdRequestString'] = 1
session_string = "SESSION-%s-%s" % ('1234567890', USSDSurvey.__name__)
session = cache.get(session_string)
session['HOUSEHOLD'] = self.household_1
cache.set(session_string, session)
members_list = "%s\n1: %s - (respondent)*\n2: %s*" % (
USSD.MESSAGES['MEMBERS_LIST'], self.household_head_1.surname, self.household_member.surname)
ussd_survey = USSDSurvey(self.investigator, request)
ussd_survey.request['ussdRequestString'] = '1'
ussd_survey.restart_survey()
self.assertEqual(USSDSurvey.ACTIONS['REQUEST'], ussd_survey.action)
self.assertEqual(members_list, ussd_survey.responseString)
def test_ussd_restart_survey_option_yes_with_household_member_set(self):
request = FakeRequest()
request['ussdRequestString'] = 1
session_string = "SESSION-%s-%s" % ('1234567890', USSDSurvey.__name__)
session = cache.get(session_string)
session['HOUSEHOLD'] = self.household_1
session['HOUSEHOLD_MEMBER'] = self.household_member
cache.set(session_string, session)
restart_message = "Thank you. You have completed this household. Would you like to retake this household?\n1: Yes\n2: No"
ussd_survey = USSDSurvey(self.investigator, request)
ussd_survey.request['ussdRequestString'] = '1'
ussd_survey.restart_survey()
self.assertEqual(USSDSurvey.ACTIONS['REQUEST'], ussd_survey.action)
self.assertEqual(restart_message, ussd_survey.responseString)
def test_ussd_restart_survey_option_no(self):
request = FakeRequest()
request['ussdRequestString'] = 1
session_string = "SESSION-%s-%s" % ('1234567890', USSDSurvey.__name__)
session = cache.get(session_string)
session['HOUSEHOLD'] = self.household_1
session['HOUSEHOLD_MEMBER'] = self.household_member
cache.set(session_string, session)
households_list = "%s\n1: HH-%s-%s*\n2: HH-%s-%s*" % (
USSD.MESSAGES['HOUSEHOLD_LIST'], self.household_head.household.random_sample_number,
self.household_head.surname,
self.household_head_1.household.random_sample_number, self.household_head_1.surname)
ussd_survey = USSDSurvey(self.investigator, request)
ussd_survey.request['ussdRequestString'] = '2'
ussd_survey.restart_survey()
self.assertIsNone(ussd_survey.get_from_session('HOUSEHOLD'))
self.assertIsNone(ussd_survey.get_from_session('HOUSEHOLD_MEMBER'))
self.assertIsNone(ussd_survey.household)
self.assertEqual(USSDSurvey.ACTIONS['REQUEST'], ussd_survey.action)
self.assertEqual(households_list, ussd_survey.responseString)
def test_ussd_render_welcome_text_if_investigator_has_no_households(self):
household_message = "Sorry, you have no households registered."
new_investigator = Investigator.objects.create(name="new investigator",
mobile_number="001122334",
location=Location.objects.create(name="Entebbe"),
backend=Backend.objects.create(name='another'))
ussd_survey = USSDSurvey(new_investigator, FakeRequest())
ussd_survey.render_welcome_text()
self.assertEqual(USSDSurvey.ACTIONS['END'], ussd_survey.action)
self.assertEqual(household_message, ussd_survey.responseString)
def test_end_interview_if_batch_questions_answered_more_than_time_out_minutes_ago(self):
request = FakeRequest()
request['ussdRequestString'] = 1
session_string = "SESSION-%s-%s" % ('1234567890', USSDSurvey.__name__)
session = {}
session['HOUSEHOLD'] = self.household_1
session['HOUSEHOLD_MEMBER'] = self.household_member
cache.set(session_string, session)
ussd_survey = USSDSurvey(self.investigator, request)
ussd_survey.request['ussdRequestString'] = '1'
with patch.object(HouseholdMember, 'survey_completed', return_value=False):
with patch.object(HouseholdMember, 'last_question_answered', return_value=[1]):
with patch.object(HouseholdMember, 'can_retake_survey', return_value=False):
ussd_survey.end_interview(self.batch)
self.assertEqual(USSD.MESSAGES['BATCH_5_MIN_TIMEDOUT_MESSAGE'], ussd_survey.responseString)
self.assertEqual(USSD.ACTIONS['END'], ussd_survey.action)
def test_render_household_list_should_behave_like_new_request_if_no_household_selected(self):
request = FakeRequest()
session_string = "SESSION-%s-%s" % ('1234567890', USSDSurvey.__name__)
session = {}
session['HOUSEHOLD'] = self.household_1
session['HOUSEHOLD_MEMBER'] = self.household_member
session['PAGE'] = '1'
cache.set(session_string, session)
ussd_survey = USSDSurvey(self.investigator, request)
ussd_survey.render_households_list(self.open_survey)
self.assertEqual(USSD.ACTIONS['REQUEST'], ussd_survey.action)
self.assertEqual(USSD.MESSAGES['HOUSEHOLD_COMPLETION_MESSAGE'], ussd_survey.responseString)
def test_end_interview_if_batch_questions_answered_within_time_out_minutes_ago(self):
request = FakeRequest()
request['ussdRequestString'] = 1
session_string = "SESSION-%s-%s" % ('1234567890', USSDSurvey.__name__)
session = {}
session['HOUSEHOLD'] = self.household_1
session['PAGE'] = '1'
session['HOUSEHOLD_MEMBER'] = self.household_member
cache.set(session_string, session)
ussd_survey = USSDSurvey(self.investigator, request)
ussd_survey.request['ussdRequestString'] = '1'
with patch.object(HouseholdMember, 'survey_completed', return_value=False):
with patch.object(HouseholdMember, 'can_retake_survey', return_value=True):
ussd_survey.end_interview(self.batch)
self.assertEqual(USSD.MESSAGES['SUCCESS_MESSAGE'], ussd_survey.responseString)
self.assertEqual(USSD.ACTIONS['END'], ussd_survey.action)
def test_ussd_render_welcome_text_if_investigator_has_households(self):
homepage = "Welcome %s to the survey.\n1: Register households\n2: Take survey" % self.investigator.name
welcome_message = "%s\n%s: Households list" % (homepage, USSD.HOUSEHOLD_LIST_OPTION)
ussd_survey = USSDSurvey(self.investigator, FakeRequest())
ussd_survey.render_welcome_text()
self.assertEqual(USSDSurvey.ACTIONS['REQUEST'], ussd_survey.action)
self.assertEqual(welcome_message, ussd_survey.responseString)
def test_renders_welcome_message_if_investigator_does_not_select_option_one_or_two_from_welcome_screen(self):
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
response = self.respond('10')
homepage = "Welcome %s to the survey.\n1: Register households\n2: Take survey" % self.investigator.name
response_string = "responseString=%s&action=request" % homepage
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_numerical_questions(self):
HouseholdMember.objects.create(surname="Name 2", household=self.household, date_of_birth='1980-02-03')
question_1 = Question.objects.create(text="How many members are there in this household?",
answer_type=Question.NUMBER, order=1, group=self.member_group)
question_2 = Question.objects.create(text="How many of them are male?",
answer_type=Question.NUMBER, order=2, group=self.member_group)
question_1.batches.add(self.batch)
question_2.batches.add(self.batch)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_1, order=1)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_2, order=2)
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
self.choose_menu_to_take_survey()
self.select_household()
response = self.select_household_member()
response_string = "responseString=%s&action=request" % question_1.to_ussd()
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("4")
self.assertEquals(4, NumericalAnswer.objects.get(investigator=self.investigator,
question=question_1).answer)
response_string = "responseString=%s&action=request" % question_2.to_ussd()
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("2")
response_string = "responseString=%s&action=request" % USSD.MESSAGES['MEMBER_SUCCESS_MESSAGE']
self.assertEquals(urllib2.unquote(response.content), response_string)
self.assertEquals(2, NumericalAnswer.objects.get(investigator=self.investigator,
question=question_2).answer)
def test_textual_questions(self):
member_2 = HouseholdMember.objects.create(surname="Name 2", household=self.household,
date_of_birth='1980-02-03')
question_1 = Question.objects.create(text="How many members are there in this household?",
answer_type=Question.TEXT, order=1, group=self.member_group)
question_2 = Question.objects.create(text="How many of them are male?",
answer_type=Question.TEXT, order=2, group=self.member_group)
question_1.batches.add(self.batch)
question_2.batches.add(self.batch)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_1, order=1)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_2, order=2)
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
self.choose_menu_to_take_survey()
self.select_household()
response = self.select_household_member()
response_string = "responseString=%s&action=request" % question_1.to_ussd()
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("Reply one")
self.assertEquals(self.ussd_params['ussdRequestString'],
TextAnswer.objects.get(investigator=self.investigator, question=question_1).answer)
response_string = "responseString=%s&action=request" % question_2.to_ussd()
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("Reply two")
response_string = "responseString=%s&action=request" % USSD.MESSAGES['MEMBER_SUCCESS_MESSAGE']
self.assertEquals(urllib2.unquote(response.content), response_string)
self.assertEquals(self.ussd_params['ussdRequestString'],
TextAnswer.objects.get(investigator=self.investigator, question=question_2).answer)
def test_multichoice_questions(self):
HouseholdMember.objects.create(surname="Name 2", household=self.household, date_of_birth='1980-02-03')
question_1 = Question.objects.create(text="How many members are there in this household?",
answer_type=Question.MULTICHOICE, order=1, group=self.member_group)
option_1_1 = QuestionOption.objects.create(question=question_1, text="OPTION 1", order=1)
option_1_2 = QuestionOption.objects.create(question=question_1, text="OPTION 2", order=2)
question_2 = Question.objects.create(text="How many of them are male?",
answer_type=Question.MULTICHOICE, order=2, group=self.member_group)
option_2_1 = QuestionOption.objects.create(question=question_2, text="OPTION 1", order=1)
option_2_2 = QuestionOption.objects.create(question=question_2, text="OPTION 2", order=2)
question_1.batches.add(self.batch)
question_2.batches.add(self.batch)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_1, order=1)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_2, order=2)
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
self.choose_menu_to_take_survey()
self.select_household()
response = self.select_household_member()
response_string = "responseString=%s&action=request" % question_1.to_ussd()
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond(str(option_1_1.order))
self.assertEquals(option_1_1,
MultiChoiceAnswer.objects.get(investigator=self.investigator,
question=question_1).answer)
response_string = "responseString=%s&action=request" % question_2.to_ussd()
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond(str(option_2_1.order))
response_string = "responseString=%s&action=request" % USSD.MESSAGES['MEMBER_SUCCESS_MESSAGE']
self.assertEquals(urllib2.unquote(response.content), response_string)
self.assertEquals(option_2_1,
MultiChoiceAnswer.objects.get(investigator=self.investigator,
question=question_2).answer)
def test_multichoice_questions_pagination(self):
question = Question.objects.create(text="This is a question",
answer_type=Question.MULTICHOICE, order=1, group=self.member_group)
option_1 = QuestionOption.objects.create(question=question, text="OPTION 1", order=1)
option_2 = QuestionOption.objects.create(question=question, text="OPTION 2", order=2)
option_3 = QuestionOption.objects.create(question=question, text="OPTION 3", order=3)
option_4 = QuestionOption.objects.create(question=question, text="OPTION 4", order=4)
option_5 = QuestionOption.objects.create(question=question, text="OPTION 5", order=5)
option_6 = QuestionOption.objects.create(question=question, text="OPTION 6", order=6)
option_7 = QuestionOption.objects.create(question=question, text="OPTION 7", order=7)
back_text = Question.PREVIOUS_PAGE_TEXT
next_text = Question.NEXT_PAGE_TEXT
question_2 = Question.objects.create(text="This is a question",
answer_type=Question.MULTICHOICE, order=2, group=self.member_group)
option_8 = QuestionOption.objects.create(question=question_2, text="OPTION 1", order=1)
option_9 = QuestionOption.objects.create(question=question_2, text="OPTION 2", order=2)
question.batches.add(self.batch)
question_2.batches.add(self.batch)
BatchQuestionOrder.objects.create(batch=self.batch, question=question, order=1)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_2, order=2)
page_1 = "%s\n1: %s\n2: %s\n3: %s\n%s" % (question.text, option_1.text, option_2.text, option_3.text, next_text)
page_2 = "%s\n4: %s\n5: %s\n6: %s\n%s\n%s" % (
question.text, option_4.text, option_5.text, option_6.text, back_text, next_text)
page_3 = "%s\n7: %s\n%s" % (question.text, option_7.text, back_text)
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
self.choose_menu_to_take_survey()
self.select_household()
response = self.select_household_member()
response_string = "responseString=%s&action=request" % page_1
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("#")
response_string = "responseString=%s&action=request" % page_2
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("#")
response_string = "responseString=%s&action=request" % page_3
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("*")
response_string = "responseString=%s&action=request" % page_2
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("*")
response_string = "responseString=%s&action=request" % page_1
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("#")
response_string = "responseString=%s&action=request" % page_2
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("1")
response_string = "responseString=%s&action=request" % question_2.to_ussd()
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_reanswer_question(self):
HouseholdMember.objects.create(surname="Name 2", household=self.household, date_of_birth='1980-02-03')
question_1 = Question.objects.create(text="How many members are there in this household?",
answer_type=Question.NUMBER, order=1, group=self.member_group)
question_2 = Question.objects.create(text="How many of them are male?",
answer_type=Question.NUMBER, order=2, group=self.member_group)
rule = AnswerRule.objects.create(question=question_2, action=AnswerRule.ACTIONS['REANSWER'],
condition=AnswerRule.CONDITIONS['GREATER_THAN_QUESTION'],
validate_with_question=question_1)
question_1.batches.add(self.batch)
question_2.batches.add(self.batch)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_1, order=1)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_2, order=2)
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
self.choose_menu_to_take_survey()
self.select_household()
response = self.select_household_member()
response_string = "responseString=%s&action=request" % question_1.text
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("5")
response_string = "responseString=%s&action=request" % question_2.text
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("10")
response_string = "responseString=%s&action=request" % ("RECONFIRM: " + question_1.text)
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("5")
response_string = "responseString=%s&action=request" % ("RECONFIRM: " + question_2.text)
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("2")
response_string = "responseString=%s&action=request" % USSD.MESSAGES['MEMBER_SUCCESS_MESSAGE']
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_text_invalid_answer(self):
member_2 = HouseholdMember.objects.create(surname="Name 2", household=self.household,
date_of_birth='1980-02-03')
question_1 = Question.objects.create(text="How many members are there in this household?",
answer_type=Question.TEXT, order=1, group=self.member_group)
question_1.batches.add(self.batch)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_1, order=1)
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
self.choose_menu_to_take_survey()
self.select_household()
response = self.select_household_member()
response_string = "responseString=%s&action=request" % question_1.text
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("")
response_string = "responseString=%s&action=request" % ("INVALID ANSWER: " + question_1.text)
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("something")
response_string = "responseString=%s&action=request" % USSD.MESSAGES['MEMBER_SUCCESS_MESSAGE']
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_numerical_invalid_answer(self):
HouseholdMember.objects.create(surname="Name 2", household=self.household, date_of_birth='1980-02-03')
question_1 = Question.objects.create(text="How many members are there in this household?",
answer_type=Question.NUMBER, order=1, group=self.member_group)
question_1.batches.add(self.batch)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_1, order=1)
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
self.choose_menu_to_take_survey()
self.select_household()
response = self.select_household_member()
response_string = "responseString=%s&action=request" % question_1.text
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("a")
response_string = "responseString=%s&action=request" % ("INVALID ANSWER: " + question_1.text)
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("2")
response_string = "responseString=%s&action=request" % USSD.MESSAGES['MEMBER_SUCCESS_MESSAGE']
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_multichoice_invalid_answer(self):
HouseholdMember.objects.create(surname="Name 2", household=self.household, date_of_birth='1980-02-03')
question_1 = Question.objects.create(text="This is a question",
answer_type=Question.MULTICHOICE, order=1, group=self.member_group)
question_1.batches.add(self.batch)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_1, order=1)
option_1 = QuestionOption.objects.create(question=question_1, text="OPTION 1", order=1)
option_2 = QuestionOption.objects.create(question=question_1, text="OPTION 2", order=2)
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
self.choose_menu_to_take_survey()
self.select_household()
response = self.select_household_member()
page_1 = "%s\n1: %s\n2: %s" % (question_1.text, option_1.text, option_2.text)
response_string = "responseString=%s&action=request" % page_1
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("a")
response_string = "responseString=%s&action=request" % ("INVALID ANSWER: " + page_1)
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("4")
response_string = "responseString=%s&action=request" % ("INVALID ANSWER: " + page_1)
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("2")
response_string = "responseString=%s&action=request" % USSD.MESSAGES['MEMBER_SUCCESS_MESSAGE']
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_end_interview_confirmation(self):
question_1 = Question.objects.create(text="How many members are there in this household?",
answer_type=Question.NUMBER, order=1, group=self.member_group)
question_2 = Question.objects.create(text="How many of them are male?",
answer_type=Question.NUMBER, order=2, group=self.member_group)
question_1.batches.add(self.batch)
question_2.batches.add(self.batch)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_1, order=1)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_2, order=2)
AnswerRule.objects.create(question=question_1, action=AnswerRule.ACTIONS['END_INTERVIEW'],
condition=AnswerRule.CONDITIONS['EQUALS'], validate_with_value=0)
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
self.choose_menu_to_take_survey()
response = self.select_household("1")
members_list = "%s\n1: %s - (respondent)" % (USSD.MESSAGES['MEMBERS_LIST'], self.household_head.surname)
response_string = "responseString=%s&action=request" % members_list
self.assertEquals(urllib2.unquote(response.content), response_string)
self.investigator = Investigator.objects.get(id=self.investigator.pk)
self.assertEquals(len(self.investigator.get_from_cache('CONFIRM_END_INTERVIEW')), 0)
response = self.select_household_member("1")
response_string = "responseString=%s&action=request" % question_1.text
self.assertEquals(urllib2.unquote(response.content), response_string)
self.investigator = Investigator.objects.get(id=self.investigator.pk)
self.assertEquals(len(self.investigator.get_from_cache('CONFIRM_END_INTERVIEW')), 0)
response = self.respond("0")
response_string = "responseString=%s&action=request" % ("RECONFIRM: " + question_1.text)
self.assertEquals(urllib2.unquote(response.content), response_string)
self.assertEquals(len(self.investigator.get_from_cache('CONFIRM_END_INTERVIEW')), 1)
self.assertEquals(0, NumericalAnswer.objects.count())
response = self.respond("0")
response_string = "responseString=%s&action=request" % USSD.MESSAGES['HOUSEHOLD_COMPLETION_MESSAGE']
self.assertEquals(urllib2.unquote(response.content), response_string)
self.investigator = Investigator.objects.get(id=self.investigator.pk)
self.assertEquals(len(self.investigator.get_from_cache('CONFIRM_END_INTERVIEW')), 0)
self.assertFalse(self.household.has_pending_survey())
self.assertTrue(self.household_1.has_pending_survey())
self.assertFalse(self.investigator.completed_open_surveys())
self.set_questions_answered_to_twenty_minutes_ago()
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
self.choose_menu_to_take_survey()
response = self.select_household("2")
members_list = "%s\n1: %s - (respondent)\n2: %s" % (
USSD.MESSAGES['MEMBERS_LIST'], self.household_head_1.surname, self.household_member.surname)
response_string = "responseString=%s&action=request" % members_list
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.select_household_member("1")
response_string = "responseString=%s&action=request" % question_1.text
self.assertEquals(urllib2.unquote(response.content), response_string)
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
response = self.choose_menu_to_take_survey()
households_list_1 = "%s\n1: HH-%s-%s*\n2: HH-%s-%s" % (
USSD.MESSAGES['HOUSEHOLD_LIST'], self.household_head.household.random_sample_number,
self.household_head.surname,
self.household_head_1.household.random_sample_number, self.household_head_1.surname)
response_string = "responseString=%s&action=request" % households_list_1
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.select_household('1')
members_list = "%s\n1: %s - (respondent)*" % (
USSD.MESSAGES['MEMBERS_LIST'], self.household_head.surname)
response_string = "responseString=%s&action=request" % members_list
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_end_interview_confirmation_alternative(self):
question_1 = Question.objects.create(text="How many members are there in this household?",
answer_type=Question.NUMBER, order=1, group=self.member_group)
question_2 = Question.objects.create(text="How many of them are male?",
answer_type=Question.NUMBER, order=2, group=self.member_group)
question_1.batches.add(self.batch)
question_2.batches.add(self.batch)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_1, order=1)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_2, order=2)
rule = AnswerRule.objects.create(question=question_1, action=AnswerRule.ACTIONS['END_INTERVIEW'],
condition=AnswerRule.CONDITIONS['EQUALS'], validate_with_value=0)
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
self.choose_menu_to_take_survey()
response = self.select_household()
members_list = "%s\n1: %s - (respondent)" % (USSD.MESSAGES['MEMBERS_LIST'], self.household_head.surname)
response_string = "responseString=%s&action=request" % members_list
self.assertEquals(urllib2.unquote(response.content), response_string)
self.investigator = Investigator.objects.get(id=self.investigator.pk)
self.assertEquals(len(self.investigator.get_from_cache('CONFIRM_END_INTERVIEW')), 0)
response = self.select_household_member()
response_string = "responseString=%s&action=request" % question_1.text
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("0")
response_string = "responseString=%s&action=request" % ("RECONFIRM: " + question_1.text)
self.assertEquals(urllib2.unquote(response.content), response_string)
self.assertEquals(0, NumericalAnswer.objects.count())
response = self.respond("1")
response_string = "responseString=%s&action=request" % question_2.text
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_should_show_member_completion_message_and_choose_to_go_to_member_list(self):
member_2 = HouseholdMember.objects.create(surname="Name 2", household=self.household,
date_of_birth='1980-02-03')
question_1 = Question.objects.create(text="Question 1?",
answer_type=Question.NUMBER, order=1, group=self.member_group)
question_2 = Question.objects.create(text="Question 2?",
answer_type=Question.NUMBER, order=2, group=self.member_group)
question_1.batches.add(self.batch)
question_2.batches.add(self.batch)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_1, order=1)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_2, order=2)
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
self.choose_menu_to_take_survey()
response = self.select_household()
members_list = "%s\n1: %s - (respondent)\n2: %s" % (
USSD.MESSAGES['MEMBERS_LIST'], self.household_head.surname, member_2.surname)
response_string = "responseString=%s&action=request" % members_list
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.select_household_member("1")
response_string = "responseString=%s&action=request" % question_1.text
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("1")
response_string = "responseString=%s&action=request" % question_2.text
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("2")
response_string = "responseString=%s&action=request" % USSD.MESSAGES['MEMBER_SUCCESS_MESSAGE']
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("1")
members_list = "%s\n1: %s - (respondent)*\n2: %s" % (
USSD.MESSAGES['MEMBERS_LIST'], self.household_head.surname, member_2.surname)
response_string = "responseString=%s&action=request" % members_list
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_should_show_thank_you_message_on_completion_of_all_members_questions(self):
question_1 = Question.objects.create(text="Question 1?",
answer_type=Question.NUMBER, order=1, group=self.member_group)
question_2 = Question.objects.create(text="Question 2?",
answer_type=Question.NUMBER, order=2, group=self.member_group)
question_1.batches.add(self.batch)
question_2.batches.add(self.batch)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_1, order=1)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_2, order=2)
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
self.choose_menu_to_take_survey()
response = self.select_household()
members_list = "%s\n1: %s - (respondent)" % (USSD.MESSAGES['MEMBERS_LIST'], self.household_head.surname)
response_string = "responseString=%s&action=request" % members_list
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.select_household_member("1")
response_string = "responseString=%s&action=request" % question_1.text
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("1")
response_string = "responseString=%s&action=request" % question_2.text
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("2")
response_string = "responseString=%s&action=request" % USSD.MESSAGES['HOUSEHOLD_COMPLETION_MESSAGE']
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("2")
households_list_1 = "%s\n1: HH-%s-%s*\n2: HH-%s-%s" % (
USSD.MESSAGES['HOUSEHOLD_LIST'], self.household_head.household.random_sample_number,
self.household_head.surname,
self.household_head_1.household.random_sample_number, self.household_head_1.surname)
response_string = "responseString=%s&action=request" % households_list_1
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_welcome_screen_should_show_message_and_options_for_registration_and_take_survey(self):
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
response = self.reset_session()
homepage = "Welcome %s to the survey.\n1: Register households\n2: Take survey" % self.investigator.name
response_string = "responseString=%s&action=request" % homepage
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_choosing_take_survey_should_render_household_list(self):
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
self.select_samples()
with patch.object(USSDSurvey, 'is_active', return_value=False):
response = self.reset_session()
homepage = "Welcome %s to the survey.\n1: Register households\n2: Take survey" % self.investigator.name
response_string = "responseString=%s&action=request" % homepage
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.choose_menu_to_take_survey()
households_list_1 = "%s\n1: HH-%s-%s*\n2: HH-%s-%s*" % (
USSD.MESSAGES['HOUSEHOLD_LIST'], self.household_head.household.random_sample_number,
self.household_head.surname,
self.household_head_1.household.random_sample_number, self.household_head_1.surname)
response_string = "responseString=%s&action=request" % households_list_1
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_choosing_registering_HH_should_set_cache(self):
self.investigator = Investigator.objects.get(id=self.investigator.pk)
self.batch.close_for_location(self.investigator.location)
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
self.select_samples()
with patch.object(USSDSurvey, 'is_active', return_value=False):
response = self.reset_session()
homepage = "Welcome %s to the survey.\n1: Register households\n2: Take survey" % self.investigator.name
response_string = "responseString=%s&action=request" % homepage
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.choose_menu_to_register_household()
self.assertTrue(self.investigator.get_from_cache('IS_REGISTERING_HOUSEHOLD'))
def test_resume_should_show_welcome_text_if_open_batch_is_closed_on_session_timeout(self):
question_1 = Question.objects.create(text="Question 1?",
answer_type=Question.NUMBER, order=1, group=self.member_group)
question_2 = Question.objects.create(text="Question 2?",
answer_type=Question.NUMBER, order=2, group=self.member_group)
question_1.batches.add(self.batch)
question_2.batches.add(self.batch)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_1, order=1)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_2, order=2)
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
response = self.reset_session()
homepage = "Welcome %s to the survey.\n1: Register households\n2: Take survey" % self.investigator.name
response_string = "responseString=%s&action=request" % homepage
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.choose_menu_to_take_survey()
response = self.select_household()
members_list = "%s\n1: %s - (respondent)" % (USSD.MESSAGES['MEMBERS_LIST'], self.household_head.surname)
response_string = "responseString=%s&action=request" % members_list
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.select_household_member("1")
response_string = "responseString=%s&action=request" % question_1.text
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("1")
response_string = "responseString=%s&action=request" % question_2.text
self.assertEquals(urllib2.unquote(response.content), response_string)
self.batch.close_for_location(self.investigator.location)
response = self.reset_session()
homepage = "Welcome %s to the survey.\n1: Register households\n2: Take survey" % self.investigator.name
response_string = "responseString=%s&action=request" % homepage
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_ussd_new_parameter_request_empty_string(self):
self.ussd_params['transactionId'] = "123344" + str(randint(1, 99999))
self.ussd_params['response'] = 'false'
self.ussd_params['ussdRequestString'] = ''
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, "is_active", return_value=False):
response = self.client.post('/ussd', data=self.ussd_params)
homepage = "Welcome %s to the survey.\n1: Register households\n2: Take survey" % self.investigator.name
response_string = "responseString=%s&action=request" % homepage
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_ussd_new_parameter_request_short_code_without_application_extension(self):
self.ussd_params['transactionId'] = "123344" + str(randint(1, 99999))
self.ussd_params['response'] = 'false'
self.ussd_params['ussdRequestString'] = '*257#'
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, "is_active", return_value=False):
response = self.client.post('/ussd', data=self.ussd_params)
homepage = "Welcome %s to the survey.\n1: Register households\n2: Take survey" % self.investigator.name
response_string = "responseString=%s&action=request" % homepage
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_ussd_new_parameter_request_short_code_with_application_extension(self):
self.ussd_params['transactionId'] = "123344" + str(randint(1, 99999))
self.ussd_params['response'] = 'false'
self.ussd_params['ussdRequestString'] = '*153*10#'
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, "is_active", return_value=False):
response = self.client.post('/ussd', data=self.ussd_params)
homepage = "Welcome %s to the survey.\n1: Register households\n2: Take survey" % self.investigator.name
response_string = "responseString=%s&action=request" % homepage
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_ussd_new_parameter_request_short_code_with_application_code_set_and_application_code_posted(self):
self.ussd_params['transactionId'] = "123344" + str(randint(1, 99999))
self.ussd_params['response'] = 'false'
self.ussd_params['ussdRequestString'] = '10'
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, "is_active", return_value=False):
response = self.client.post('/ussd', data=self.ussd_params)
homepage = "Welcome %s to the survey.\n1: Register households\n2: Take survey" % self.investigator.name
response_string = "responseString=%s&action=request" % homepage
self.assertEquals(urllib2.unquote(response.content), response_string)
def test_subquestion_of_different_type_from_a_multichoice_parent_question_should_not_invoke_invalid_answer(self):
HouseholdMember.objects.create(surname="Name 2", household=self.household, date_of_birth='1980-02-03')
question_1 = Question.objects.create(text="This is a question",
answer_type=Question.MULTICHOICE, order=1, group=self.member_group)
question_1.batches.add(self.batch)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_1, order=1)
option_1 = QuestionOption.objects.create(question=question_1, text="OPTION 1", order=1)
option_2 = QuestionOption.objects.create(question=question_1, text="OPTION 2", order=2)
option_3 = QuestionOption.objects.create(question=question_1, text="specify", order=3)
sub_question_1 = Question.objects.create(text="some subquestion of question 1",
group=self.member_group,
answer_type=Question.TEXT, subquestion=True, parent=question_1)
sub_question_1.batches.add(self.batch)
AnswerRule.objects.create(question=question_1, action=AnswerRule.ACTIONS['ASK_SUBQUESTION'],
condition=AnswerRule.CONDITIONS['EQUALS_OPTION'], validate_with_option=option_3,
next_question=sub_question_1)
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
self.choose_menu_to_take_survey()
self.select_household()
response = self.select_household_member()
page_1 = "%s\n1: %s\n2: %s\n3: %s" % (question_1.text, option_1.text, option_2.text, option_3.text)
response_string = "responseString=%s&action=request" % page_1
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("3")
response_string = "responseString=%s&action=request" % (sub_question_1.text)
self.assertEquals(urllib2.unquote(response.content), response_string)
self.assertNotIn("INVALID ANSWER", urllib2.unquote(response.content))
def test_should_not_repeat_question_after_answer_has_been_given_the_answer_rule_is_not_repeat(self):
HouseholdMember.objects.create(surname="Surname", household=self.household, date_of_birth='1980-02-03')
question_1 = Question.objects.create(text="Question 1- with Skip logic",
answer_type=Question.MULTICHOICE, order=1, group=self.member_group)
option_1 = QuestionOption.objects.create(question=question_1, text="OPTION 1", order=1)
option_2 = QuestionOption.objects.create(question=question_1, text="OPTION 2", order=2)
option_3 = QuestionOption.objects.create(question=question_1, text="specify", order=3)
question_2 = Question.objects.create(text="question 2 - skipped",
answer_type=Question.TEXT, order=2, group=self.member_group)
question_3 = Question.objects.create(text="question 3 - skipped to",
answer_type=Question.TEXT, order=3, group=self.member_group)
question_4 = Question.objects.create(text="question 4",
answer_type=Question.NUMBER, order=4, group=self.member_group)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_1, order=1)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_2, order=2)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_3, order=3)
BatchQuestionOrder.objects.create(batch=self.batch, question=question_4, order=4)
self.batch.questions.add(question_1, question_2, question_3, question_4)
AnswerRule.objects.create(question=question_1, action=AnswerRule.ACTIONS['SKIP_TO'],
condition=AnswerRule.CONDITIONS['EQUALS_OPTION'], validate_with_option=option_3,
next_question=question_3)
mock_filter = MagicMock()
mock_filter.exists.return_value = True
with patch.object(RandomHouseHoldSelection.objects, 'filter', return_value=mock_filter):
with patch.object(Survey, "currently_open_survey", return_value=self.open_survey):
with patch.object(USSDSurvey, 'is_active', return_value=False):
self.reset_session()
self.choose_menu_to_take_survey()
self.select_household()
response = self.select_household_member()
page_1 = "%s\n1: %s\n2: %s\n3: %s" % (question_1.text, option_1.text, option_2.text, option_3.text)
response_string = "responseString=%s&action=request" % page_1
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("3")
response_string = "responseString=%s&action=request" % question_3.text
self.assertEquals(urllib2.unquote(response.content), response_string)
response = self.respond("akampa")
response_string = "responseString=%s&action=request" % question_4.text
self.assertEquals(urllib2.unquote(response.content), response_string)
|
import numpy as np
# from numpy.random import randint
my_list = [1,2,3,4,5,6]
new_list = [[1,2,3], [4,5,6], [7,8,9]]
# 1D array
print('Casting a premade list into a 1D numpy array')
print(np.array(my_list))
# 2D array, note the extra brackets being displayed
print('\nCasting a list of lists into a 2D numpy array')
print(np.array(new_list))
# similar to regular range function
# (start, stop, step)
print('\n np.arange to create a 1D array from (start, stop, step)')
print(np.arange(0,10,2))
# returns evenly spaced points between (start, stop, num=50)
# only a 1D array
# example below returns 30 evenly spaced pts between 0 and 5
print('\n np.linspace to return evenly spaced arrays from (start, stop, num)')
print(np.linspace(0,5,30))
# arrays of zeros and ones
# 2D arrays as we're passing in tuples
print('\n Zeros and Ones')
print(np.zeros((3,3)))
print()
print(np.ones((3,3)))
# identity matrix - for linear algebra problems
# returns a 2D array with ones on the diagonal and zeros elsewhere
# a single argument n gives an n x n array, thus the example below returns a 7x7 array
print('\n Identity Matrix')
print(np.eye(7))
# random.rand
# returns uniform random floats in [0, 1) in a given shape, not ints
# 1st example is a 1D array
# 2nd example is a 2D array, note we don't have to pass in a tuple like before
print('\n random.rand as a 1D array')
print(np.random.rand(5))
print('\n random.rand as a 2D array')
print(np.random.rand(5,5))
# random.randn
# returns samples from the "Standard Normal"/ Gaussian distribution
# for 2D and higher arrays there is no need to pass in a tuple either
print('\n Standard Normal/ Gaussian distribution in a 1D array')
print(np.random.randn(7))
print('\n Same Gaussian except in a 2D array if 2 arguments were passed in')
print(np.random.randn(4,4))
# random.randint
# returns 1 random int if size is not specified
# (low, high, size)
print('\n random.randint to return n random ints from (low, high, size)')
print(np.random.randint(0,10,5))
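# quick check of the "no size" case described above: a single int comes back
print('\n random.randint without a size returns a single int')
print(np.random.randint(0,10))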
# reshaping an array
# first build a 1D array using np.arange
# then reshape and assign to a new variable
# note that total size of new array must remain the same
# if the original array has only 25 elements, we cannot reshape it into a 5x10 array
print('\n array.reshape on an array created with np.arange(0, 25)')
arr = np.arange(0,25)
print(arr)
arr2 = arr.reshape(5,5)
print('\n Note reshaping does not alter the original array,\n so we assigned it to a new variable')
print(arr2)
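# quick illustration of the size rule noted above: an incompatible shape errors out
try:
    arr.reshape(5,10)
except ValueError as err:
    print('\n Reshaping 25 elements into a 5x10 array fails: {}'.format(err))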
# shape attribute
print('\n the shape of the array is {}'.format(arr2.shape))
# finding max and min
# finding position of the max and min
# finding the type of the array with dtype attribute
randr = np.random.randint(0,100,20)
print('\n finding the max/min of a random array')
print(randr)
print('\nThe max is {} and min is {}'.format(randr.max(), randr.min()))
print('The max of {} is located at position {}'.format(randr.max(), randr.argmax()))
print('The min of {} is located at position {}'.format(randr.min(), randr.argmin()))
print('\nThe type of the array is {}'.format(randr.dtype))
|
from ase import Atoms
from ase.units import Bohr
from gpaw import GPAW
from gpaw.tddft import TDDFT, photoabsorption_spectrum, \
LinearAbsorbingBoundary, P4AbsorbingBoundary, PML
from gpaw.test import equal
import os
# Sodium dimer, Na2
d = 1.5
atoms = Atoms(symbols='Na2',
positions=[( 0, 0, d),
( 0, 0,-d)],
pbc=False)
# Calculate ground state for TDDFT
# Larger box
atoms.center(vacuum=6.0)
# Larger grid spacing, LDA is ok
gs_calc = GPAW(nbands=1, h=0.35, xc='LDA', setups={'Na': '1'})
atoms.set_calculator(gs_calc)
e = atoms.get_potential_energy()
niter = gs_calc.get_number_of_iterations()
gs_calc.write('na2_gs.gpw', 'all')
# Short test run with an 8.0 attosecond time step
# (a full spectrum would use on the order of 2000 steps, i.e. 16 fs)
time_step = 8.0 # 8.0 as (1 as = 0.041341 autime)
iters = 10 # 10 x 8 as => 80 as
# Weak delta kick to z-direction
kick = [0,0,1e-3]
# TDDFT calculator
td_calc = TDDFT('na2_gs.gpw')
# Kick
td_calc.absorption_kick(kick)
# Propagate
td_calc.propagate(time_step, iters, 'na2_dmz.dat', 'na2_td.gpw')
# Linear absorption spectrum
photoabsorption_spectrum('na2_dmz.dat', 'na2_spectrum_z.dat', width=0.3)
iters = 3
# test restart
td_rest = TDDFT('na2_td.gpw')
td_rest.propagate(time_step, iters, 'na2_dmz2.dat', 'na2_td2.gpw')
# test restart
td_rest = TDDFT('na2_td.gpw', solver='BiCGStab')
td_rest.propagate(time_step, iters, 'na2_dmz3.dat', 'na2_td3.gpw')
# test absorbing boundary conditions
# linear imaginary potential
td_ipabs = TDDFT('na2_td.gpw')
ip_abc = LinearAbsorbingBoundary(5.0, 0.01, atoms.positions)
td_ipabs.set_absorbing_boundary(ip_abc)
td_ipabs.propagate(time_step, iters, 'na2_dmz4.dat', 'na2_td4.gpw')
# 4th order polynomial (1-(x^2-1)^2) imaginary potential
td_ip4abs = TDDFT('na2_td.gpw')
ip4_abc = P4AbsorbingBoundary(5.0, 0.03, atoms.positions, 3.0)
td_ip4abs.set_absorbing_boundary(ip4_abc)
td_ip4abs.propagate(time_step, iters, 'na2_dmz5.dat', 'na2_td5.gpw')
# perfectly matched layers
td_pmlabs = TDDFT('na2_td.gpw', solver='BiCGStab')
pml_abc = PML(100.0, 0.1)
td_pmlabs.set_absorbing_boundary(pml_abc)
td_pmlabs.propagate(time_step, iters, 'na2_dmz6.dat', 'na2_td6.gpw')
# photoabsorption_spectrum('na2_dmz2.dat', 'na2_spectrum_z2.dat', width=0.3)
#os.remove('na2_gs.gpw')
#os.remove('na2_td.gpw')
#os.remove('na2_dmz.dat')
#os.remove('na2_spectrum_z.dat')
#os.remove('na2_td2.gpw')
#os.remove('na2_dmz2.dat')
# os.remove('na2_spectrum_z2.dat')
#energy_tolerance = 0.0001
#niter_tolerance = 0
#equal(e, -1.24941356939, energy_tolerance) # svnversion 5252
#equal(niter, 21, niter_tolerance) # svnversion 5252
|
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
host = 'http://www.xn--hentaienespaol-1nb.net/'
headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Todos", action="todas", url=host, thumbnail='', fanart=''))
itemlist.append(
Item(channel=item.channel, title="Sin Censura", action="todas", url=host + 'hentai/sin-censura/', thumbnail='',
fanart=''))
return itemlist
def todas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="box-peli" id="post-.*?">.<h2 class="title">.<a href="([^"]+)">([^<]+)<\/a>.*?'
patron += 'height="170px" src="([^"]+)'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
url = scrapedurl
title = scrapedtitle # .decode('utf-8')
thumbnail = scrapedthumbnail
fanart = ''
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, fanart=fanart))
    # Pagination
    siguiente = scrapertools.find_single_match(data, 'class="nextpostslink" rel="next" href="([^"]+)">')
    if siguiente:
        title = 'Pagina Siguiente >>> '
        fanart = ''
        itemlist.append(
            Item(channel=item.channel, action="todas", title=title, url=siguiente, fanart=fanart))
    return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return todas(item)
else:
return []
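# The menu entries above point at action="findvideos", which is not defined in
# this snippet. Below is a minimal sketch of it; it assumes the framework's
# generic servertools.find_video_items helper, which scans the page for links
# to known video hosts.
def findvideos(item):
    logger.info()
    data = httptools.downloadpage(item.url).data
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.channel = item.channel
        videoitem.action = "play"
    return itemlist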
|
"""Bump version and create Github release
This script should be run locally, not on a build server.
"""
import argparse
import contextlib
import os
import re
import subprocess
import sys
import git
import github
import changelog
ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def main():
bumpversion_parts = get_bumpversion_parts()
parser = argparse.ArgumentParser()
parser.add_argument("part", choices=bumpversion_parts, help="part of version to bump")
parser.add_argument("--skip-sanity-checks", action="store_true")
parser.add_argument("--skip-push", action="store_true")
parser.add_argument("--dry-run", action="store_true")
parser.add_argument("--confirm", action="store_true")
args = parser.parse_args()
if args.dry_run:
print "DRY RUN. Nothing will be committed/pushed."
repo = Repo('lbry', args.part, ROOT)
branch = 'master'
print 'Current version: {}'.format(repo.current_version)
print 'New version: {}'.format(repo.new_version)
if not args.confirm and not confirm():
print "Aborting"
return 1
if not args.skip_sanity_checks:
run_sanity_checks(repo, branch)
repo.assert_new_tag_is_absent()
is_rc = re.search('\drc\d+$', repo.new_version) is not None
# only have a release message for real releases, not for RCs
release_msg = None if is_rc else repo.get_unreleased_changelog()
if release_msg is None:
release_msg = ''
if args.dry_run:
print "rc: " + ("yes" if is_rc else "no")
print "release message: \n" + (release_msg if not is_rc else " NO MESSAGE FOR RCs")
return
gh_token = get_gh_token()
auth = github.Github(gh_token)
github_repo = auth.get_repo('lbryio/lbry')
if not is_rc:
repo.bump_changelog()
repo.bumpversion()
new_tag = repo.get_new_tag()
github_repo.create_git_release(new_tag, new_tag, release_msg, draft=True, prerelease=is_rc)
if args.skip_push:
print (
'Skipping push; you will have to reset and delete tags if '
'you want to run this script again.'
)
else:
repo.git_repo.git.push(follow_tags=True, recurse_submodules='check')
class Repo(object):
def __init__(self, name, part, directory):
self.name = name
self.part = part
if not self.part:
raise Exception('Part required')
self.directory = directory
self.git_repo = git.Repo(self.directory)
self._bumped = False
self.current_version = self._get_current_version()
self.new_version = self._get_new_version()
self._changelog = changelog.Changelog(os.path.join(self.directory, 'CHANGELOG.md'))
def get_new_tag(self):
return 'v' + self.new_version
def get_unreleased_changelog(self):
return self._changelog.get_unreleased()
def bump_changelog(self):
self._changelog.bump(self.new_version)
with pushd(self.directory):
self.git_repo.git.add(os.path.basename(self._changelog.path))
def _get_current_version(self):
with pushd(self.directory):
output = subprocess.check_output(
['bumpversion', '--dry-run', '--list', '--allow-dirty', self.part])
return re.search('^current_version=(.*)$', output, re.M).group(1)
def _get_new_version(self):
with pushd(self.directory):
output = subprocess.check_output(
['bumpversion', '--dry-run', '--list', '--allow-dirty', self.part])
return re.search('^new_version=(.*)$', output, re.M).group(1)
def bumpversion(self):
if self._bumped:
raise Exception('Cowardly refusing to bump a repo twice')
with pushd(self.directory):
subprocess.check_call(['bumpversion', '--allow-dirty', self.part])
self._bumped = True
def assert_new_tag_is_absent(self):
new_tag = self.get_new_tag()
tags = self.git_repo.git.tag()
if new_tag in tags.split('\n'):
raise Exception('Tag {} is already present in repo {}.'.format(new_tag, self.name))
def is_behind(self, branch):
self.git_repo.remotes.origin.fetch()
rev_list = '{branch}...origin/{branch}'.format(branch=branch)
commits_behind = self.git_repo.git.rev_list(rev_list, right_only=True, count=True)
commits_behind = int(commits_behind)
return commits_behind > 0
def get_bumpversion_parts():
with pushd(ROOT):
output = subprocess.check_output([
'bumpversion', '--dry-run', '--list', '--allow-dirty', 'fake-part',
])
parse_line = re.search('^parse=(.*)$', output, re.M).group(1)
return tuple(re.findall('<([^>]+)>', parse_line))
def get_gh_token():
if 'GH_TOKEN' in os.environ:
return os.environ['GH_TOKEN']
else:
print """
Please enter your personal access token. If you don't have one,
see https://github.com/lbryio/lbry-app/wiki/Release-Script#generate-a-personal-access-token
for instructions on how to generate one.
You can also set the GH_TOKEN environment variable to avoid seeing this message
in the future"""
return raw_input('token: ').strip()
def confirm():
try:
return raw_input('Is this what you want? [y/N] ').strip().lower() == 'y'
except KeyboardInterrupt:
return False
def run_sanity_checks(repo, branch):
if repo.git_repo.is_dirty():
print 'Cowardly refusing to release a dirty repo'
sys.exit(1)
if repo.git_repo.active_branch.name != branch:
print 'Cowardly refusing to release when not on the {} branch'.format(branch)
sys.exit(1)
if repo.is_behind(branch):
print 'Cowardly refusing to release when behind origin'
sys.exit(1)
if not is_custom_bumpversion_version():
print (
'Install LBRY\'s fork of bumpversion: '
'pip install -U git+https://github.com/lbryio/bumpversion.git'
)
sys.exit(1)
def is_custom_bumpversion_version():
try:
output = subprocess.check_output(['bumpversion', '-v'], stderr=subprocess.STDOUT).strip()
if output == 'bumpversion 0.5.4-lbry':
return True
except (subprocess.CalledProcessError, OSError):
pass
return False
@contextlib.contextmanager
def pushd(new_dir):
previous_dir = os.getcwd()
os.chdir(new_dir)
yield
os.chdir(previous_dir)
if __name__ == '__main__':
sys.exit(main())
|
import numpy as np
def spatial_rate_map(x, y, t, spike_train, binsize=0.01, box_xlen=1,
box_ylen=1, mask_unvisited=True, convolve=True,
return_bins=False, smoothing=0.02):
"""Divide a 2D space in bins of size binsize**2, count the number of spikes
in each bin and divide by the time spent in respective bins. The map can
then be convolved with a gaussian kernel of size csize determined by the
smoothing factor, binsize and box_xlen.
Parameters
----------
spike_train : neo.SpikeTrain
    x : array
        1d vector of x positions
    y : array
        1d vector of y positions
    t : array
        1d vector of times at x, y positions
binsize : float
spatial binsize
box_xlen : quantities scalar in m
side length of quadratic box
mask_unvisited: bool
        mask bins which have not been visited with nans
convolve : bool
convolve the rate map with a 2D Gaussian kernel
Returns
-------
out : rate map
if return_bins = True
out : rate map, xbins, ybins
"""
if not all([len(var) == len(var2) for var in [x,y,t] for var2 in [x,y,t]]):
raise ValueError('x, y, t must have same number of elements')
if box_xlen < x.max() or box_ylen < y.max():
raise ValueError('box length must be larger or equal to max path length')
from decimal import Decimal as dec
decimals = 1e10
remainderx = dec(float(box_xlen)*decimals) % dec(float(binsize)*decimals)
remaindery = dec(float(box_ylen)*decimals) % dec(float(binsize)*decimals)
if remainderx != 0 or remaindery != 0:
raise ValueError('the remainder should be zero i.e. the ' +
'box length should be an exact multiple ' +
'of the binsize')
# interpolate one extra timepoint
t_ = np.append(t, t[-1] + np.median(np.diff(t)))
spikes_in_bin, _ = np.histogram(spike_train, t_)
time_in_bin = np.diff(t_)
xbins = np.arange(0, box_xlen + binsize, binsize)
ybins = np.arange(0, box_ylen + binsize, binsize)
ix = np.digitize(x, xbins, right=True)
iy = np.digitize(y, ybins, right=True)
spike_pos = np.zeros((xbins.size, ybins.size))
time_pos = np.zeros((xbins.size, ybins.size))
for n in range(len(x)):
spike_pos[ix[n], iy[n]] += spikes_in_bin[n]
time_pos[ix[n], iy[n]] += time_in_bin[n]
# correct for shifting of map
spike_pos = spike_pos[1:, 1:]
time_pos = time_pos[1:, 1:]
with np.errstate(divide='ignore', invalid='ignore'):
rate = np.divide(spike_pos, time_pos)
if convolve:
rate[np.isnan(rate)] = 0. # for convolution
from astropy.convolution import Gaussian2DKernel, convolve_fft
csize = (box_xlen / binsize) * smoothing
kernel = Gaussian2DKernel(csize)
rate = convolve_fft(rate, kernel) # TODO edge correction
if mask_unvisited:
was_in_bin = np.asarray(time_pos, dtype=bool)
rate[np.invert(was_in_bin)] = np.nan
if return_bins:
return rate.T, xbins, ybins
else:
return rate.T
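# Illustrative usage only (not part of the library): synthetic positions on a
# 1 x 1 box with a binsize that divides the box length exactly, as required by
# the check above.
#
#     t = np.linspace(0, 600, 60000)
#     x = 0.5 + 0.4 * np.sin(0.1 * t)
#     y = 0.5 + 0.4 * np.cos(0.1 * t)
#     spike_train = np.sort(np.random.uniform(0, 600, 3000))
#     rmap, xbins, ybins = spatial_rate_map(x, y, t, spike_train,
#                                           binsize=0.02, return_bins=True)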
def gridness(rate_map, box_xlen, box_ylen, return_acorr=False,
step_size=0.1, method='iter', return_masked_acorr=False):
    '''Calculates gridness of a rate map. Calculates the normalized
    autocorrelation (A) of a rate map B where A is given as
    A = 1/n \sum_{x,y} (B - \bar{B})^{2} / \sigma_{B}^{2}. Further, Pearson's
    product-moment correlation coefficient is calculated between A and A_{rot},
    i.e. A rotated 30 and 60 degrees. Finally the gridness is calculated as the
    difference between the minimum of the coefficients at 60 degrees and the
    maximum of the coefficients at 30 degrees, i.e. gridness = min(r60) - max(r30).
    If the method 'iter' is chosen:
    In order to focus the analysis on the symmetry of A, the central and the
    outer part of A are increasingly masked in steps of ``step_size`` and the
    gridness is taken as the maximum over these masks.
If the method 'puncture' is chosen:
This is the standard way of calculating gridness, by masking the central
autocorrelation bump, in addition to rounding the map. See examples.
Parameters
----------
rate_map : numpy.ndarray
box_xlen : float
side length of quadratic box
step_size : float
step size in masking, only applies to the method "iter"
return_acorr : bool
return autocorrelation map or not
return_masked_acorr : bool
return masked autocorrelation map or not
method : 'iter' or 'puncture'
Returns
-------
out : gridness, (autocorrelation map, masked autocorrelation map)
Examples
--------
>>> from exana.tracking.tools import make_test_grid_rate_map
>>> import matplotlib.pyplot as plt
>>> rate_map, pos = make_test_grid_rate_map()
>>> iter_score = gridness(rate_map, box_xlen=1, box_ylen=1, method='iter')
>>> print('%.2f' % iter_score)
1.39
>>> puncture_score = gridness(rate_map, box_xlen=1, box_ylen=1, method='puncture')
>>> print('%.2f' % puncture_score)
0.96
.. plot::
import matplotlib.pyplot as plt
import numpy as np
from exana.tracking.tools import make_test_grid_rate_map
from exana.tracking import gridness
import matplotlib.pyplot as plt
rate_map, _ = make_test_grid_rate_map()
fig, axs = plt.subplots(2, 2)
g1, acorr, m_acorr1 = gridness(rate_map, box_xlen=1,
box_ylen=1, return_acorr=True,
return_masked_acorr=True,
method='iter')
g2, m_acorr2 = gridness(rate_map, box_xlen=1,
box_ylen=1,
return_masked_acorr=True,
method='puncture')
mats = [rate_map, m_acorr1, acorr, m_acorr2]
titles = ['Rate map', 'Masked acorr "iter", gridness = %.2f' % g1,
'Autocorrelation',
'Masked acorr "puncture", gridness = %.2f' % g2]
for ax, mat, title in zip(axs.ravel(), mats, titles):
ax.imshow(mat)
ax.set_title(title)
plt.tight_layout()
plt.show()
'''
import numpy.ma as ma
from exana.misc.tools import fftcorrelate2d
from exana.tracking.tools import gaussian2D
from scipy.optimize import curve_fit
tmp_map = rate_map.copy()
tmp_map[~np.isfinite(tmp_map)] = 0
acorr = fftcorrelate2d(tmp_map, tmp_map, mode='full', normalize=True)
rows, cols = acorr.shape
b_x = np.linspace(- box_xlen / 2., box_xlen / 2., rows)
b_y = np.linspace(- box_ylen / 2., box_ylen / 2., cols)
B_x, B_y = np.meshgrid(b_x, b_y)
if method == 'iter':
if return_masked_acorr: m_acorrs = []
gridscores = []
for outer in np.arange(box_xlen / 4, box_xlen / 2, step_size):
m_acorr = ma.masked_array(
acorr, mask=np.sqrt(B_x**2 + B_y**2) > outer)
for inner in np.arange(0, box_xlen / 4, step_size):
m_acorr = ma.masked_array(
m_acorr, mask=np.sqrt(B_x**2 + B_y**2) < inner)
r30, r60 = rotate_corr(m_acorr)
gridscores.append(np.min(r60) - np.max(r30))
if return_masked_acorr: m_acorrs.append(m_acorr)
gridscore = max(gridscores)
if return_masked_acorr: m_acorr = m_acorrs[gridscores.index(gridscore)]
elif method == 'puncture':
# round picture edges
_gaussian = lambda pos, a, s: gaussian2D(a, pos[0], pos[1], 0, 0, s).ravel()
p0 = (max(acorr.ravel()), min(box_xlen, box_ylen) / 100)
popt, pcov = curve_fit(_gaussian, (B_x, B_y), acorr.ravel(), p0=p0)
m_acorr = ma.masked_array(
acorr, mask=np.sqrt(B_x**2 + B_y**2) > min(box_xlen, box_ylen) / 2)
m_acorr = ma.masked_array(
m_acorr, mask=np.sqrt(B_x**2 + B_y**2) < popt[1])
r30, r60 = rotate_corr(m_acorr)
gridscore = float(np.min(r60) - np.max(r30))
if return_acorr and return_masked_acorr:
return gridscore, acorr, m_acorr
if return_masked_acorr:
return gridscore, m_acorr
if return_acorr:
return gridscore, acorr # acorrs[grids.index(max(grids))]
else:
return gridscore
def rotate_corr(acorr):
from exana.misc.tools import masked_corrcoef2d
from scipy.ndimage.interpolation import rotate
angles = range(30, 180+30, 30)
corr = []
# Rotate and compute correlation coefficient
for angle in angles:
rot_acorr = rotate(acorr, angle, reshape=False)
corr.append(masked_corrcoef2d(rot_acorr, acorr)[0, 1])
r60 = corr[1::2]
r30 = corr[::2]
return r30, r60
def occupancy_map(x, y, t,
binsize=0.01,
box_xlen=1,
box_ylen=1,
mask_unvisited=True,
convolve=True,
return_bins=False,
smoothing=0.02):
'''Divide a 2D space in bins of size binsize**2, count the time spent
in each bin. The map can be convolved with a gaussian kernel of size
csize determined by the smoothing factor, binsize and box_xlen.
Parameters
----------
x : array
1d vector of x positions
y : array
1d vector of y positions
t : array
1d vector of times at x, y positions
binsize : float
spatial binsize
box_xlen : float
side length of quadratic box
mask_unvisited: bool
        mask bins which have not been visited with nans
convolve : bool
convolve the rate map with a 2D Gaussian kernel
Returns
-------
occupancy_map : numpy.ndarray
if return_bins = True
out : occupancy_map, xbins, ybins
'''
if not all([len(var) == len(var2) for var in [
x, y, t] for var2 in [x, y, t]]):
raise ValueError('x, y, t must have same number of elements')
if box_xlen < x.max() or box_ylen < y.max():
raise ValueError(
'box length must be larger or equal to max path length')
from decimal import Decimal as dec
decimals = 1e10
remainderx = dec(float(box_xlen)*decimals) % dec(float(binsize)*decimals)
remaindery = dec(float(box_ylen)*decimals) % dec(float(binsize)*decimals)
if remainderx != 0 or remaindery != 0:
raise ValueError('the remainder should be zero i.e. the ' +
'box length should be an exact multiple ' +
'of the binsize')
# interpolate one extra timepoint
t_ = np.array(t.tolist() + [t.max() + np.median(np.diff(t))])
time_in_bin = np.diff(t_)
xbins = np.arange(0, box_xlen + binsize, binsize)
ybins = np.arange(0, box_ylen + binsize, binsize)
ix = np.digitize(x, xbins, right=True)
iy = np.digitize(y, ybins, right=True)
time_pos = np.zeros((xbins.size, ybins.size))
for n in range(len(x) - 1):
time_pos[ix[n], iy[n]] += time_in_bin[n]
# correct for shifting of map since digitize returns values at right edges
time_pos = time_pos[1:, 1:]
    # the occupancy map is the time spent in each bin
    rate = time_pos.copy()
    if convolve:
        rate[np.isnan(rate)] = 0.  # for convolution
from astropy.convolution import Gaussian2DKernel, convolve_fft
csize = (box_xlen / binsize) * smoothing
kernel = Gaussian2DKernel(csize)
rate = convolve_fft(rate, kernel) # TODO edge correction
if mask_unvisited:
was_in_bin = np.asarray(time_pos, dtype=bool)
rate[np.invert(was_in_bin)] = np.nan
if return_bins:
return rate.T, xbins, ybins
else:
return rate.T
def nvisits_map(x, y, t,
binsize=0.01,
box_xlen=1,
box_ylen=1,
return_bins=False):
'''Divide a 2D space in bins of size binsize**2, count the
number of visits in each bin. The map can be convolved with
a gaussian kernel of size determined by the smoothing factor,
binsize and box_xlen.
Parameters
----------
x : array
1d vector of x positions
y : array
1d vector of y positions
t : array
1d vector of times at x, y positions
binsize : float
spatial binsize
box_xlen : float
side length of quadratic box
Returns
-------
nvisits_map : numpy.ndarray
if return_bins = True
out : nvisits_map, xbins, ybins
'''
if not all([len(var) == len(var2) for var in [
x, y, t] for var2 in [x, y, t]]):
raise ValueError('x, y, t must have same number of elements')
if box_xlen < x.max() or box_ylen < y.max():
raise ValueError(
'box length must be larger or equal to max path length')
from decimal import Decimal as dec
decimals = 1e10
remainderx = dec(float(box_xlen)*decimals) % dec(float(binsize)*decimals)
remaindery = dec(float(box_ylen)*decimals) % dec(float(binsize)*decimals)
if remainderx != 0 or remaindery != 0:
raise ValueError('the remainder should be zero i.e. the ' +
'box length should be an exact multiple ' +
'of the binsize')
xbins = np.arange(0, box_xlen + binsize, binsize)
ybins = np.arange(0, box_ylen + binsize, binsize)
ix = np.digitize(x, xbins, right=True)
iy = np.digitize(y, ybins, right=True)
nvisits_map = np.zeros((xbins.size, ybins.size))
for n in range(len(x)):
if n == 0:
nvisits_map[ix[n], iy[n]] = 1
else:
if ix[n-1] != ix[n] or iy[n-1] != iy[n]:
nvisits_map[ix[n], iy[n]] += 1
# correct for shifting of map since digitize returns values at right edges
nvisits_map = nvisits_map[1:, 1:]
if return_bins:
return nvisits_map.T, xbins, ybins
else:
return nvisits_map.T
def spatial_rate_map_1d(x, t, spike_train,
binsize=0.01,
track_len=1,
mask_unvisited=True,
convolve=True,
return_bins=False,
smoothing=0.02):
"""Take x coordinates of linear track data, divide in bins of binsize,
count the number of spikes in each bin and divide by the time spent in
    respective bins. The map can then be convolved with a gaussian kernel of
    size csize determined by the smoothing factor, binsize and track_len.
    Parameters
    ----------
    spike_train : array
        1d vector of spike times
    x : array
        1d vector of x positions
    t : array
        1d vector of times at x positions
    binsize : float
        spatial binsize
    track_len : float
        length of the linear track
    mask_unvisited : bool
        mask bins which have not been visited with nans
    convolve : bool
        convolve the rate map with a Gaussian kernel
Returns
-------
out : rate map
if return_bins = True
out : rate map, xbins
"""
if not all([len(var) == len(var2) for var in [x, t] for var2 in [x, t]]):
raise ValueError('x, t must have same number of elements')
if track_len < x.max():
        raise ValueError('track length must be larger '
                         'or equal to max path length')
from decimal import Decimal as dec
decimals = 1e10
remainderx = dec(float(track_len)*decimals) % dec(float(binsize)*decimals)
if remainderx != 0:
        raise ValueError('the remainder should be zero i.e. the ' +
                         'track length should be an exact multiple ' +
                         'of the binsize')
# interpolate one extra timepoint
t_ = np.array(t.tolist() + [t.max() + np.median(np.diff(t))])
spikes_in_bin, _ = np.histogram(spike_train, t_)
time_in_bin = np.diff(t_)
xbins = np.arange(0, track_len + binsize, binsize)
ix = np.digitize(x, xbins, right=True)
spike_pos = np.zeros(xbins.size)
time_pos = np.zeros(xbins.size)
for n in range(len(x)):
spike_pos[ix[n]] += spikes_in_bin[n]
time_pos[ix[n]] += time_in_bin[n]
# correct for shifting of map since digitize returns values at right edges
spike_pos = spike_pos[1:]
time_pos = time_pos[1:]
with np.errstate(divide='ignore', invalid='ignore'):
rate = np.divide(spike_pos, time_pos)
if convolve:
rate[np.isnan(rate)] = 0. # for convolution
        from astropy.convolution import Gaussian1DKernel, convolve_fft
        csize = (track_len / binsize) * smoothing
        kernel = Gaussian1DKernel(csize)  # 1d rate map needs a 1d kernel
rate = convolve_fft(rate, kernel) # TODO edge correction
if mask_unvisited:
was_in_bin = np.asarray(time_pos, dtype=bool)
rate[np.invert(was_in_bin)] = np.nan
if return_bins:
return rate.T, xbins
else:
return rate.T
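# --- Illustrative sketch (not part of the original module) -----------------
# Hypothetical use of spatial_rate_map_1d on a 1 m linear track: positions
# sampled at 100 Hz and a random spike train, both invented here purely for
# illustration; convolve=False avoids the optional astropy dependency.
def _example_rate_map_1d():
    import numpy as np
    rng = np.random.RandomState(0)
    t = np.arange(0, 20.0, 0.01)                   # 20 s at 100 Hz
    x = 0.5 + 0.45 * np.sin(2 * np.pi * 0.05 * t)  # back and forth runs
    spike_train = np.sort(rng.uniform(t[0], t[-1], size=200))
    rate, xbins = spatial_rate_map_1d(x, t, spike_train, binsize=0.05,
                                      convolve=False, return_bins=True)
    return rate, xbins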
def separate_fields(rate_map, laplace_thrsh=0, center_method='maxima',
cutoff_method='none', box_xlen=1, box_ylen=1, index=False):
"""Separates fields using the laplacian to identify fields separated by
a negative second derivative.
Parameters
----------
rate_map : np 2d array
firing rate in each bin
laplace_thrsh : float
value of laplacian to separate fields by relative to the minima. Should be
on the interval 0 to 1, where 0 cuts off at 0 and 1 cuts off at
min(laplace(rate_map)). Default 0.
center_method : string
method to find field centers. Valid options = ['center_of_mass',
'maxima','gaussian_fit']
cutoff_method (optional) : string or function
function to exclude small fields. If local field value of function
is lower than global function value, the field is excluded. Valid
string_options = ['median', 'mean','none'].
index : bool, default False
return bump center values as index or xy-pos
Returns
-------
fields : numpy array, shape like rate_map.
contains areas all filled with same value, corresponding to fields
in rate_map. The values are in range(1,nFields + 1), sorted by size of the
field (sum of all field values). 0 elsewhere.
n_field : int
field count
bump_centers : (n_field x 2) np ndarray
Coordinates of field centers
"""
cutoff_functions = {'mean':np.mean, 'median':np.median, 'none':None}
if not callable(cutoff_method):
try:
cutoff_func = cutoff_functions[cutoff_method]
except KeyError:
msg = "invalid cutoff_method flag '%s'" % cutoff_method
raise ValueError(msg)
else:
cutoff_func = cutoff_method
from scipy import ndimage
l = ndimage.laplace(rate_map)
l[l>laplace_thrsh*np.min(l)] = 0
# Labels areas of the laplacian not connected by values > 0.
fields, n_fields = ndimage.label(l)
# index 0 is the background
indx = np.arange(1,n_fields+1)
# Use cutoff method to remove unwanted fields
if cutoff_method != 'none':
        # global statistic of the rate map used as the exclusion threshold
        try:
            total_value = cutoff_func(rate_map)
        except Exception:
            print('Unexpected error, cutoff_func does not like the input:')
            raise
        field_values = ndimage.labeled_comprehension(rate_map, fields, indx,
                                                     cutoff_func, float, 0)
        try:
            is_field = field_values >= total_value
        except Exception:
            print('cutoff_func return values cannot be compared:')
            raise
if np.sum(is_field) == 0:
return np.zeros(rate_map.shape), 0, np.array([[],[]])
for i in indx:
if not is_field[i-1]:
fields[fields == i] = 0
n_fields = ndimage.label(fields, output=fields)
indx = np.arange(1,n_fields + 1)
# Sort by largest mean
sizes = ndimage.labeled_comprehension(rate_map, fields, indx,
np.mean, float, 0)
size_sort = np.argsort(sizes)[::-1]
new = np.zeros_like(fields)
for i in np.arange(n_fields):
new[fields == size_sort[i]+1] = i+1
fields = new
    # box_xlen may be a plain number or a quantity with units; default to 1
    bc = get_bump_centers(rate_map, labels=fields, ret_index=index,
                          indices=indx, method=center_method,
                          units=getattr(box_xlen, 'units', 1))
# TODO exclude fields where maxima is on the edge of the field?
return fields, n_fields, bc
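# --- Illustrative sketch (not part of the original module) -----------------
# Hypothetical use of separate_fields on a toy rate map with two Gaussian
# bumps; positions and widths are assumptions chosen only for illustration.
# cutoff_method='none' keeps both bumps regardless of their height.
def _example_separate_fields():
    import numpy as np
    xx, yy = np.meshgrid(np.arange(20), np.arange(20), indexing='ij')
    bump = lambda cx, cy: np.exp(-((xx - cx)**2 + (yy - cy)**2) / (2 * 2.0**2))
    rate_map = bump(5, 5) + 0.5 * bump(14, 14)
    fields, n_fields, centers = separate_fields(rate_map, cutoff_method='none')
    return fields, n_fields, centers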
def get_bump_centers(rate_map, labels, ret_index=False, indices=None, method='maxima',
units=1):
"""Finds center of fields at labels."""
from scipy import ndimage
if method not in ['maxima','center_of_mass','gaussian_fit']:
msg = "invalid center_method flag '%s'" % method
raise ValueError(msg)
if indices is None:
indices = np.arange(1,np.max(labels)+1)
if method == 'maxima':
bc = ndimage.maximum_position(rate_map, labels=labels,
index=indices)
elif method == 'center_of_mass':
bc = ndimage.center_of_mass(rate_map, labels=labels, index=indices)
elif method == 'gaussian_fit':
from exana.tracking.tools import fit_gauss_asym
        bc = np.zeros((len(indices), 2))
for i in indices:
r = rate_map.copy()
r[labels != i] = 0
popt = fit_gauss_asym(r, return_data=False)
# TODO Find out which axis is x and which is y
bc[i-1] = (popt[2],popt[1])
if ret_index:
msg = 'ret_index not implemented for gaussian fit'
raise NotImplementedError(msg)
if not ret_index and not method=='gaussian_fit':
bc = (bc + np.array((0.5,0.5)))/rate_map.shape
return np.array(bc)*units
def find_avg_dist(rate_map, thrsh = 0, plot=False):
"""Uses autocorrelation and separate_fields to find average distance
between bumps. Is dependent on high gridness to get separate bumps in
the autocorrelation
Parameters
----------
rate_map : np 2d array
firing rate in each bin
thrsh (optional) : float, default 0
cutoff value for the laplacian of the autocorrelation function.
Should be a negative number. Gives better separation if bumps are
connected by "bridges" or saddles where the laplacian is negative.
plot (optional) : bool, default False
plot acorr and the separated acorr, with bump centers
Returns
-------
avg_dist : float
relative units from 0 to 1 of the box size
"""
from scipy.ndimage import maximum_position
from exana.misc.tools import fftcorrelate2d
# autocorrelate. Returns array (2x - 1) the size of rate_map
acorr = fftcorrelate2d(rate_map,rate_map, mode = 'full', normalize = True)
#acorr[acorr<0] = 0 # TODO Fix this
f, nf, bump_centers = separate_fields(acorr,laplace_thrsh=thrsh,
center_method='maxima',cutoff_method='median')
# TODO Find a way to find valid value for
# thrsh, or remove.
bump_centers = np.array(bump_centers)
# find dists from center in (autocorrelation)relative units (from 0 to 1)
distances = np.linalg.norm(bump_centers - (0.5,0.5), axis = 1)
dist_sort = np.argsort(distances)
distances = distances[dist_sort]
# use maximum 6 closest values except center value
avg_dist = np.median(distances[1:7])
# correct for difference in shapes
avg_dist *= acorr.shape[0]/rate_map.shape[0] # = 1.98
# TODO : raise warning if too big difference between points
if plot:
import matplotlib.pyplot as plt
fig,[ax1,ax2] = plt.subplots(1,2)
ax1.imshow(acorr,extent = (0,1,0,1),origin='lower')
ax1.scatter(*(bump_centers[:,::-1].T))
ax2.imshow(f,extent = (0,1,0,1),origin='lower')
ax2.scatter(*(bump_centers[:,::-1].T))
return avg_dist
def fit_hex(bump_centers, avg_dist=None, plot_bumps = False, method='best'):
"""Fits a hex grid to a given set of bumps. Uses the three bumps most
Parameters
----------
bump_centers : Nx2 np.array
x,y positions of bump centers, x,y /in (0,1)
avg_dist (optional): float
average spacing between bumps
    plot_bumps (optional): bool
        if True, plots the three bumps most likely to be in
        correct hex position to the current matplotlib axes.
method (optional): string, valid options: ['closest', 'best']
method to find angle from neighboring bumps.
'closest' uses six bumps nearest to center bump
'best' uses the two bumps nearest to avg_dist
Returns
-------
displacement : float
distance of bump closest to the center in meters
orientation : float
orientation of hexagon (in degrees)
"""
valid_methods = ['closest', 'best']
if method not in valid_methods:
msg = "invalid method flag '%s'" % method
raise ValueError(msg)
bump_centers = np.array(bump_centers)
# sort by distance to center
d = np.linalg.norm(bump_centers - (0.5,0.5), axis=1)
d_sort = np.argsort(d)
dist_sorted = bump_centers[d_sort]
center_bump = dist_sorted[0]; others = dist_sorted[1:]
displacement = d[d_sort][0]
# others distances to center bumps
relpos = others - center_bump
reldist = np.linalg.norm(relpos, axis=1)
if method == 'closest':
# get 6 closest bumps
rel_sort = np.argsort(reldist)
closest = others[rel_sort][:6]
relpos = relpos[rel_sort][:6]
elif method == 'best':
        # get 2 bumps such that sum_{i != j} (|r_i - r_j| - avg_dist)^2 is minimized
squares = 1e32*np.ones((others.shape[0], others.shape[0]))
for i in range(len(relpos)):
for j in range(i,len(relpos)):
rel1 = (reldist[i] - avg_dist)**2
rel2 = (reldist[j] - avg_dist)**2
rel3 = (np.linalg.norm(relpos[i]-relpos[j]) - avg_dist)**2
squares[i,j] = rel1 + rel2 + rel3
rel_slice = np.unravel_index(np.argmin(squares), squares.shape)
rel_slice = np.array(rel_slice)
#rel_sort = np.argsort(np.abs(reldist-avg_dist))
closest = others[rel_slice]
relpos = relpos[rel_slice]
# sort by angle
a = np.arctan2(relpos[:,1], relpos[:,0])%(2*np.pi)
a_sort = np.argsort(a)
# extract lowest angle and convert to degrees
orientation = a[a_sort][0] *180/np.pi
# hex grid is symmetric under rotations of 60deg
orientation %= 60
if plot_bumps:
import matplotlib.pyplot as plt
ax=plt.gca()
i = 1
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
dx = xmax-xmin; dy = ymax - ymin
closest = closest[a_sort]
edges = [center_bump] if method == 'best' else []
edges += [c for c in closest]
edges = np.array(edges)*(dx,dy) + (xmin, ymin)
poly = plt.Polygon(edges, alpha=0.5,color='r')
ax.add_artist(poly)
return displacement, orientation
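# --- Illustrative sketch (not part of the original module) -----------------
# Hypothetical use of fit_hex on an ideal hexagon of bumps around the box
# centre; the spacing of 0.3 is an assumption for illustration. Expected
# result: displacement close to 0 and orientation close to 0 degrees.
def _example_fit_hex():
    import numpy as np
    spacing = 0.3
    angles = np.deg2rad(np.arange(0, 360, 60))
    ring = 0.5 + spacing * np.column_stack((np.cos(angles), np.sin(angles)))
    bump_centers = np.vstack(([[0.5, 0.5]], ring))
    return fit_hex(bump_centers, avg_dist=spacing, method='best')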
def calculate_grid_geometry(rate_map, plot_fields=False, **kwargs):
"""Calculates quantitative information about grid field.
    Find bump centers, bump spacing, center displacement and hexagon
orientation
Parameters
----------
rate_map : np 2d array
firing rate in each bin
plot_fields : if True, plots the field labels with field centers to the
current matplotlib ax. Default False
thrsh : float, default 0
see find_avg_dist()
    center_method : string, valid options: ['maxima', 'center_of_mass']
        default: 'maxima'
see separate_fields()
method : string, valid options: ['closest', 'best']
see fit_hex()
Returns
-------
bump_centers : 2d np.array
x,y positions of bump centers
avg_dist : float
average spacing between bumps, \in [0,1]
displacement : float
distance of bump closest to the center
orientation : float
orientation of hexagon (in degrees)
Examples
--------
>>> import numpy as np
>>> rate_map = np.zeros((5,5))
>>> pos = np.array([ [0,2],
... [1,0],[1,4],
... [2,2],
... [3,0],[3,4],
... [4,2]])
>>> for(i,j) in pos:
... rate_map[i,j] = 1
...
>>> result = calculate_grid_geometry(rate_map)
"""
# TODO add back the following when it is correct
# (array([[0.5, 0.9],
# [0.9, 0.7],
# [0.1, 0.7],
# [0.5, 0.5],
# [0.9, 0.3],
# [0.1, 0.3],
# [0.5, 0.1]]) * m, 0.4472135954999579, 0.0, 26.565051177077983)
from scipy.ndimage import mean, center_of_mass
# TODO: smooth data?
# smooth_rate_map = lambda x:x
# rate_map = smooth_rate_map(rate_map)
center_method = kwargs.pop('center_method',None)
if center_method:
fields, nfields, bump_centers = separate_fields(rate_map,
center_method=center_method)
else:
fields, nfields, bump_centers = separate_fields(rate_map)
if bump_centers.size == 0:
import warnings
        msg = 'could not find bump centers, returning None'
warnings.warn(msg, RuntimeWarning, stacklevel=2)
return None,None,None,None,
sh = np.array(rate_map.shape)
if plot_fields:
print(fields)
import matplotlib.pyplot as plt
x=np.linspace(0,1,sh[0]+1)
y=np.linspace(0,1,sh[1]+1)
x,y = np.meshgrid(x,y)
ax = plt.gca()
print('nfields: ',nfields)
plt.pcolormesh(x,y, fields)
# switch from row-column to x-y
bump_centers = bump_centers[:,::-1]
thrsh = kwargs.pop('thrsh', None)
if thrsh:
avg_dist = find_avg_dist(rate_map, thrsh)
else:
avg_dist = find_avg_dist(rate_map)
displacement, orientation = fit_hex(bump_centers, avg_dist,
plot_bumps=plot_fields, **kwargs)
return bump_centers, avg_dist, displacement, orientation
class RandomDisplacementBounds(object):
"""random displacement with bounds"""
def __init__(self, xmin, xmax, stepsize=0.5):
self.xmin = np.array(xmin)
self.xmax = np.array(xmax)
self.stepsize = stepsize
def __call__(self, x):
"""take a random step but ensure the new position is within the bounds"""
while True:
# this could be done in a much more clever way, but it will work for example purposes
xnew = x + (self.xmax-self.xmin)*np.random.uniform(-self.stepsize,
self.stepsize, np.shape(x))
if np.all(xnew < self.xmax) and np.all(xnew > self.xmin):
break
return xnew
def optimize_sep_fields(rate_map,step = 0.04, niter=40, T = 1.0, method = 'SLSQP',
glob=True, x0 = [0.065,0.1],callback=None):
"""Optimizes the separation of the fields by minimizing an error
function
Parameters
----------
rate_map :
method :
valid methods=['L-BFGS-B', 'TNC', 'SLSQP']
x0 : list
initial values for smoothing smoothing and laplace_thrsh
Returns
--------
res :
Result of the optimization. Contains smoothing and laplace_thrsh in
attribute res.x"""
from scipy import optimize
from exana.tracking.tools import separation_error_func as err_func
valid_methods = ['L-BFGS-B', 'TNC', 'SLSQP']
if method not in valid_methods:
raise ValueError('invalid method flag %s' %method)
rate_map[np.isnan(rate_map)] = 0.
xmin = [0.025, 0]
xmax = [0.2, 1]
bounds = [(low,high) for low,high in zip(xmin,xmax)]
obj_func = lambda args: err_func(args[0], args[1], rate_map)
if glob:
take_step = RandomDisplacementBounds(xmin, xmax,stepsize=step)
minimizer_kwargs = dict(method=method, bounds=bounds)
res = optimize.basinhopping(obj_func, x0, niter=niter, T = T,
minimizer_kwargs=minimizer_kwargs,
take_step=take_step,callback=callback)
else:
res = optimize.minimize(obj_func, x0, method=method, bounds = bounds, options={'disp': True})
return res
if __name__ == "__main__":
import doctest
doctest.testmod()
#
#
# Copyright (C) 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Module for query operations
How it works:
- Add field definitions
- See how L{NODE_FIELDS} is built
- Each field gets:
- Query field definition (L{objects.QueryFieldDefinition}, use
L{_MakeField} for creating), containing:
- Name, must be lowercase and match L{FIELD_NAME_RE}
- Title for tables, must not contain whitespace and match
L{TITLE_RE}
- Value data type, e.g. L{constants.QFT_NUMBER}
- Human-readable description, must not end with punctuation or
contain newlines
- Data request type, see e.g. C{NQ_*}
- OR-ed flags, see C{QFF_*}
- A retrieval function, see L{Query.__init__} for description
- Pass list of fields through L{_PrepareFieldList} for preparation and
checks
- Instantiate L{Query} with prepared field list definition and selected fields
- Call L{Query.RequestedData} to determine what data to collect/compute
- Call L{Query.Query} or L{Query.OldStyleQuery} with collected data and use
result
- Data container must support iteration using C{__iter__}
- Items are passed to retrieval functions and can have any format
- Call L{Query.GetFields} to get list of definitions for selected fields
@attention: Retrieval functions must be idempotent. They can be called multiple
times, in any order and any number of times.
"""
import logging
import operator
import re
from ganeti import constants
from ganeti import errors
from ganeti import utils
from ganeti import compat
from ganeti import objects
from ganeti import ht
from ganeti import runtime
from ganeti import qlang
from ganeti import jstore
from ganeti.constants import (QFT_UNKNOWN, QFT_TEXT, QFT_BOOL, QFT_NUMBER,
QFT_UNIT, QFT_TIMESTAMP, QFT_OTHER,
RS_NORMAL, RS_UNKNOWN, RS_NODATA,
RS_UNAVAIL, RS_OFFLINE)
(NETQ_CONFIG,
NETQ_GROUP,
NETQ_STATS,
NETQ_INST) = range(300, 304)
# Constants for requesting data from the caller/data provider. Each property
# collected/computed separately by the data provider should have its own to
# only collect the requested data and not more.
(NQ_CONFIG,
NQ_INST,
NQ_LIVE,
NQ_GROUP,
NQ_OOB) = range(1, 6)
(IQ_CONFIG,
IQ_LIVE,
IQ_DISKUSAGE,
IQ_CONSOLE,
IQ_NODES,
IQ_NETWORKS) = range(100, 106)
(LQ_MODE,
LQ_OWNER,
LQ_PENDING) = range(10, 13)
(GQ_CONFIG,
GQ_NODE,
GQ_INST,
GQ_DISKPARAMS) = range(200, 204)
(CQ_CONFIG,
CQ_QUEUE_DRAINED,
CQ_WATCHER_PAUSE) = range(300, 303)
(JQ_ARCHIVED, ) = range(400, 401)
# Query field flags
QFF_HOSTNAME = 0x01
QFF_IP_ADDRESS = 0x02
QFF_JOB_ID = 0x04
QFF_SPLIT_TIMESTAMP = 0x08
# Next values: 0x10, 0x20, 0x40, 0x80, 0x100, 0x200
QFF_ALL = (QFF_HOSTNAME | QFF_IP_ADDRESS | QFF_JOB_ID | QFF_SPLIT_TIMESTAMP)
FIELD_NAME_RE = re.compile(r"^[a-z0-9/._]+$")
TITLE_RE = re.compile(r"^[^\s]+$")
DOC_RE = re.compile(r"^[A-Z].*[^.,?!]$")
#: Verification function for each field type
_VERIFY_FN = {
QFT_UNKNOWN: ht.TNone,
QFT_TEXT: ht.TString,
QFT_BOOL: ht.TBool,
QFT_NUMBER: ht.TInt,
QFT_UNIT: ht.TInt,
QFT_TIMESTAMP: ht.TNumber,
QFT_OTHER: lambda _: True,
}
# Unique objects for special field statuses
_FS_UNKNOWN = object()
_FS_NODATA = object()
_FS_UNAVAIL = object()
_FS_OFFLINE = object()
#: List of all special status
_FS_ALL = compat.UniqueFrozenset([
_FS_UNKNOWN,
_FS_NODATA,
_FS_UNAVAIL,
_FS_OFFLINE,
])
#: VType to QFT mapping
_VTToQFT = {
# TODO: fix validation of empty strings
constants.VTYPE_STRING: QFT_OTHER, # since VTYPE_STRINGs can be empty
constants.VTYPE_MAYBE_STRING: QFT_OTHER,
constants.VTYPE_BOOL: QFT_BOOL,
constants.VTYPE_SIZE: QFT_UNIT,
constants.VTYPE_INT: QFT_NUMBER,
}
_SERIAL_NO_DOC = "%s object serial number, incremented on each modification"
def _GetUnknownField(ctx, item): # pylint: disable=W0613
"""Gets the contents of an unknown field.
"""
return _FS_UNKNOWN
def _GetQueryFields(fielddefs, selected):
"""Calculates the internal list of selected fields.
Unknown fields are returned as L{constants.QFT_UNKNOWN}.
@type fielddefs: dict
@param fielddefs: Field definitions
@type selected: list of strings
@param selected: List of selected fields
"""
result = []
for name in selected:
try:
fdef = fielddefs[name]
except KeyError:
fdef = (_MakeField(name, name, QFT_UNKNOWN, "Unknown field '%s'" % name),
None, 0, _GetUnknownField)
assert len(fdef) == 4
result.append(fdef)
return result
def GetAllFields(fielddefs):
"""Extract L{objects.QueryFieldDefinition} from field definitions.
@rtype: list of L{objects.QueryFieldDefinition}
"""
return [fdef for (fdef, _, _, _) in fielddefs]
class _FilterHints:
"""Class for filter analytics.
When filters are used, the user of the L{Query} class usually doesn't know
exactly which items will be necessary for building the result. It therefore
has to prepare and compute the input data for potentially returning
everything.
There are two ways to optimize this. The first, and simpler, is to assign
each field a group of data, so that the caller can determine which
computations are necessary depending on the data groups requested. The list
of referenced groups must also be computed for fields referenced in the
filter.
The second is restricting the items based on a primary key. The primary key
is usually a unique name (e.g. a node name). This class extracts all
referenced names from a filter. If it encounters any filter condition which
disallows such a list to be determined (e.g. a non-equality filter), all
names will be requested.
The end-effect is that any operation other than L{qlang.OP_OR} and
L{qlang.OP_EQUAL} will make the query more expensive.
"""
def __init__(self, namefield):
"""Initializes this class.
@type namefield: string
@param namefield: Field caller is interested in
"""
self._namefield = namefield
#: Whether all names need to be requested (e.g. if a non-equality operator
#: has been used)
self._allnames = False
#: Which names to request
self._names = None
#: Data kinds referenced by the filter (used by L{Query.RequestedData})
self._datakinds = set()
def RequestedNames(self):
"""Returns all requested values.
Returns C{None} if list of values can't be determined (e.g. encountered
non-equality operators).
@rtype: list
"""
if self._allnames or self._names is None:
return None
return utils.UniqueSequence(self._names)
def ReferencedData(self):
"""Returns all kinds of data referenced by the filter.
"""
return frozenset(self._datakinds)
def _NeedAllNames(self):
"""Changes internal state to request all names.
"""
self._allnames = True
self._names = None
def NoteLogicOp(self, op):
"""Called when handling a logic operation.
@type op: string
@param op: Operator
"""
if op != qlang.OP_OR:
self._NeedAllNames()
def NoteUnaryOp(self, op, datakind): # pylint: disable=W0613
"""Called when handling an unary operation.
@type op: string
@param op: Operator
"""
if datakind is not None:
self._datakinds.add(datakind)
self._NeedAllNames()
def NoteBinaryOp(self, op, datakind, name, value):
"""Called when handling a binary operation.
@type op: string
@param op: Operator
@type name: string
@param name: Left-hand side of operator (field name)
@param value: Right-hand side of operator
"""
if datakind is not None:
self._datakinds.add(datakind)
if self._allnames:
return
# If any operator other than equality was used, all names need to be
# retrieved
if op == qlang.OP_EQUAL and name == self._namefield:
if self._names is None:
self._names = []
self._names.append(value)
else:
self._NeedAllNames()
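# --- Illustrative sketch (not part of the original module) -----------------
# Hypothetical walk-through of L{_FilterHints}: for a filter equivalent to
# [OP_OR, [OP_EQUAL, "name", "a"], [OP_EQUAL, "name", "b"]] only the two
# names need to be collected, whereas any other operator would force a full
# listing (RequestedNames would then return None).
def _ExampleFilterHintsUsage():
  """Collects requested names for a purely OR/EQUAL filter (sketch only)."""
  hints = _FilterHints("name")
  hints.NoteLogicOp(qlang.OP_OR)
  hints.NoteBinaryOp(qlang.OP_EQUAL, None, "name", "a")
  hints.NoteBinaryOp(qlang.OP_EQUAL, None, "name", "b")
  return hints.RequestedNames()  # ["a", "b"]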
def _WrapLogicOp(op_fn, sentences, ctx, item):
"""Wrapper for logic operator functions.
"""
return op_fn(fn(ctx, item) for fn in sentences)
def _WrapUnaryOp(op_fn, inner, ctx, item):
"""Wrapper for unary operator functions.
"""
return op_fn(inner(ctx, item))
def _WrapBinaryOp(op_fn, retrieval_fn, value, ctx, item):
"""Wrapper for binary operator functions.
"""
return op_fn(retrieval_fn(ctx, item), value)
def _WrapNot(fn, lhs, rhs):
"""Negates the result of a wrapped function.
"""
return not fn(lhs, rhs)
def _PrepareRegex(pattern):
"""Compiles a regular expression.
"""
try:
return re.compile(pattern)
except re.error, err:
raise errors.ParameterError("Invalid regex pattern (%s)" % err)
def _PrepareSplitTimestamp(value):
"""Prepares a value for comparison by L{_MakeSplitTimestampComparison}.
"""
if ht.TNumber(value):
return value
else:
return utils.MergeTime(value)
def _MakeSplitTimestampComparison(fn):
"""Compares split timestamp values after converting to float.
"""
return lambda lhs, rhs: fn(utils.MergeTime(lhs), rhs)
def _MakeComparisonChecks(fn):
"""Prepares flag-specific comparisons using a comparison function.
"""
return [
(QFF_SPLIT_TIMESTAMP, _MakeSplitTimestampComparison(fn),
_PrepareSplitTimestamp),
(QFF_JOB_ID, lambda lhs, rhs: fn(jstore.ParseJobId(lhs), rhs),
jstore.ParseJobId),
(None, fn, None),
]
class _FilterCompilerHelper:
"""Converts a query filter to a callable usable for filtering.
"""
# String statement has no effect, pylint: disable=W0105
#: How deep filters can be nested
_LEVELS_MAX = 10
# Unique identifiers for operator groups
(_OPTYPE_LOGIC,
_OPTYPE_UNARY,
_OPTYPE_BINARY) = range(1, 4)
"""Functions for equality checks depending on field flags.
List of tuples containing flags and a callable receiving the left- and
right-hand side of the operator. The flags are an OR-ed value of C{QFF_*}
(e.g. L{QFF_HOSTNAME} or L{QFF_SPLIT_TIMESTAMP}).
Order matters. The first item with flags will be used. Flags are checked
using binary AND.
"""
_EQUALITY_CHECKS = [
(QFF_HOSTNAME,
lambda lhs, rhs: utils.MatchNameComponent(rhs, [lhs],
case_sensitive=False),
None),
(QFF_SPLIT_TIMESTAMP, _MakeSplitTimestampComparison(operator.eq),
_PrepareSplitTimestamp),
(None, operator.eq, None),
]
"""Known operators
Operator as key (C{qlang.OP_*}), value a tuple of operator group
(C{_OPTYPE_*}) and a group-specific value:
- C{_OPTYPE_LOGIC}: Callable taking any number of arguments; used by
L{_HandleLogicOp}
- C{_OPTYPE_UNARY}: Always C{None}; details handled by L{_HandleUnaryOp}
- C{_OPTYPE_BINARY}: Callable taking exactly two parameters, the left- and
right-hand side of the operator, used by L{_HandleBinaryOp}
"""
_OPS = {
# Logic operators
qlang.OP_OR: (_OPTYPE_LOGIC, compat.any),
qlang.OP_AND: (_OPTYPE_LOGIC, compat.all),
# Unary operators
qlang.OP_NOT: (_OPTYPE_UNARY, None),
qlang.OP_TRUE: (_OPTYPE_UNARY, None),
# Binary operators
qlang.OP_EQUAL: (_OPTYPE_BINARY, _EQUALITY_CHECKS),
qlang.OP_NOT_EQUAL:
(_OPTYPE_BINARY, [(flags, compat.partial(_WrapNot, fn), valprepfn)
for (flags, fn, valprepfn) in _EQUALITY_CHECKS]),
qlang.OP_LT: (_OPTYPE_BINARY, _MakeComparisonChecks(operator.lt)),
qlang.OP_LE: (_OPTYPE_BINARY, _MakeComparisonChecks(operator.le)),
qlang.OP_GT: (_OPTYPE_BINARY, _MakeComparisonChecks(operator.gt)),
qlang.OP_GE: (_OPTYPE_BINARY, _MakeComparisonChecks(operator.ge)),
qlang.OP_REGEXP: (_OPTYPE_BINARY, [
(None, lambda lhs, rhs: rhs.search(lhs), _PrepareRegex),
]),
qlang.OP_CONTAINS: (_OPTYPE_BINARY, [
(None, operator.contains, None),
]),
}
def __init__(self, fields):
"""Initializes this class.
@param fields: Field definitions (return value of L{_PrepareFieldList})
"""
self._fields = fields
self._hints = None
self._op_handler = None
def __call__(self, hints, qfilter):
"""Converts a query filter into a callable function.
@type hints: L{_FilterHints} or None
@param hints: Callbacks doing analysis on filter
@type qfilter: list
@param qfilter: Filter structure
@rtype: callable
@return: Function receiving context and item as parameters, returning
boolean as to whether item matches filter
"""
self._op_handler = {
self._OPTYPE_LOGIC:
(self._HandleLogicOp, getattr(hints, "NoteLogicOp", None)),
self._OPTYPE_UNARY:
(self._HandleUnaryOp, getattr(hints, "NoteUnaryOp", None)),
self._OPTYPE_BINARY:
(self._HandleBinaryOp, getattr(hints, "NoteBinaryOp", None)),
}
try:
filter_fn = self._Compile(qfilter, 0)
finally:
self._op_handler = None
return filter_fn
def _Compile(self, qfilter, level):
"""Inner function for converting filters.
Calls the correct handler functions for the top-level operator. This
function is called recursively (e.g. for logic operators).
"""
if not (isinstance(qfilter, (list, tuple)) and qfilter):
raise errors.ParameterError("Invalid filter on level %s" % level)
# Limit recursion
if level >= self._LEVELS_MAX:
raise errors.ParameterError("Only up to %s levels are allowed (filter"
" nested too deep)" % self._LEVELS_MAX)
# Create copy to be modified
operands = qfilter[:]
op = operands.pop(0)
try:
(kind, op_data) = self._OPS[op]
except KeyError:
raise errors.ParameterError("Unknown operator '%s'" % op)
(handler, hints_cb) = self._op_handler[kind]
return handler(hints_cb, level, op, op_data, operands)
def _LookupField(self, name):
"""Returns a field definition by name.
"""
try:
return self._fields[name]
except KeyError:
raise errors.ParameterError("Unknown field '%s'" % name)
def _HandleLogicOp(self, hints_fn, level, op, op_fn, operands):
"""Handles logic operators.
@type hints_fn: callable
@param hints_fn: Callback doing some analysis on the filter
@type level: integer
@param level: Current depth
@type op: string
@param op: Operator
@type op_fn: callable
@param op_fn: Function implementing operator
@type operands: list
@param operands: List of operands
"""
if hints_fn:
hints_fn(op)
return compat.partial(_WrapLogicOp, op_fn,
[self._Compile(op, level + 1) for op in operands])
def _HandleUnaryOp(self, hints_fn, level, op, op_fn, operands):
"""Handles unary operators.
@type hints_fn: callable
@param hints_fn: Callback doing some analysis on the filter
@type level: integer
@param level: Current depth
@type op: string
@param op: Operator
@type op_fn: callable
@param op_fn: Function implementing operator
@type operands: list
@param operands: List of operands
"""
assert op_fn is None
if len(operands) != 1:
raise errors.ParameterError("Unary operator '%s' expects exactly one"
" operand" % op)
if op == qlang.OP_TRUE:
(_, datakind, _, retrieval_fn) = self._LookupField(operands[0])
if hints_fn:
hints_fn(op, datakind)
op_fn = operator.truth
arg = retrieval_fn
elif op == qlang.OP_NOT:
if hints_fn:
hints_fn(op, None)
op_fn = operator.not_
arg = self._Compile(operands[0], level + 1)
else:
raise errors.ProgrammerError("Can't handle operator '%s'" % op)
return compat.partial(_WrapUnaryOp, op_fn, arg)
def _HandleBinaryOp(self, hints_fn, level, op, op_data, operands):
"""Handles binary operators.
@type hints_fn: callable
@param hints_fn: Callback doing some analysis on the filter
@type level: integer
@param level: Current depth
@type op: string
@param op: Operator
@param op_data: Functions implementing operators
@type operands: list
@param operands: List of operands
"""
# Unused arguments, pylint: disable=W0613
try:
(name, value) = operands
except (ValueError, TypeError):
raise errors.ParameterError("Invalid binary operator, expected exactly"
" two operands")
(fdef, datakind, field_flags, retrieval_fn) = self._LookupField(name)
assert fdef.kind != QFT_UNKNOWN
# TODO: Type conversions?
verify_fn = _VERIFY_FN[fdef.kind]
if not verify_fn(value):
raise errors.ParameterError("Unable to compare field '%s' (type '%s')"
" with '%s', expected %s" %
(name, fdef.kind, value.__class__.__name__,
verify_fn))
if hints_fn:
hints_fn(op, datakind, name, value)
for (fn_flags, fn, valprepfn) in op_data:
if fn_flags is None or fn_flags & field_flags:
# Prepare value if necessary (e.g. compile regular expression)
if valprepfn:
value = valprepfn(value)
return compat.partial(_WrapBinaryOp, fn, retrieval_fn, value)
raise errors.ProgrammerError("Unable to find operator implementation"
" (op '%s', flags %s)" % (op, field_flags))
def _CompileFilter(fields, hints, qfilter):
"""Converts a query filter into a callable function.
See L{_FilterCompilerHelper} for details.
@rtype: callable
"""
return _FilterCompilerHelper(fields)(hints, qfilter)
class Query:
def __init__(self, fieldlist, selected, qfilter=None, namefield=None):
"""Initializes this class.
The field definition is a dictionary with the field's name as a key and a
tuple containing, in order, the field definition object
(L{objects.QueryFieldDefinition}, the data kind to help calling code
collect data and a retrieval function. The retrieval function is called
with two parameters, in order, the data container and the item in container
(see L{Query.Query}).
Users of this class can call L{RequestedData} before preparing the data
container to determine what data is needed.
@type fieldlist: dictionary
@param fieldlist: Field definitions
@type selected: list of strings
@param selected: List of selected fields
"""
assert namefield is None or namefield in fieldlist
self._fields = _GetQueryFields(fieldlist, selected)
self._filter_fn = None
self._requested_names = None
self._filter_datakinds = frozenset()
if qfilter is not None:
# Collect requested names if wanted
if namefield:
hints = _FilterHints(namefield)
else:
hints = None
# Build filter function
self._filter_fn = _CompileFilter(fieldlist, hints, qfilter)
if hints:
self._requested_names = hints.RequestedNames()
self._filter_datakinds = hints.ReferencedData()
if namefield is None:
self._name_fn = None
else:
(_, _, _, self._name_fn) = fieldlist[namefield]
def RequestedNames(self):
"""Returns all names referenced in the filter.
If there is no filter or operators are preventing determining the exact
names, C{None} is returned.
"""
return self._requested_names
def RequestedData(self):
"""Gets requested kinds of data.
@rtype: frozenset
"""
return (self._filter_datakinds |
frozenset(datakind for (_, datakind, _, _) in self._fields
if datakind is not None))
def GetFields(self):
"""Returns the list of fields for this query.
Includes unknown fields.
@rtype: List of L{objects.QueryFieldDefinition}
"""
return GetAllFields(self._fields)
def Query(self, ctx, sort_by_name=True):
"""Execute a query.
@param ctx: Data container passed to field retrieval functions, must
support iteration using C{__iter__}
@type sort_by_name: boolean
@param sort_by_name: Whether to sort by name or keep the input data's
ordering
"""
sort = (self._name_fn and sort_by_name)
result = []
for idx, item in enumerate(ctx):
if not (self._filter_fn is None or self._filter_fn(ctx, item)):
continue
row = [_ProcessResult(fn(ctx, item)) for (_, _, _, fn) in self._fields]
# Verify result
if __debug__:
_VerifyResultRow(self._fields, row)
if sort:
(status, name) = _ProcessResult(self._name_fn(ctx, item))
assert status == constants.RS_NORMAL
# TODO: Are there cases where we wouldn't want to use NiceSort?
# Answer: if the name field is non-string...
result.append((utils.NiceSortKey(name), idx, row))
else:
result.append(row)
if not sort:
return result
# TODO: Would "heapq" be more efficient than sorting?
# Sorting in-place instead of using "sorted()"
result.sort()
assert not result or (len(result[0]) == 3 and len(result[-1]) == 3)
return map(operator.itemgetter(2), result)
def OldStyleQuery(self, ctx, sort_by_name=True):
"""Query with "old" query result format.
See L{Query.Query} for arguments.
"""
unknown = set(fdef.name for (fdef, _, _, _) in self._fields
if fdef.kind == QFT_UNKNOWN)
if unknown:
raise errors.OpPrereqError("Unknown output fields selected: %s" %
(utils.CommaJoin(unknown), ),
errors.ECODE_INVAL)
return [[value for (_, value) in row]
for row in self.Query(ctx, sort_by_name=sort_by_name)]
def _ProcessResult(value):
"""Converts result values into externally-visible ones.
"""
if value is _FS_UNKNOWN:
return (RS_UNKNOWN, None)
elif value is _FS_NODATA:
return (RS_NODATA, None)
elif value is _FS_UNAVAIL:
return (RS_UNAVAIL, None)
elif value is _FS_OFFLINE:
return (RS_OFFLINE, None)
else:
return (RS_NORMAL, value)
def _VerifyResultRow(fields, row):
"""Verifies the contents of a query result row.
@type fields: list
@param fields: Field definitions for result
@type row: list of tuples
@param row: Row data
"""
assert len(row) == len(fields)
errs = []
for ((status, value), (fdef, _, _, _)) in zip(row, fields):
if status == RS_NORMAL:
if not _VERIFY_FN[fdef.kind](value):
errs.append("normal field %s fails validation (value is %s)" %
(fdef.name, value))
elif value is not None:
errs.append("abnormal field %s has a non-None value" % fdef.name)
assert not errs, ("Failed validation: %s in row %s" %
(utils.CommaJoin(errs), row))
def _FieldDictKey((fdef, _, flags, fn)):
"""Generates key for field dictionary.
"""
assert fdef.name and fdef.title, "Name and title are required"
assert FIELD_NAME_RE.match(fdef.name)
assert TITLE_RE.match(fdef.title)
assert (DOC_RE.match(fdef.doc) and len(fdef.doc.splitlines()) == 1 and
fdef.doc.strip() == fdef.doc), \
"Invalid description for field '%s'" % fdef.name
assert callable(fn)
assert (flags & ~QFF_ALL) == 0, "Unknown flags for field '%s'" % fdef.name
return fdef.name
def _PrepareFieldList(fields, aliases):
"""Prepares field list for use by L{Query}.
Converts the list to a dictionary and does some verification.
@type fields: list of tuples; (L{objects.QueryFieldDefinition}, data
kind, retrieval function)
@param fields: List of fields, see L{Query.__init__} for a better
description
@type aliases: list of tuples; (alias, target)
@param aliases: list of tuples containing aliases; for each
alias/target pair, a duplicate will be created in the field list
@rtype: dict
@return: Field dictionary for L{Query}
"""
if __debug__:
duplicates = utils.FindDuplicates(fdef.title.lower()
for (fdef, _, _, _) in fields)
assert not duplicates, "Duplicate title(s) found: %r" % duplicates
result = utils.SequenceToDict(fields, key=_FieldDictKey)
for alias, target in aliases:
assert alias not in result, "Alias %s overrides an existing field" % alias
assert target in result, "Missing target %s for alias %s" % (target, alias)
(fdef, k, flags, fn) = result[target]
fdef = fdef.Copy()
fdef.name = alias
result[alias] = (fdef, k, flags, fn)
assert len(result) == len(fields) + len(aliases)
assert compat.all(name == fdef.name
for (name, (fdef, _, _, _)) in result.items())
return result
def GetQueryResponse(query, ctx, sort_by_name=True):
"""Prepares the response for a query.
@type query: L{Query}
@param ctx: Data container, see L{Query.Query}
@type sort_by_name: boolean
@param sort_by_name: Whether to sort by name or keep the input data's
ordering
"""
return objects.QueryResponse(data=query.Query(ctx, sort_by_name=sort_by_name),
fields=query.GetFields()).ToDict()
def QueryFields(fielddefs, selected):
"""Returns list of available fields.
@type fielddefs: dict
@param fielddefs: Field definitions
@type selected: list of strings
@param selected: List of selected fields
@return: List of L{objects.QueryFieldDefinition}
"""
if selected is None:
# Client requests all fields, sort by name
fdefs = utils.NiceSort(GetAllFields(fielddefs.values()),
key=operator.attrgetter("name"))
else:
# Keep order as requested by client
fdefs = Query(fielddefs, selected).GetFields()
return objects.QueryFieldsResponse(fields=fdefs).ToDict()
def _MakeField(name, title, kind, doc):
"""Wrapper for creating L{objects.QueryFieldDefinition} instances.
@param name: Field name as a regular expression
@param title: Human-readable title
@param kind: Field type
@param doc: Human-readable description
"""
return objects.QueryFieldDefinition(name=name, title=title, kind=kind,
doc=doc)
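# --- Illustrative sketch (not part of the original module) -----------------
# A hypothetical, minimal field list and query tying together L{_MakeField},
# L{_PrepareFieldList} and L{Query} as outlined in the module docstring. The
# item class, field names and filter are invented for illustration; the real
# field lists (nodes, instances, ...) are built further down in this module.
def _ExampleQueryUsage():
  """Runs a toy query over in-memory items (sketch only)."""
  class _Item(object):
    def __init__(self, name, size):
      self.name = name
      self.size = size

  fields = _PrepareFieldList([
    (_MakeField("name", "Name", QFT_TEXT, "Item name"),
     None, QFF_HOSTNAME, lambda _, item: item.name),
    (_MakeField("size", "Size", QFT_NUMBER, "Item size"),
     None, 0, lambda _, item: item.size),
    ], [])

  # Select both fields, but only return items whose name equals "a"
  qfilter = [qlang.OP_EQUAL, "name", "a"]
  query = Query(fields, ["name", "size"], qfilter=qfilter, namefield="name")
  return query.Query([_Item("a", 1), _Item("b", 2)])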
def _StaticValueInner(value, ctx, _): # pylint: disable=W0613
"""Returns a static value.
"""
return value
def _StaticValue(value):
"""Prepares a function to return a static value.
"""
return compat.partial(_StaticValueInner, value)
def _GetNodeRole(node, master_uuid):
"""Determine node role.
@type node: L{objects.Node}
@param node: Node object
@type master_uuid: string
@param master_uuid: Master node UUID
"""
if node.uuid == master_uuid:
return constants.NR_MASTER
elif node.master_candidate:
return constants.NR_MCANDIDATE
elif node.drained:
return constants.NR_DRAINED
elif node.offline:
return constants.NR_OFFLINE
else:
return constants.NR_REGULAR
def _GetItemAttr(attr):
"""Returns a field function to return an attribute of the item.
@param attr: Attribute name
"""
getter = operator.attrgetter(attr)
return lambda _, item: getter(item)
def _GetItemMaybeAttr(attr):
"""Returns a field function to return a not-None attribute of the item.
If the value is None, then C{_FS_UNAVAIL} will be returned instead.
@param attr: Attribute name
"""
def _helper(_, obj):
val = getattr(obj, attr)
if val is None:
return _FS_UNAVAIL
else:
return val
return _helper
def _GetNDParam(name):
"""Return a field function to return an ND parameter out of the context.
"""
def _helper(ctx, _):
if ctx.ndparams is None:
return _FS_UNAVAIL
else:
return ctx.ndparams.get(name, None)
return _helper
def _BuildNDFields(is_group):
"""Builds all the ndparam fields.
@param is_group: whether this is called at group or node level
"""
if is_group:
field_kind = GQ_CONFIG
else:
field_kind = NQ_GROUP
return [(_MakeField("ndp/%s" % name,
constants.NDS_PARAMETER_TITLES.get(name,
"ndp/%s" % name),
_VTToQFT[kind], "The \"%s\" node parameter" % name),
field_kind, 0, _GetNDParam(name))
for name, kind in constants.NDS_PARAMETER_TYPES.items()]
def _ConvWrapInner(convert, fn, ctx, item):
"""Wrapper for converting values.
@param convert: Conversion function receiving value as single parameter
@param fn: Retrieval function
"""
value = fn(ctx, item)
# Is the value an abnormal status?
if compat.any(value is fs for fs in _FS_ALL):
# Return right away
return value
# TODO: Should conversion function also receive context, item or both?
return convert(value)
def _ConvWrap(convert, fn):
"""Convenience wrapper for L{_ConvWrapInner}.
@param convert: Conversion function receiving value as single parameter
@param fn: Retrieval function
"""
return compat.partial(_ConvWrapInner, convert, fn)
def _GetItemTimestamp(getter):
"""Returns function for getting timestamp of item.
@type getter: callable
@param getter: Function to retrieve timestamp attribute
"""
def fn(_, item):
"""Returns a timestamp of item.
"""
timestamp = getter(item)
if timestamp is None:
# Old configs might not have all timestamps
return _FS_UNAVAIL
else:
return timestamp
return fn
def _GetItemTimestampFields(datatype):
"""Returns common timestamp fields.
@param datatype: Field data type for use by L{Query.RequestedData}
"""
return [
(_MakeField("ctime", "CTime", QFT_TIMESTAMP, "Creation timestamp"),
datatype, 0, _GetItemTimestamp(operator.attrgetter("ctime"))),
(_MakeField("mtime", "MTime", QFT_TIMESTAMP, "Modification timestamp"),
datatype, 0, _GetItemTimestamp(operator.attrgetter("mtime"))),
]
class NodeQueryData:
"""Data container for node data queries.
"""
def __init__(self, nodes, live_data, master_uuid, node_to_primary,
node_to_secondary, inst_uuid_to_inst_name, groups, oob_support,
cluster):
"""Initializes this class.
"""
self.nodes = nodes
self.live_data = live_data
self.master_uuid = master_uuid
self.node_to_primary = node_to_primary
self.node_to_secondary = node_to_secondary
self.inst_uuid_to_inst_name = inst_uuid_to_inst_name
self.groups = groups
self.oob_support = oob_support
self.cluster = cluster
# Used for individual rows
self.curlive_data = None
self.ndparams = None
def __iter__(self):
"""Iterate over all nodes.
This function has side-effects and only one instance of the resulting
generator should be used at a time.
"""
for node in self.nodes:
group = self.groups.get(node.group, None)
if group is None:
self.ndparams = None
else:
self.ndparams = self.cluster.FillND(node, group)
if self.live_data:
self.curlive_data = self.live_data.get(node.uuid, None)
else:
self.curlive_data = None
yield node
#: Fields that are direct attributes of an L{objects.Node} object
_NODE_SIMPLE_FIELDS = {
"drained": ("Drained", QFT_BOOL, 0, "Whether node is drained"),
"master_candidate": ("MasterC", QFT_BOOL, 0,
"Whether node is a master candidate"),
"master_capable": ("MasterCapable", QFT_BOOL, 0,
"Whether node can become a master candidate"),
"name": ("Node", QFT_TEXT, QFF_HOSTNAME, "Node name"),
"offline": ("Offline", QFT_BOOL, 0, "Whether node is marked offline"),
"serial_no": ("SerialNo", QFT_NUMBER, 0, _SERIAL_NO_DOC % "Node"),
"uuid": ("UUID", QFT_TEXT, 0, "Node UUID"),
"vm_capable": ("VMCapable", QFT_BOOL, 0, "Whether node can host instances"),
}
#: Fields requiring talking to the node
# Note that none of these are available for non-vm_capable nodes
_NODE_LIVE_FIELDS = {
"bootid": ("BootID", QFT_TEXT, "bootid",
"Random UUID renewed for each system reboot, can be used"
" for detecting reboots by tracking changes"),
"cnodes": ("CNodes", QFT_NUMBER, "cpu_nodes",
"Number of NUMA domains on node (if exported by hypervisor)"),
"cnos": ("CNOs", QFT_NUMBER, "cpu_dom0",
"Number of logical processors used by the node OS (dom0 for Xen)"),
"csockets": ("CSockets", QFT_NUMBER, "cpu_sockets",
"Number of physical CPU sockets (if exported by hypervisor)"),
"ctotal": ("CTotal", QFT_NUMBER, "cpu_total", "Number of logical processors"),
"dfree": ("DFree", QFT_UNIT, "storage_free",
"Available storage space in storage unit"),
"dtotal": ("DTotal", QFT_UNIT, "storage_size",
"Total storage space in storage unit used for instance disk"
" allocation"),
"spfree": ("SpFree", QFT_NUMBER, "spindles_free",
"Available spindles in volume group (exclusive storage only)"),
"sptotal": ("SpTotal", QFT_NUMBER, "spindles_total",
"Total spindles in volume group (exclusive storage only)"),
"mfree": ("MFree", QFT_UNIT, "memory_free",
"Memory available for instance allocations"),
"mnode": ("MNode", QFT_UNIT, "memory_dom0",
"Amount of memory used by node (dom0 for Xen)"),
"mtotal": ("MTotal", QFT_UNIT, "memory_total",
"Total amount of memory of physical machine"),
}
def _GetGroup(cb):
"""Build function for calling another function with an node group.
@param cb: The callback to be called with the nodegroup
"""
def fn(ctx, node):
"""Get group data for a node.
@type ctx: L{NodeQueryData}
    @type node: L{objects.Node}
    @param node: Node object
"""
ng = ctx.groups.get(node.group, None)
if ng is None:
# Nodes always have a group, or the configuration is corrupt
return _FS_UNAVAIL
return cb(ctx, node, ng)
return fn
def _GetNodeGroup(ctx, node, ng): # pylint: disable=W0613
"""Returns the name of a node's group.
@type ctx: L{NodeQueryData}
@type node: L{objects.Node}
@param node: Node object
@type ng: L{objects.NodeGroup}
@param ng: The node group this node belongs to
"""
return ng.name
def _GetNodePower(ctx, node):
"""Returns the node powered state
@type ctx: L{NodeQueryData}
@type node: L{objects.Node}
@param node: Node object
"""
if ctx.oob_support[node.uuid]:
return node.powered
return _FS_UNAVAIL
def _GetNdParams(ctx, node, ng):
"""Returns the ndparams for this node.
@type ctx: L{NodeQueryData}
@type node: L{objects.Node}
@param node: Node object
@type ng: L{objects.NodeGroup}
@param ng: The node group this node belongs to
"""
return ctx.cluster.SimpleFillND(ng.FillND(node))
def _GetLiveNodeField(field, kind, ctx, node):
"""Gets the value of a "live" field from L{NodeQueryData}.
@param field: Live field name
@param kind: Data kind, one of L{constants.QFT_ALL}
@type ctx: L{NodeQueryData}
@type node: L{objects.Node}
@param node: Node object
"""
if node.offline:
return _FS_OFFLINE
if not node.vm_capable:
return _FS_UNAVAIL
if not ctx.curlive_data:
return _FS_NODATA
return _GetStatsField(field, kind, ctx.curlive_data)
def _GetStatsField(field, kind, data):
"""Gets a value from live statistics.
If the value is not found, L{_FS_UNAVAIL} is returned. If the field kind is
numeric a conversion to integer is attempted. If that fails, L{_FS_UNAVAIL}
is returned.
@param field: Live field name
@param kind: Data kind, one of L{constants.QFT_ALL}
@type data: dict
@param data: Statistics
"""
try:
value = data[field]
except KeyError:
return _FS_UNAVAIL
if kind == QFT_TEXT:
return value
assert kind in (QFT_NUMBER, QFT_UNIT)
# Try to convert into number
try:
return int(value)
except (ValueError, TypeError):
logging.exception("Failed to convert node field '%s' (value %r) to int",
field, value)
return _FS_UNAVAIL
def _GetNodeHvState(_, node):
"""Converts node's hypervisor state for query result.
"""
hv_state = node.hv_state
if hv_state is None:
return _FS_UNAVAIL
return dict((name, value.ToDict()) for (name, value) in hv_state.items())
def _GetNodeDiskState(_, node):
"""Converts node's disk state for query result.
"""
disk_state = node.disk_state
if disk_state is None:
return _FS_UNAVAIL
return dict((disk_kind, dict((name, value.ToDict())
for (name, value) in kind_state.items()))
for (disk_kind, kind_state) in disk_state.items())
def _BuildNodeFields():
"""Builds list of fields for node queries.
"""
fields = [
(_MakeField("pip", "PrimaryIP", QFT_TEXT, "Primary IP address"),
NQ_CONFIG, 0, _GetItemAttr("primary_ip")),
(_MakeField("sip", "SecondaryIP", QFT_TEXT, "Secondary IP address"),
NQ_CONFIG, 0, _GetItemAttr("secondary_ip")),
(_MakeField("tags", "Tags", QFT_OTHER, "Tags"), NQ_CONFIG, 0,
lambda ctx, node: list(node.GetTags())),
(_MakeField("master", "IsMaster", QFT_BOOL, "Whether node is master"),
NQ_CONFIG, 0, lambda ctx, node: node.uuid == ctx.master_uuid),
(_MakeField("group", "Group", QFT_TEXT, "Node group"), NQ_GROUP, 0,
_GetGroup(_GetNodeGroup)),
(_MakeField("group.uuid", "GroupUUID", QFT_TEXT, "UUID of node group"),
NQ_CONFIG, 0, _GetItemAttr("group")),
(_MakeField("powered", "Powered", QFT_BOOL,
"Whether node is thought to be powered on"),
NQ_OOB, 0, _GetNodePower),
(_MakeField("ndparams", "NodeParameters", QFT_OTHER,
"Merged node parameters"),
NQ_GROUP, 0, _GetGroup(_GetNdParams)),
(_MakeField("custom_ndparams", "CustomNodeParameters", QFT_OTHER,
"Custom node parameters"),
NQ_GROUP, 0, _GetItemAttr("ndparams")),
(_MakeField("hv_state", "HypervisorState", QFT_OTHER, "Hypervisor state"),
NQ_CONFIG, 0, _GetNodeHvState),
(_MakeField("disk_state", "DiskState", QFT_OTHER, "Disk state"),
NQ_CONFIG, 0, _GetNodeDiskState),
]
fields.extend(_BuildNDFields(False))
# Node role
role_values = (constants.NR_MASTER, constants.NR_MCANDIDATE,
constants.NR_REGULAR, constants.NR_DRAINED,
constants.NR_OFFLINE)
role_doc = ("Node role; \"%s\" for master, \"%s\" for master candidate,"
" \"%s\" for regular, \"%s\" for drained, \"%s\" for offline" %
role_values)
fields.append((_MakeField("role", "Role", QFT_TEXT, role_doc), NQ_CONFIG, 0,
lambda ctx, node: _GetNodeRole(node, ctx.master_uuid)))
assert set(role_values) == constants.NR_ALL
def _GetLength(getter):
return lambda ctx, node: len(getter(ctx)[node.uuid])
def _GetList(getter):
return lambda ctx, node: utils.NiceSort(
[ctx.inst_uuid_to_inst_name[uuid]
for uuid in getter(ctx)[node.uuid]])
# Add fields operating on instance lists
for prefix, titleprefix, docword, getter in \
[("p", "Pri", "primary", operator.attrgetter("node_to_primary")),
("s", "Sec", "secondary", operator.attrgetter("node_to_secondary"))]:
# TODO: Allow filterting by hostname in list
fields.extend([
(_MakeField("%sinst_cnt" % prefix, "%sinst" % prefix.upper(), QFT_NUMBER,
"Number of instances with this node as %s" % docword),
NQ_INST, 0, _GetLength(getter)),
(_MakeField("%sinst_list" % prefix, "%sInstances" % titleprefix,
QFT_OTHER,
"List of instances with this node as %s" % docword),
NQ_INST, 0, _GetList(getter)),
])
# Add simple fields
fields.extend([
(_MakeField(name, title, kind, doc), NQ_CONFIG, flags, _GetItemAttr(name))
for (name, (title, kind, flags, doc)) in _NODE_SIMPLE_FIELDS.items()])
# Add fields requiring live data
fields.extend([
(_MakeField(name, title, kind, doc), NQ_LIVE, 0,
compat.partial(_GetLiveNodeField, nfield, kind))
for (name, (title, kind, nfield, doc)) in _NODE_LIVE_FIELDS.items()])
# Add timestamps
fields.extend(_GetItemTimestampFields(NQ_CONFIG))
return _PrepareFieldList(fields, [])
class InstanceQueryData:
"""Data container for instance data queries.
"""
def __init__(self, instances, cluster, disk_usage, offline_node_uuids,
bad_node_uuids, live_data, wrongnode_inst, console, nodes,
groups, networks):
"""Initializes this class.
@param instances: List of instance objects
@param cluster: Cluster object
@type disk_usage: dict; instance UUID as key
@param disk_usage: Per-instance disk usage
@type offline_node_uuids: list of strings
@param offline_node_uuids: List of offline nodes
@type bad_node_uuids: list of strings
@param bad_node_uuids: List of faulty nodes
@type live_data: dict; instance UUID as key
@param live_data: Per-instance live data
@type wrongnode_inst: set
@param wrongnode_inst: Set of instances running on wrong node(s)
@type console: dict; instance UUID as key
@param console: Per-instance console information
@type nodes: dict; node UUID as key
@param nodes: Node objects
@type networks: dict; net_uuid as key
@param networks: Network objects
"""
assert len(set(bad_node_uuids) & set(offline_node_uuids)) == \
len(offline_node_uuids), \
"Offline nodes not included in bad nodes"
assert not (set(live_data.keys()) & set(bad_node_uuids)), \
"Found live data for bad or offline nodes"
self.instances = instances
self.cluster = cluster
self.disk_usage = disk_usage
self.offline_nodes = offline_node_uuids
self.bad_nodes = bad_node_uuids
self.live_data = live_data
self.wrongnode_inst = wrongnode_inst
self.console = console
self.nodes = nodes
self.groups = groups
self.networks = networks
# Used for individual rows
self.inst_hvparams = None
self.inst_beparams = None
self.inst_osparams = None
self.inst_nicparams = None
def __iter__(self):
"""Iterate over all instances.
This function has side-effects and only one instance of the resulting
generator should be used at a time.
"""
for inst in self.instances:
self.inst_hvparams = self.cluster.FillHV(inst, skip_globals=True)
self.inst_beparams = self.cluster.FillBE(inst)
self.inst_osparams = self.cluster.SimpleFillOS(inst.os, inst.osparams)
self.inst_nicparams = [self.cluster.SimpleFillNIC(nic.nicparams)
for nic in inst.nics]
yield inst
def _GetInstOperState(ctx, inst):
"""Get instance's operational status.
@type ctx: L{InstanceQueryData}
@type inst: L{objects.Instance}
@param inst: Instance object
"""
# Can't use RS_OFFLINE here as it would describe the instance to
# be offline when we actually don't know due to missing data
if inst.primary_node in ctx.bad_nodes:
return _FS_NODATA
else:
return bool(ctx.live_data.get(inst.uuid))
def _GetInstLiveData(name):
"""Build function for retrieving live data.
@type name: string
@param name: Live data field name
"""
def fn(ctx, inst):
"""Get live data for an instance.
@type ctx: L{InstanceQueryData}
@type inst: L{objects.Instance}
@param inst: Instance object
"""
if (inst.primary_node in ctx.bad_nodes or
inst.primary_node in ctx.offline_nodes):
# Can't use RS_OFFLINE here as it would describe the instance to be
# offline when we actually don't know due to missing data
return _FS_NODATA
if inst.uuid in ctx.live_data:
data = ctx.live_data[inst.uuid]
if name in data:
return data[name]
return _FS_UNAVAIL
return fn
def _GetInstStatus(ctx, inst):
"""Get instance status.
@type ctx: L{InstanceQueryData}
@type inst: L{objects.Instance}
@param inst: Instance object
"""
if inst.primary_node in ctx.offline_nodes:
return constants.INSTST_NODEOFFLINE
if inst.primary_node in ctx.bad_nodes:
return constants.INSTST_NODEDOWN
if bool(ctx.live_data.get(inst.uuid)):
if inst.uuid in ctx.wrongnode_inst:
return constants.INSTST_WRONGNODE
elif inst.admin_state == constants.ADMINST_UP:
return constants.INSTST_RUNNING
else:
return constants.INSTST_ERRORUP
if inst.admin_state == constants.ADMINST_UP:
return constants.INSTST_ERRORDOWN
elif inst.admin_state == constants.ADMINST_DOWN:
return constants.INSTST_ADMINDOWN
return constants.INSTST_ADMINOFFLINE
def _GetInstDisk(index, cb):
"""Build function for calling another function with an instance Disk.
@type index: int
@param index: Disk index
@type cb: callable
@param cb: Callback
"""
def fn(ctx, inst):
"""Call helper function with instance Disk.
@type ctx: L{InstanceQueryData}
@type inst: L{objects.Instance}
@param inst: Instance object
"""
try:
      disk = inst.disks[index]
    except IndexError:
      return _FS_UNAVAIL
    return cb(ctx, index, disk)
return fn
def _GetInstDiskSize(ctx, _, disk): # pylint: disable=W0613
"""Get a Disk's size.
@type ctx: L{InstanceQueryData}
@type disk: L{objects.Disk}
@param disk: The Disk object
"""
if disk.size is None:
return _FS_UNAVAIL
else:
return disk.size
def _GetInstDiskSpindles(ctx, _, disk): # pylint: disable=W0613
"""Get a Disk's spindles.
@type disk: L{objects.Disk}
@param disk: The Disk object
"""
if disk.spindles is None:
return _FS_UNAVAIL
else:
return disk.spindles
def _GetInstDeviceName(ctx, _, device): # pylint: disable=W0613
"""Get a Device's Name.
@type ctx: L{InstanceQueryData}
@type device: L{objects.NIC} or L{objects.Disk}
@param device: The NIC or Disk object
"""
if device.name is None:
return _FS_UNAVAIL
else:
return device.name
def _GetInstDeviceUUID(ctx, _, device): # pylint: disable=W0613
"""Get a Device's UUID.
@type ctx: L{InstanceQueryData}
@type device: L{objects.NIC} or L{objects.Disk}
@param device: The NIC or Disk object
"""
if device.uuid is None:
return _FS_UNAVAIL
else:
return device.uuid
def _GetInstNic(index, cb):
"""Build function for calling another function with an instance NIC.
@type index: int
@param index: NIC index
@type cb: callable
@param cb: Callback
"""
def fn(ctx, inst):
"""Call helper function with instance NIC.
@type ctx: L{InstanceQueryData}
@type inst: L{objects.Instance}
@param inst: Instance object
"""
try:
nic = inst.nics[index]
except IndexError:
return _FS_UNAVAIL
return cb(ctx, index, nic)
return fn
def _GetInstNicNetworkName(ctx, _, nic): # pylint: disable=W0613
"""Get a NIC's Network.
@type ctx: L{InstanceQueryData}
@type nic: L{objects.NIC}
@param nic: NIC object
"""
if nic.network is None:
return _FS_UNAVAIL
else:
return ctx.networks[nic.network].name
def _GetInstNicNetwork(ctx, _, nic): # pylint: disable=W0613
"""Get a NIC's Network.
@type ctx: L{InstanceQueryData}
@type nic: L{objects.NIC}
@param nic: NIC object
"""
if nic.network is None:
return _FS_UNAVAIL
else:
return nic.network
def _GetInstNicIp(ctx, _, nic): # pylint: disable=W0613
"""Get a NIC's IP address.
@type ctx: L{InstanceQueryData}
@type nic: L{objects.NIC}
@param nic: NIC object
"""
if nic.ip is None:
return _FS_UNAVAIL
else:
return nic.ip
def _GetInstNicBridge(ctx, index, _):
"""Get a NIC's bridge.
@type ctx: L{InstanceQueryData}
@type index: int
@param index: NIC index
"""
assert len(ctx.inst_nicparams) >= index
nicparams = ctx.inst_nicparams[index]
if nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
return nicparams[constants.NIC_LINK]
else:
return _FS_UNAVAIL
def _GetInstNicVLan(ctx, index, _):
"""Get a NIC's VLAN.
@type ctx: L{InstanceQueryData}
@type index: int
@param index: NIC index
"""
assert len(ctx.inst_nicparams) >= index
nicparams = ctx.inst_nicparams[index]
if nicparams[constants.NIC_MODE] == constants.NIC_MODE_OVS:
return nicparams[constants.NIC_VLAN]
else:
return _FS_UNAVAIL
def _GetInstAllNicNetworkNames(ctx, inst):
"""Get all network names for an instance.
@type ctx: L{InstanceQueryData}
@type inst: L{objects.Instance}
@param inst: Instance object
"""
result = []
for nic in inst.nics:
name = None
if nic.network:
name = ctx.networks[nic.network].name
result.append(name)
assert len(result) == len(inst.nics)
return result
def _GetInstAllNicBridges(ctx, inst):
"""Get all network bridges for an instance.
@type ctx: L{InstanceQueryData}
@type inst: L{objects.Instance}
@param inst: Instance object
"""
assert len(ctx.inst_nicparams) == len(inst.nics)
result = []
for nicp in ctx.inst_nicparams:
if nicp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
result.append(nicp[constants.NIC_LINK])
else:
result.append(None)
assert len(result) == len(inst.nics)
return result
def _GetInstAllNicVlans(ctx, inst):
"""Get all network VLANs of an instance.
@type ctx: L{InstanceQueryData}
@type inst: L{objects.Instance}
@param inst: Instance object
"""
assert len(ctx.inst_nicparams) == len(inst.nics)
result = []
for nicp in ctx.inst_nicparams:
if nicp[constants.NIC_MODE] == constants.NIC_MODE_OVS:
result.append(nicp[constants.NIC_VLAN])
else:
result.append(None)
assert len(result) == len(inst.nics)
return result
def _GetInstNicParam(name):
"""Build function for retrieving a NIC parameter.
@type name: string
@param name: Parameter name
"""
  def fn(ctx, index, _):
    """Get a NIC parameter.
    @type ctx: L{InstanceQueryData}
    @type index: int
    @param index: NIC index
    """
assert len(ctx.inst_nicparams) >= index
return ctx.inst_nicparams[index][name]
return fn
def _GetInstanceNetworkFields():
"""Get instance fields involving network interfaces.
@return: Tuple containing list of field definitions used as input for
L{_PrepareFieldList} and a list of aliases
"""
nic_mac_fn = lambda ctx, _, nic: nic.mac
nic_mode_fn = _GetInstNicParam(constants.NIC_MODE)
nic_link_fn = _GetInstNicParam(constants.NIC_LINK)
fields = [
# All NICs
(_MakeField("nic.count", "NICs", QFT_NUMBER,
"Number of network interfaces"),
IQ_CONFIG, 0, lambda ctx, inst: len(inst.nics)),
(_MakeField("nic.macs", "NIC_MACs", QFT_OTHER,
"List containing each network interface's MAC address"),
IQ_CONFIG, 0, lambda ctx, inst: [nic.mac for nic in inst.nics]),
(_MakeField("nic.ips", "NIC_IPs", QFT_OTHER,
"List containing each network interface's IP address"),
IQ_CONFIG, 0, lambda ctx, inst: [nic.ip for nic in inst.nics]),
(_MakeField("nic.names", "NIC_Names", QFT_OTHER,
"List containing each network interface's name"),
IQ_CONFIG, 0, lambda ctx, inst: [nic.name for nic in inst.nics]),
(_MakeField("nic.uuids", "NIC_UUIDs", QFT_OTHER,
"List containing each network interface's UUID"),
IQ_CONFIG, 0, lambda ctx, inst: [nic.uuid for nic in inst.nics]),
(_MakeField("nic.modes", "NIC_modes", QFT_OTHER,
"List containing each network interface's mode"), IQ_CONFIG, 0,
lambda ctx, inst: [nicp[constants.NIC_MODE]
for nicp in ctx.inst_nicparams]),
(_MakeField("nic.links", "NIC_links", QFT_OTHER,
"List containing each network interface's link"), IQ_CONFIG, 0,
lambda ctx, inst: [nicp[constants.NIC_LINK]
for nicp in ctx.inst_nicparams]),
(_MakeField("nic.vlans", "NIC_VLANs", QFT_OTHER,
"List containing each network interface's VLAN"),
IQ_CONFIG, 0, _GetInstAllNicVlans),
(_MakeField("nic.bridges", "NIC_bridges", QFT_OTHER,
"List containing each network interface's bridge"),
IQ_CONFIG, 0, _GetInstAllNicBridges),
(_MakeField("nic.networks", "NIC_networks", QFT_OTHER,
"List containing each interface's network"), IQ_CONFIG, 0,
lambda ctx, inst: [nic.network for nic in inst.nics]),
    (_MakeField("nic.networks.names", "NIC_networks_names", QFT_OTHER,
                "List containing the name of each interface's network"),
IQ_NETWORKS, 0, _GetInstAllNicNetworkNames)
]
# NICs by number
for i in range(constants.MAX_NICS):
numtext = utils.FormatOrdinal(i + 1)
fields.extend([
(_MakeField("nic.ip/%s" % i, "NicIP/%s" % i, QFT_TEXT,
"IP address of %s network interface" % numtext),
IQ_CONFIG, 0, _GetInstNic(i, _GetInstNicIp)),
(_MakeField("nic.mac/%s" % i, "NicMAC/%s" % i, QFT_TEXT,
"MAC address of %s network interface" % numtext),
IQ_CONFIG, 0, _GetInstNic(i, nic_mac_fn)),
      (_MakeField("nic.name/%s" % i, "NicName/%s" % i, QFT_TEXT,
                  "Name of %s network interface" % numtext),
IQ_CONFIG, 0, _GetInstNic(i, _GetInstDeviceName)),
      (_MakeField("nic.uuid/%s" % i, "NicUUID/%s" % i, QFT_TEXT,
                  "UUID of %s network interface" % numtext),
IQ_CONFIG, 0, _GetInstNic(i, _GetInstDeviceUUID)),
(_MakeField("nic.mode/%s" % i, "NicMode/%s" % i, QFT_TEXT,
"Mode of %s network interface" % numtext),
IQ_CONFIG, 0, _GetInstNic(i, nic_mode_fn)),
(_MakeField("nic.link/%s" % i, "NicLink/%s" % i, QFT_TEXT,
"Link of %s network interface" % numtext),
IQ_CONFIG, 0, _GetInstNic(i, nic_link_fn)),
(_MakeField("nic.bridge/%s" % i, "NicBridge/%s" % i, QFT_TEXT,
"Bridge of %s network interface" % numtext),
IQ_CONFIG, 0, _GetInstNic(i, _GetInstNicBridge)),
(_MakeField("nic.vlan/%s" % i, "NicVLAN/%s" % i, QFT_TEXT,
"VLAN of %s network interface" % numtext),
IQ_CONFIG, 0, _GetInstNic(i, _GetInstNicVLan)),
(_MakeField("nic.network/%s" % i, "NicNetwork/%s" % i, QFT_TEXT,
"Network of %s network interface" % numtext),
IQ_CONFIG, 0, _GetInstNic(i, _GetInstNicNetwork)),
(_MakeField("nic.network.name/%s" % i, "NicNetworkName/%s" % i, QFT_TEXT,
"Network name of %s network interface" % numtext),
IQ_NETWORKS, 0, _GetInstNic(i, _GetInstNicNetworkName)),
])
aliases = [
# Legacy fields for first NIC
("ip", "nic.ip/0"),
("mac", "nic.mac/0"),
("bridge", "nic.bridge/0"),
("nic_mode", "nic.mode/0"),
("nic_link", "nic.link/0"),
("nic_network", "nic.network/0"),
]
return (fields, aliases)
def _GetInstDiskUsage(ctx, inst):
"""Get disk usage for an instance.
@type ctx: L{InstanceQueryData}
@type inst: L{objects.Instance}
@param inst: Instance object
"""
usage = ctx.disk_usage[inst.uuid]
if usage is None:
usage = 0
return usage
def _GetInstanceConsole(ctx, inst):
"""Get console information for instance.
@type ctx: L{InstanceQueryData}
@type inst: L{objects.Instance}
@param inst: Instance object
"""
consinfo = ctx.console[inst.uuid]
if consinfo is None:
return _FS_UNAVAIL
return consinfo
def _GetInstanceDiskFields():
"""Get instance fields involving disks.
@return: List of field definitions used as input for L{_PrepareFieldList}
"""
fields = [
(_MakeField("disk_usage", "DiskUsage", QFT_UNIT,
"Total disk space used by instance on each of its nodes;"
" this is not the disk size visible to the instance, but"
" the usage on the node"),
IQ_DISKUSAGE, 0, _GetInstDiskUsage),
(_MakeField("disk.count", "Disks", QFT_NUMBER, "Number of disks"),
IQ_CONFIG, 0, lambda ctx, inst: len(inst.disks)),
(_MakeField("disk.sizes", "Disk_sizes", QFT_OTHER, "List of disk sizes"),
IQ_CONFIG, 0, lambda ctx, inst: [disk.size for disk in inst.disks]),
(_MakeField("disk.spindles", "Disk_spindles", QFT_OTHER,
"List of disk spindles"),
IQ_CONFIG, 0, lambda ctx, inst: [disk.spindles for disk in inst.disks]),
(_MakeField("disk.names", "Disk_names", QFT_OTHER, "List of disk names"),
IQ_CONFIG, 0, lambda ctx, inst: [disk.name for disk in inst.disks]),
(_MakeField("disk.uuids", "Disk_UUIDs", QFT_OTHER, "List of disk UUIDs"),
IQ_CONFIG, 0, lambda ctx, inst: [disk.uuid for disk in inst.disks]),
]
# Disks by number
for i in range(constants.MAX_DISKS):
numtext = utils.FormatOrdinal(i + 1)
fields.extend([
(_MakeField("disk.size/%s" % i, "Disk/%s" % i, QFT_UNIT,
"Disk size of %s disk" % numtext),
IQ_CONFIG, 0, _GetInstDisk(i, _GetInstDiskSize)),
(_MakeField("disk.spindles/%s" % i, "DiskSpindles/%s" % i, QFT_NUMBER,
"Spindles of %s disk" % numtext),
IQ_CONFIG, 0, _GetInstDisk(i, _GetInstDiskSpindles)),
(_MakeField("disk.name/%s" % i, "DiskName/%s" % i, QFT_TEXT,
"Name of %s disk" % numtext),
IQ_CONFIG, 0, _GetInstDisk(i, _GetInstDeviceName)),
(_MakeField("disk.uuid/%s" % i, "DiskUUID/%s" % i, QFT_TEXT,
"UUID of %s disk" % numtext),
IQ_CONFIG, 0, _GetInstDisk(i, _GetInstDeviceUUID))])
return fields
def _GetInstanceParameterFields():
"""Get instance fields involving parameters.
@return: List of field definitions used as input for L{_PrepareFieldList}
"""
fields = [
# Filled parameters
(_MakeField("hvparams", "HypervisorParameters", QFT_OTHER,
"Hypervisor parameters (merged)"),
IQ_CONFIG, 0, lambda ctx, _: ctx.inst_hvparams),
(_MakeField("beparams", "BackendParameters", QFT_OTHER,
"Backend parameters (merged)"),
IQ_CONFIG, 0, lambda ctx, _: ctx.inst_beparams),
(_MakeField("osparams", "OpSysParameters", QFT_OTHER,
"Operating system parameters (merged)"),
IQ_CONFIG, 0, lambda ctx, _: ctx.inst_osparams),
# Unfilled parameters
(_MakeField("custom_hvparams", "CustomHypervisorParameters", QFT_OTHER,
"Custom hypervisor parameters"),
IQ_CONFIG, 0, _GetItemAttr("hvparams")),
(_MakeField("custom_beparams", "CustomBackendParameters", QFT_OTHER,
"Custom backend parameters",),
IQ_CONFIG, 0, _GetItemAttr("beparams")),
(_MakeField("custom_osparams", "CustomOpSysParameters", QFT_OTHER,
"Custom operating system parameters",),
IQ_CONFIG, 0, _GetItemAttr("osparams")),
(_MakeField("custom_nicparams", "CustomNicParameters", QFT_OTHER,
"Custom network interface parameters"),
IQ_CONFIG, 0, lambda ctx, inst: [nic.nicparams for nic in inst.nics]),
]
# HV params
def _GetInstHvParam(name):
return lambda ctx, _: ctx.inst_hvparams.get(name, _FS_UNAVAIL)
fields.extend([
(_MakeField("hv/%s" % name,
constants.HVS_PARAMETER_TITLES.get(name, "hv/%s" % name),
_VTToQFT[kind], "The \"%s\" hypervisor parameter" % name),
IQ_CONFIG, 0, _GetInstHvParam(name))
for name, kind in constants.HVS_PARAMETER_TYPES.items()
if name not in constants.HVC_GLOBALS])
# BE params
def _GetInstBeParam(name):
return lambda ctx, _: ctx.inst_beparams.get(name, None)
fields.extend([
(_MakeField("be/%s" % name,
constants.BES_PARAMETER_TITLES.get(name, "be/%s" % name),
_VTToQFT[kind], "The \"%s\" backend parameter" % name),
IQ_CONFIG, 0, _GetInstBeParam(name))
for name, kind in constants.BES_PARAMETER_TYPES.items()])
return fields
_INST_SIMPLE_FIELDS = {
"disk_template": ("Disk_template", QFT_TEXT, 0, "Instance disk template"),
"hypervisor": ("Hypervisor", QFT_TEXT, 0, "Hypervisor name"),
"name": ("Instance", QFT_TEXT, QFF_HOSTNAME, "Instance name"),
# Depending on the hypervisor, the port can be None
"network_port": ("Network_port", QFT_OTHER, 0,
"Instance network port if available (e.g. for VNC console)"),
"os": ("OS", QFT_TEXT, 0, "Operating system"),
"serial_no": ("SerialNo", QFT_NUMBER, 0, _SERIAL_NO_DOC % "Instance"),
"uuid": ("UUID", QFT_TEXT, 0, "Instance UUID"),
}
def _GetNodeName(ctx, default, node_uuid):
"""Gets node name of a node.
@type ctx: L{InstanceQueryData}
@param default: Default value
@type node_uuid: string
@param node_uuid: Node UUID
"""
try:
node = ctx.nodes[node_uuid]
except KeyError:
return default
else:
return node.name
def _GetInstNodeGroup(ctx, default, node_uuid):
"""Gets group UUID of an instance node.
@type ctx: L{InstanceQueryData}
@param default: Default value
@type node_uuid: string
@param node_uuid: Node UUID
"""
try:
node = ctx.nodes[node_uuid]
except KeyError:
return default
else:
return node.group
def _GetInstNodeGroupName(ctx, default, node_uuid):
"""Gets group name of an instance node.
@type ctx: L{InstanceQueryData}
@param default: Default value
@type node_uuid: string
@param node_uuid: Node UUID
"""
try:
node = ctx.nodes[node_uuid]
except KeyError:
return default
try:
group = ctx.groups[node.group]
except KeyError:
return default
return group.name
def _BuildInstanceFields():
"""Builds list of fields for instance queries.
"""
fields = [
(_MakeField("pnode", "Primary_node", QFT_TEXT, "Primary node"),
IQ_NODES, QFF_HOSTNAME,
lambda ctx, inst: _GetNodeName(ctx, None, inst.primary_node)),
(_MakeField("pnode.group", "PrimaryNodeGroup", QFT_TEXT,
"Primary node's group"),
IQ_NODES, 0,
lambda ctx, inst: _GetInstNodeGroupName(ctx, _FS_UNAVAIL,
inst.primary_node)),
(_MakeField("pnode.group.uuid", "PrimaryNodeGroupUUID", QFT_TEXT,
"Primary node's group UUID"),
IQ_NODES, 0,
lambda ctx, inst: _GetInstNodeGroup(ctx, _FS_UNAVAIL, inst.primary_node)),
# TODO: Allow filtering by secondary node as hostname
(_MakeField("snodes", "Secondary_Nodes", QFT_OTHER,
"Secondary nodes; usually this will just be one node"),
IQ_NODES, 0,
lambda ctx, inst: map(compat.partial(_GetNodeName, ctx, None),
inst.secondary_nodes)),
(_MakeField("snodes.group", "SecondaryNodesGroups", QFT_OTHER,
"Node groups of secondary nodes"),
IQ_NODES, 0,
lambda ctx, inst: map(compat.partial(_GetInstNodeGroupName, ctx, None),
inst.secondary_nodes)),
(_MakeField("snodes.group.uuid", "SecondaryNodesGroupsUUID", QFT_OTHER,
"Node group UUIDs of secondary nodes"),
IQ_NODES, 0,
lambda ctx, inst: map(compat.partial(_GetInstNodeGroup, ctx, None),
inst.secondary_nodes)),
(_MakeField("admin_state", "InstanceState", QFT_TEXT,
"Desired state of instance"),
IQ_CONFIG, 0, _GetItemAttr("admin_state")),
(_MakeField("admin_up", "Autostart", QFT_BOOL,
"Desired state of instance"),
IQ_CONFIG, 0, lambda ctx, inst: inst.admin_state == constants.ADMINST_UP),
(_MakeField("disks_active", "DisksActive", QFT_BOOL,
"Desired state of instance disks"),
IQ_CONFIG, 0, _GetItemAttr("disks_active")),
(_MakeField("tags", "Tags", QFT_OTHER, "Tags"), IQ_CONFIG, 0,
lambda ctx, inst: list(inst.GetTags())),
(_MakeField("console", "Console", QFT_OTHER,
"Instance console information"), IQ_CONSOLE, 0,
_GetInstanceConsole),
]
# Add simple fields
fields.extend([
(_MakeField(name, title, kind, doc), IQ_CONFIG, flags, _GetItemAttr(name))
for (name, (title, kind, flags, doc)) in _INST_SIMPLE_FIELDS.items()])
# Fields requiring talking to the node
fields.extend([
(_MakeField("oper_state", "Running", QFT_BOOL, "Actual state of instance"),
IQ_LIVE, 0, _GetInstOperState),
(_MakeField("oper_ram", "Memory", QFT_UNIT,
"Actual memory usage as seen by hypervisor"),
IQ_LIVE, 0, _GetInstLiveData("memory")),
(_MakeField("oper_vcpus", "VCPUs", QFT_NUMBER,
"Actual number of VCPUs as seen by hypervisor"),
IQ_LIVE, 0, _GetInstLiveData("vcpus")),
])
# Status field
status_values = (constants.INSTST_RUNNING, constants.INSTST_ADMINDOWN,
constants.INSTST_WRONGNODE, constants.INSTST_ERRORUP,
constants.INSTST_ERRORDOWN, constants.INSTST_NODEDOWN,
constants.INSTST_NODEOFFLINE, constants.INSTST_ADMINOFFLINE)
status_doc = ("Instance status; \"%s\" if instance is set to be running"
" and actually is, \"%s\" if instance is stopped and"
" is not running, \"%s\" if instance running, but not on its"
" designated primary node, \"%s\" if instance should be"
" stopped, but is actually running, \"%s\" if instance should"
" run, but doesn't, \"%s\" if instance's primary node is down,"
" \"%s\" if instance's primary node is marked offline,"
" \"%s\" if instance is offline and does not use dynamic"
" resources" % status_values)
fields.append((_MakeField("status", "Status", QFT_TEXT, status_doc),
IQ_LIVE, 0, _GetInstStatus))
assert set(status_values) == constants.INSTST_ALL, \
"Status documentation mismatch"
(network_fields, network_aliases) = _GetInstanceNetworkFields()
fields.extend(network_fields)
fields.extend(_GetInstanceParameterFields())
fields.extend(_GetInstanceDiskFields())
fields.extend(_GetItemTimestampFields(IQ_CONFIG))
aliases = [
("vcpus", "be/vcpus"),
("be/memory", "be/maxmem"),
("sda_size", "disk.size/0"),
("sdb_size", "disk.size/1"),
] + network_aliases
return _PrepareFieldList(fields, aliases)
class LockQueryData:
"""Data container for lock data queries.
"""
def __init__(self, lockdata):
"""Initializes this class.
"""
self.lockdata = lockdata
def __iter__(self):
"""Iterate over all locks.
"""
return iter(self.lockdata)
def _GetLockOwners(_, data):
"""Returns a sorted list of a lock's current owners.
"""
(_, _, owners, _) = data
if owners:
owners = utils.NiceSort(owners)
return owners
def _GetLockPending(_, data):
"""Returns a sorted list of a lock's pending acquires.
"""
(_, _, _, pending) = data
if pending:
pending = [(mode, utils.NiceSort(names))
for (mode, names) in pending]
return pending
def _BuildLockFields():
"""Builds list of fields for lock queries.
"""
return _PrepareFieldList([
# TODO: Lock names are not always hostnames. Should QFF_HOSTNAME be used?
(_MakeField("name", "Name", QFT_TEXT, "Lock name"), None, 0,
lambda ctx, (name, mode, owners, pending): name),
(_MakeField("mode", "Mode", QFT_OTHER,
"Mode in which the lock is currently acquired"
" (exclusive or shared)"),
LQ_MODE, 0, lambda ctx, (name, mode, owners, pending): mode),
(_MakeField("owner", "Owner", QFT_OTHER, "Current lock owner(s)"),
LQ_OWNER, 0, _GetLockOwners),
(_MakeField("pending", "Pending", QFT_OTHER,
"Threads waiting for the lock"),
LQ_PENDING, 0, _GetLockPending),
], [])
class GroupQueryData:
"""Data container for node group data queries.
"""
def __init__(self, cluster, groups, group_to_nodes, group_to_instances,
want_diskparams):
"""Initializes this class.
@param cluster: Cluster object
@param groups: List of node group objects
@type group_to_nodes: dict; group UUID as key
@param group_to_nodes: Per-group list of nodes
@type group_to_instances: dict; group UUID as key
@param group_to_instances: Per-group list of (primary) instances
@type want_diskparams: bool
    @param want_diskparams: Whether disk parameters should be calculated
"""
self.groups = groups
self.group_to_nodes = group_to_nodes
self.group_to_instances = group_to_instances
self.cluster = cluster
self.want_diskparams = want_diskparams
# Used for individual rows
self.group_ipolicy = None
self.ndparams = None
self.group_dp = None
def __iter__(self):
"""Iterate over all node groups.
This function has side-effects and only one instance of the resulting
generator should be used at a time.
"""
for group in self.groups:
self.group_ipolicy = self.cluster.SimpleFillIPolicy(group.ipolicy)
self.ndparams = self.cluster.SimpleFillND(group.ndparams)
if self.want_diskparams:
self.group_dp = self.cluster.SimpleFillDP(group.diskparams)
else:
self.group_dp = None
yield group
_GROUP_SIMPLE_FIELDS = {
"alloc_policy": ("AllocPolicy", QFT_TEXT, "Allocation policy for group"),
"name": ("Group", QFT_TEXT, "Group name"),
"serial_no": ("SerialNo", QFT_NUMBER, _SERIAL_NO_DOC % "Group"),
"uuid": ("UUID", QFT_TEXT, "Group UUID"),
}
def _BuildGroupFields():
"""Builds list of fields for node group queries.
"""
# Add simple fields
fields = [(_MakeField(name, title, kind, doc), GQ_CONFIG, 0,
_GetItemAttr(name))
for (name, (title, kind, doc)) in _GROUP_SIMPLE_FIELDS.items()]
def _GetLength(getter):
return lambda ctx, group: len(getter(ctx)[group.uuid])
def _GetSortedList(getter):
return lambda ctx, group: utils.NiceSort(getter(ctx)[group.uuid])
group_to_nodes = operator.attrgetter("group_to_nodes")
group_to_instances = operator.attrgetter("group_to_instances")
# Add fields for nodes
fields.extend([
(_MakeField("node_cnt", "Nodes", QFT_NUMBER, "Number of nodes"),
GQ_NODE, 0, _GetLength(group_to_nodes)),
(_MakeField("node_list", "NodeList", QFT_OTHER, "List of nodes"),
GQ_NODE, 0, _GetSortedList(group_to_nodes)),
])
# Add fields for instances
fields.extend([
(_MakeField("pinst_cnt", "Instances", QFT_NUMBER,
"Number of primary instances"),
GQ_INST, 0, _GetLength(group_to_instances)),
(_MakeField("pinst_list", "InstanceList", QFT_OTHER,
"List of primary instances"),
GQ_INST, 0, _GetSortedList(group_to_instances)),
])
# Other fields
fields.extend([
(_MakeField("tags", "Tags", QFT_OTHER, "Tags"), GQ_CONFIG, 0,
lambda ctx, group: list(group.GetTags())),
(_MakeField("ipolicy", "InstancePolicy", QFT_OTHER,
"Instance policy limitations (merged)"),
GQ_CONFIG, 0, lambda ctx, _: ctx.group_ipolicy),
(_MakeField("custom_ipolicy", "CustomInstancePolicy", QFT_OTHER,
"Custom instance policy limitations"),
GQ_CONFIG, 0, _GetItemAttr("ipolicy")),
(_MakeField("custom_ndparams", "CustomNDParams", QFT_OTHER,
"Custom node parameters"),
GQ_CONFIG, 0, _GetItemAttr("ndparams")),
(_MakeField("ndparams", "NDParams", QFT_OTHER,
"Node parameters"),
GQ_CONFIG, 0, lambda ctx, _: ctx.ndparams),
(_MakeField("diskparams", "DiskParameters", QFT_OTHER,
"Disk parameters (merged)"),
GQ_DISKPARAMS, 0, lambda ctx, _: ctx.group_dp),
(_MakeField("custom_diskparams", "CustomDiskParameters", QFT_OTHER,
"Custom disk parameters"),
GQ_CONFIG, 0, _GetItemAttr("diskparams")),
])
# ND parameters
fields.extend(_BuildNDFields(True))
fields.extend(_GetItemTimestampFields(GQ_CONFIG))
return _PrepareFieldList(fields, [])
class OsInfo(objects.ConfigObject):
__slots__ = [
"name",
"valid",
"hidden",
"blacklisted",
"variants",
"api_versions",
"parameters",
"node_status",
]
def _BuildOsFields():
"""Builds list of fields for operating system queries.
"""
fields = [
(_MakeField("name", "Name", QFT_TEXT, "Operating system name"),
None, 0, _GetItemAttr("name")),
(_MakeField("valid", "Valid", QFT_BOOL,
"Whether operating system definition is valid"),
None, 0, _GetItemAttr("valid")),
(_MakeField("hidden", "Hidden", QFT_BOOL,
"Whether operating system is hidden"),
None, 0, _GetItemAttr("hidden")),
(_MakeField("blacklisted", "Blacklisted", QFT_BOOL,
"Whether operating system is blacklisted"),
None, 0, _GetItemAttr("blacklisted")),
(_MakeField("variants", "Variants", QFT_OTHER,
"Operating system variants"),
None, 0, _ConvWrap(utils.NiceSort, _GetItemAttr("variants"))),
(_MakeField("api_versions", "ApiVersions", QFT_OTHER,
"Operating system API versions"),
None, 0, _ConvWrap(sorted, _GetItemAttr("api_versions"))),
(_MakeField("parameters", "Parameters", QFT_OTHER,
"Operating system parameters"),
None, 0, _ConvWrap(compat.partial(utils.NiceSort, key=compat.fst),
_GetItemAttr("parameters"))),
(_MakeField("node_status", "NodeStatus", QFT_OTHER,
"Status from node"),
None, 0, _GetItemAttr("node_status")),
]
return _PrepareFieldList(fields, [])
class ExtStorageInfo(objects.ConfigObject):
__slots__ = [
"name",
"node_status",
"nodegroup_status",
"parameters",
]
def _BuildExtStorageFields():
"""Builds list of fields for extstorage provider queries.
"""
fields = [
(_MakeField("name", "Name", QFT_TEXT, "ExtStorage provider name"),
None, 0, _GetItemAttr("name")),
(_MakeField("node_status", "NodeStatus", QFT_OTHER,
"Status from node"),
None, 0, _GetItemAttr("node_status")),
(_MakeField("nodegroup_status", "NodegroupStatus", QFT_OTHER,
"Overall Nodegroup status"),
None, 0, _GetItemAttr("nodegroup_status")),
(_MakeField("parameters", "Parameters", QFT_OTHER,
"ExtStorage provider parameters"),
None, 0, _GetItemAttr("parameters")),
]
return _PrepareFieldList(fields, [])
def _JobUnavailInner(fn, ctx, (job_id, job)): # pylint: disable=W0613
"""Return L{_FS_UNAVAIL} if job is None.
  When listing specific jobs (e.g. "gnt-job list 1 2 3"), a job may not be
found, in which case this function converts it to L{_FS_UNAVAIL}.
"""
if job is None:
return _FS_UNAVAIL
else:
return fn(job)
def _JobUnavail(inner):
"""Wrapper for L{_JobUnavailInner}.
"""
return compat.partial(_JobUnavailInner, inner)
def _PerJobOpInner(fn, job):
"""Executes a function per opcode in a job.
"""
return map(fn, job.ops)
def _PerJobOp(fn):
"""Wrapper for L{_PerJobOpInner}.
"""
return _JobUnavail(compat.partial(_PerJobOpInner, fn))
def _JobTimestampInner(fn, job):
"""Converts unavailable timestamp to L{_FS_UNAVAIL}.
"""
timestamp = fn(job)
if timestamp is None:
return _FS_UNAVAIL
else:
return timestamp
def _JobTimestamp(fn):
"""Wrapper for L{_JobTimestampInner}.
"""
return _JobUnavail(compat.partial(_JobTimestampInner, fn))
def _BuildJobFields():
"""Builds list of fields for job queries.
"""
fields = [
(_MakeField("id", "ID", QFT_NUMBER, "Job ID"),
None, QFF_JOB_ID, lambda _, (job_id, job): job_id),
(_MakeField("status", "Status", QFT_TEXT, "Job status"),
None, 0, _JobUnavail(lambda job: job.CalcStatus())),
(_MakeField("priority", "Priority", QFT_NUMBER,
("Current job priority (%s to %s)" %
(constants.OP_PRIO_LOWEST, constants.OP_PRIO_HIGHEST))),
None, 0, _JobUnavail(lambda job: job.CalcPriority())),
(_MakeField("archived", "Archived", QFT_BOOL, "Whether job is archived"),
JQ_ARCHIVED, 0, lambda _, (job_id, job): job.archived),
(_MakeField("ops", "OpCodes", QFT_OTHER, "List of all opcodes"),
None, 0, _PerJobOp(lambda op: op.input.__getstate__())),
(_MakeField("opresult", "OpCode_result", QFT_OTHER,
"List of opcodes results"),
None, 0, _PerJobOp(operator.attrgetter("result"))),
(_MakeField("opstatus", "OpCode_status", QFT_OTHER,
"List of opcodes status"),
None, 0, _PerJobOp(operator.attrgetter("status"))),
(_MakeField("oplog", "OpCode_log", QFT_OTHER,
"List of opcode output logs"),
None, 0, _PerJobOp(operator.attrgetter("log"))),
(_MakeField("opstart", "OpCode_start", QFT_OTHER,
"List of opcode start timestamps (before acquiring locks)"),
None, 0, _PerJobOp(operator.attrgetter("start_timestamp"))),
(_MakeField("opexec", "OpCode_exec", QFT_OTHER,
"List of opcode execution start timestamps (after acquiring"
" locks)"),
None, 0, _PerJobOp(operator.attrgetter("exec_timestamp"))),
(_MakeField("opend", "OpCode_end", QFT_OTHER,
"List of opcode execution end timestamps"),
None, 0, _PerJobOp(operator.attrgetter("end_timestamp"))),
(_MakeField("oppriority", "OpCode_prio", QFT_OTHER,
"List of opcode priorities"),
None, 0, _PerJobOp(operator.attrgetter("priority"))),
(_MakeField("summary", "Summary", QFT_OTHER,
"List of per-opcode summaries"),
None, 0, _PerJobOp(lambda op: op.input.Summary())),
]
# Timestamp fields
for (name, attr, title, desc) in [
("received_ts", "received_timestamp", "Received",
"Timestamp of when job was received"),
("start_ts", "start_timestamp", "Start", "Timestamp of job start"),
("end_ts", "end_timestamp", "End", "Timestamp of job end"),
]:
getter = operator.attrgetter(attr)
fields.extend([
(_MakeField(name, title, QFT_OTHER,
"%s (tuple containing seconds and microseconds)" % desc),
None, QFF_SPLIT_TIMESTAMP, _JobTimestamp(getter)),
])
return _PrepareFieldList(fields, [])
def _GetExportName(_, (node_name, expname)): # pylint: disable=W0613
"""Returns an export name if available.
"""
if expname is None:
return _FS_NODATA
else:
return expname
def _BuildExportFields():
"""Builds list of fields for exports.
"""
fields = [
(_MakeField("node", "Node", QFT_TEXT, "Node name"),
None, QFF_HOSTNAME, lambda _, (node_name, expname): node_name),
(_MakeField("export", "Export", QFT_TEXT, "Export name"),
None, 0, _GetExportName),
]
return _PrepareFieldList(fields, [])
_CLUSTER_VERSION_FIELDS = {
"software_version": ("SoftwareVersion", QFT_TEXT, constants.RELEASE_VERSION,
"Software version"),
"protocol_version": ("ProtocolVersion", QFT_NUMBER,
constants.PROTOCOL_VERSION,
"RPC protocol version"),
"config_version": ("ConfigVersion", QFT_NUMBER, constants.CONFIG_VERSION,
"Configuration format version"),
"os_api_version": ("OsApiVersion", QFT_NUMBER, max(constants.OS_API_VERSIONS),
"API version for OS template scripts"),
"export_version": ("ExportVersion", QFT_NUMBER, constants.EXPORT_VERSION,
"Import/export file format version"),
"vcs_version": ("VCSVersion", QFT_TEXT, constants.VCS_VERSION,
"VCS version"),
}
_CLUSTER_SIMPLE_FIELDS = {
"cluster_name": ("Name", QFT_TEXT, QFF_HOSTNAME, "Cluster name"),
"volume_group_name": ("VgName", QFT_TEXT, 0, "LVM volume group name"),
}
class ClusterQueryData:
def __init__(self, cluster, nodes, drain_flag, watcher_pause):
"""Initializes this class.
@type cluster: L{objects.Cluster}
@param cluster: Instance of cluster object
@type nodes: dict; node UUID as key
@param nodes: Node objects
@type drain_flag: bool
@param drain_flag: Whether job queue is drained
@type watcher_pause: number
@param watcher_pause: Until when watcher is paused (Unix timestamp)
"""
self._cluster = cluster
self.nodes = nodes
self.drain_flag = drain_flag
self.watcher_pause = watcher_pause
def __iter__(self):
return iter([self._cluster])
def _ClusterWatcherPause(ctx, _):
"""Returns until when watcher is paused (if available).
"""
if ctx.watcher_pause is None:
return _FS_UNAVAIL
else:
return ctx.watcher_pause
def _BuildClusterFields():
"""Builds list of fields for cluster information.
"""
fields = [
(_MakeField("tags", "Tags", QFT_OTHER, "Tags"), CQ_CONFIG, 0,
lambda ctx, cluster: list(cluster.GetTags())),
(_MakeField("architecture", "ArchInfo", QFT_OTHER,
"Architecture information"), None, 0,
lambda ctx, _: runtime.GetArchInfo()),
(_MakeField("drain_flag", "QueueDrained", QFT_BOOL,
"Flag whether job queue is drained"), CQ_QUEUE_DRAINED, 0,
lambda ctx, _: ctx.drain_flag),
(_MakeField("watcher_pause", "WatcherPause", QFT_TIMESTAMP,
"Until when watcher is paused"), CQ_WATCHER_PAUSE, 0,
_ClusterWatcherPause),
(_MakeField("master_node", "Master", QFT_TEXT, "Master node name"),
CQ_CONFIG, QFF_HOSTNAME,
lambda ctx, cluster: _GetNodeName(ctx, None, cluster.master_node)),
]
# Simple fields
fields.extend([
(_MakeField(name, title, kind, doc), CQ_CONFIG, flags, _GetItemAttr(name))
for (name, (title, kind, flags, doc)) in _CLUSTER_SIMPLE_FIELDS.items()
],)
# Version fields
fields.extend([
(_MakeField(name, title, kind, doc), None, 0, _StaticValue(value))
for (name, (title, kind, value, doc)) in _CLUSTER_VERSION_FIELDS.items()])
# Add timestamps
fields.extend(_GetItemTimestampFields(CQ_CONFIG))
return _PrepareFieldList(fields, [
("name", "cluster_name")])
class NetworkQueryData:
"""Data container for network data queries.
"""
def __init__(self, networks, network_to_groups,
network_to_instances, stats):
"""Initializes this class.
@param networks: List of network objects
@type network_to_groups: dict; network UUID as key
@param network_to_groups: Per-network list of groups
@type network_to_instances: dict; network UUID as key
@param network_to_instances: Per-network list of instances
@type stats: dict; network UUID as key
@param stats: Per-network usage statistics
"""
self.networks = networks
self.network_to_groups = network_to_groups
self.network_to_instances = network_to_instances
    self.stats = stats
    # Used for individual rows
    self.curstats = None
def __iter__(self):
"""Iterate over all networks.
"""
for net in self.networks:
if self.stats:
self.curstats = self.stats.get(net.uuid, None)
else:
self.curstats = None
yield net
_NETWORK_SIMPLE_FIELDS = {
"name": ("Network", QFT_TEXT, 0, "Name"),
"network": ("Subnet", QFT_TEXT, 0, "IPv4 subnet"),
"gateway": ("Gateway", QFT_OTHER, 0, "IPv4 gateway"),
"network6": ("IPv6Subnet", QFT_OTHER, 0, "IPv6 subnet"),
"gateway6": ("IPv6Gateway", QFT_OTHER, 0, "IPv6 gateway"),
"mac_prefix": ("MacPrefix", QFT_OTHER, 0, "MAC address prefix"),
"serial_no": ("SerialNo", QFT_NUMBER, 0, _SERIAL_NO_DOC % "Network"),
"uuid": ("UUID", QFT_TEXT, 0, "Network UUID"),
}
_NETWORK_STATS_FIELDS = {
"free_count": ("FreeCount", QFT_NUMBER, 0, "Number of available addresses"),
"reserved_count":
("ReservedCount", QFT_NUMBER, 0, "Number of reserved addresses"),
"map": ("Map", QFT_TEXT, 0, "Actual mapping"),
"external_reservations":
("ExternalReservations", QFT_TEXT, 0, "External reservations"),
}
def _GetNetworkStatsField(field, kind, ctx, _):
"""Gets the value of a "stats" field from L{NetworkQueryData}.
@param field: Field name
@param kind: Data kind, one of L{constants.QFT_ALL}
@type ctx: L{NetworkQueryData}
"""
return _GetStatsField(field, kind, ctx.curstats)
def _BuildNetworkFields():
"""Builds list of fields for network queries.
"""
fields = [
(_MakeField("tags", "Tags", QFT_OTHER, "Tags"), IQ_CONFIG, 0,
lambda ctx, inst: list(inst.GetTags())),
]
# Add simple fields
fields.extend([
(_MakeField(name, title, kind, doc),
NETQ_CONFIG, 0, _GetItemMaybeAttr(name))
for (name, (title, kind, _, doc)) in _NETWORK_SIMPLE_FIELDS.items()])
def _GetLength(getter):
return lambda ctx, network: len(getter(ctx)[network.uuid])
def _GetSortedList(getter):
return lambda ctx, network: utils.NiceSort(getter(ctx)[network.uuid])
network_to_groups = operator.attrgetter("network_to_groups")
network_to_instances = operator.attrgetter("network_to_instances")
# Add fields for node groups
fields.extend([
(_MakeField("group_cnt", "NodeGroups", QFT_NUMBER, "Number of nodegroups"),
NETQ_GROUP, 0, _GetLength(network_to_groups)),
(_MakeField("group_list", "GroupList", QFT_OTHER,
"List of nodegroups (group name, NIC mode, NIC link)"),
NETQ_GROUP, 0, lambda ctx, network: network_to_groups(ctx)[network.uuid]),
])
# Add fields for instances
fields.extend([
(_MakeField("inst_cnt", "Instances", QFT_NUMBER, "Number of instances"),
NETQ_INST, 0, _GetLength(network_to_instances)),
(_MakeField("inst_list", "InstanceList", QFT_OTHER, "List of instances"),
NETQ_INST, 0, _GetSortedList(network_to_instances)),
])
# Add fields for usage statistics
fields.extend([
(_MakeField(name, title, kind, doc), NETQ_STATS, 0,
compat.partial(_GetNetworkStatsField, name, kind))
for (name, (title, kind, _, doc)) in _NETWORK_STATS_FIELDS.items()])
# Add timestamps
fields.extend(_GetItemTimestampFields(IQ_NETWORKS))
return _PrepareFieldList(fields, [])
#: Fields for cluster information
CLUSTER_FIELDS = _BuildClusterFields()
#: Fields available for node queries
NODE_FIELDS = _BuildNodeFields()
#: Fields available for instance queries
INSTANCE_FIELDS = _BuildInstanceFields()
#: Fields available for lock queries
LOCK_FIELDS = _BuildLockFields()
#: Fields available for node group queries
GROUP_FIELDS = _BuildGroupFields()
#: Fields available for operating system queries
OS_FIELDS = _BuildOsFields()
#: Fields available for extstorage provider queries
EXTSTORAGE_FIELDS = _BuildExtStorageFields()
#: Fields available for job queries
JOB_FIELDS = _BuildJobFields()
#: Fields available for exports
EXPORT_FIELDS = _BuildExportFields()
#: Fields available for network queries
NETWORK_FIELDS = _BuildNetworkFields()
#: All available resources
ALL_FIELDS = {
constants.QR_CLUSTER: CLUSTER_FIELDS,
constants.QR_INSTANCE: INSTANCE_FIELDS,
constants.QR_NODE: NODE_FIELDS,
constants.QR_LOCK: LOCK_FIELDS,
constants.QR_GROUP: GROUP_FIELDS,
constants.QR_OS: OS_FIELDS,
constants.QR_EXTSTORAGE: EXTSTORAGE_FIELDS,
constants.QR_JOB: JOB_FIELDS,
constants.QR_EXPORT: EXPORT_FIELDS,
constants.QR_NETWORK: NETWORK_FIELDS,
}
#: All available field lists
ALL_FIELD_LISTS = ALL_FIELDS.values()
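# Illustrative usage sketch (hypothetical, not part of the original module):
# the prepared field definitions for a resource are looked up through the
# ALL_FIELDS mapping by its constants.QR_* key, e.g.
#
#   instance_fields = ALL_FIELDS[constants.QR_INSTANCE]
#   node_fields = ALL_FIELDS[constants.QR_NODE]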
|
# xml.etree test for cElementTree
from test import support
from test.support import bigmemtest, _2G
import unittest
cET = support.import_module('xml.etree.cElementTree')
# cElementTree specific tests
def sanity():
r"""
Import sanity.
>>> from xml.etree import cElementTree
Issue #6697.
>>> e = cElementTree.Element('a')
>>> getattr(e, '\uD800') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: 'Element' object has no attribute '\ud800'
>>> p = cElementTree.XMLParser()
>>> p.version.split()[0]
'Expat'
>>> getattr(p, '\uD800')
Traceback (most recent call last):
...
AttributeError: 'XMLParser' object has no attribute '\ud800'
"""
class MiscTests(unittest.TestCase):
# Issue #8651.
@support.bigmemtest(size=support._2G + 100, memuse=1)
def test_length_overflow(self, size):
if size < support._2G + 100:
self.skipTest("not enough free memory, need at least 2 GB")
data = b'x' * size
parser = cET.XMLParser()
try:
self.assertRaises(OverflowError, parser.feed, data)
finally:
data = None
def test_main():
from test import test_xml_etree, test_xml_etree_c
# Run the tests specific to the C implementation
support.run_doctest(test_xml_etree_c, verbosity=True)
support.run_unittest(MiscTests)
# Assign the C implementation before running the doctests
# Patch the __name__, to prevent confusion with the pure Python test
pyET = test_xml_etree.ET
py__name__ = test_xml_etree.__name__
test_xml_etree.ET = cET
if __name__ != '__main__':
test_xml_etree.__name__ = __name__
try:
# Run the same test suite as xml.etree.ElementTree
test_xml_etree.test_main(module_name='xml.etree.cElementTree')
finally:
test_xml_etree.ET = pyET
test_xml_etree.__name__ = py__name__
if __name__ == '__main__':
test_main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
def main(argv):
from sc_logic import check_semi_connectedness
graphs = []
if len(argv) < 2:
print('k = 2')
k = 2
print('Graph 1:')
print('n = 3')
n = 3
print('m = 2')
m = 2
g = [[0 for i in range(n)] for _ in range(n)]
print('3 2')
g[2][1] = 1
print('2 1')
g[1][0] = 1
graphs.append(g)
print('Graph 2:')
print('n = 3')
n = 3
print('m = 2')
m = 2
g = [[0 for i in range(n)] for _ in range(n)]
print('3 2')
g[2][1] = 1
print('1 2')
g[0][1] = 1
graphs.append(g)
else:
with open(argv[1]) as f:
k = int(f.readline().strip())
for _ in range(k):
f.readline()
line = f.readline()
n, m = [int(x.strip()) for x in line.strip().split()]
g = [[0 for _ in range(n)] for _ in range(n)]
for edge in range(m):
line = f.readline()
i, j = [int(x.strip()) for x in line.strip().split()]
g[i - 1][j - 1] = 1
graphs.append(g)
for g in graphs:
r = check_semi_connectedness(g)
        print('1' if r else '-1', end=' ')
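# Hypothetical reference sketch (the real sc_logic module is not shown here):
# a directed graph is semi-connected when, for every vertex pair (u, v), there
# is a path u -> v or v -> u. One simple way to check this for the small
# adjacency matrices built above is a transitive closure, O(n^3):
def _example_check_semi_connectedness(g):
    n = len(g)
    # reach[i][j] is True when j is reachable from i; every vertex reaches itself
    reach = [[bool(g[i][j]) or i == j for j in range(n)] for i in range(n)]
    for k in range(n):
        for i in range(n):
            for j in range(n):
                if reach[i][k] and reach[k][j]:
                    reach[i][j] = True
    return all(reach[i][j] or reach[j][i]
               for i in range(n) for j in range(n))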
if __name__ == "__main__":
import sys
main(sys.argv)
|
# -*- coding: utf-8 -*-
__author__ = 'isparks'
import enum
class DataType(enum.Enum):
"""ODM Data Types"""
Text = 'text'
Integer = 'integer'
Float = 'float'
Date = 'date'
DateTime = 'datetime'
Time = 'time'
String = 'string' # Used only by codelists
class QueryStatusType(enum.Enum):
"""MdsolQuery action type"""
Open = "Open"
Cancelled = "Cancelled"
Answered = "Answered"
Forwarded = "Forwarded"
Closed = "Closed"
class StepType(enum.Enum):
"""Edit/Derivation step types"""
CustomFunction = "CustomFunction"
IsEmpty = "IsEmpty"
IsNotEmpty = "IsNotEmpty"
Contains = "Contains"
StartsWith = "StartsWith"
IsLessThan = "IsLessThan"
IsLessThanOrEqualTo = "IsLessThanOrEqualTo"
IsGreaterThan = "IsGreaterThan"
IsGreaterThanOrEqualTo = "IsGreaterThanOrEqualTo"
IsEqualTo = "IsEqualTo"
IsNonConformant = "IsNonConformant"
IsNotEqualTo = "IsNotEqualTo"
InLocalLabRange = "InLocalLabRange"
LengthIsLessThan = "LengthIsLessThan"
LengthIsLessThanOrEqualTo = "LengthIsLessThanOrEqualTo"
LengthIsGreaterThan = "LengthIsGreaterThan"
LengthIsGreaterThanOrEqualTo = "LengthIsGreaterThanOrEqualTo"
LengthIsEqualTo = "LengthIsEqualTo"
Or = "Or"
And = "And"
Not = "Not"
Now = "Now"
IsPresent = "IsPresent"
IsActive = "IsActive"
Add = "Add"
Subtract = "Subtract"
Multiply = "Multiply"
Divide = "Divide"
AddDay = "AddDay"
AddMonth = "AddMonth"
AddYear = "AddYear"
AddSec = "AddSec"
AddMin = "AddMin"
AddHour = "AddHour"
DaySpan = "DaySpan"
TimeSpan = "TimeSpan"
Age = "Age"
StringAdd = "StringAdd"
Space = "Space"
ALL_STEPS = [StepType.CustomFunction,
StepType.IsEmpty,
StepType.IsNotEmpty,
StepType.Contains,
StepType.StartsWith,
StepType.IsLessThan,
StepType.IsLessThanOrEqualTo,
StepType.IsGreaterThan,
StepType.IsGreaterThanOrEqualTo,
StepType.IsEqualTo,
StepType.IsNonConformant,
StepType.IsNotEqualTo,
StepType.InLocalLabRange,
StepType.LengthIsLessThan,
StepType.LengthIsLessThanOrEqualTo,
StepType.LengthIsGreaterThan,
StepType.LengthIsGreaterThanOrEqualTo,
StepType.LengthIsEqualTo,
StepType.Or,
StepType.And,
StepType.Not,
StepType.Now,
StepType.IsPresent,
StepType.IsActive,
StepType.Add,
StepType.Subtract,
StepType.Multiply,
StepType.Divide,
StepType.AddDay,
StepType.AddMonth,
StepType.AddYear,
StepType.AddSec,
StepType.AddMin,
StepType.AddHour,
StepType.DaySpan,
StepType.TimeSpan,
StepType.Age,
StepType.StringAdd]
# Note: Missing 2015 additions to edit check step functions.
VALID_DERIVATION_STEPS = [
StepType.Age,
StepType.Subtract,
StepType.Multiply,
StepType.Divide,
StepType.AddDay,
StepType.AddMonth,
StepType.AddYear,
StepType.AddSec,
StepType.AddMin,
StepType.AddHour,
StepType.DaySpan,
StepType.TimeSpan,
StepType.Now,
StepType.StringAdd,
StepType.CustomFunction,
StepType.Space,
StepType.Add
]
class ActionType(enum.Enum):
OpenQuery = "OpenQuery"
RequireReview = "RequireReview"
RequireVerification = "RequireVerification"
AddComment = "AddComment"
AddDeviation = "AddDeviation"
CustomFunction = "CustomFunction"
PlaceSticky = "PlaceSticky"
AddForm = "AddForm"
AddMatrix = "AddMatrix"
MrgMatrix = "MrgMatrix"
OldMrgMatrix = "OldMrgMatrix"
SetNonconformant = "SetNonconformant"
SendMessage = "SendMessage"
SetDataPoint = "SetDataPoint"
SetTimeZero = "SetTimeZero"
SetTimeForward = "SetTimeForward"
SetSubjectStatus = "SetSubjectStatus"
SetSubjectName = "SetSubjectName"
UpdateFormName = "UpdateFormName"
UpdateFolderName = "UpdateFolderName"
SetRecordDate = "SetRecordDate"
SetDataPageDate = "SetDataPageDate"
SetInstanceDate = "SetInstanceDate"
SetSubjectDate = "SetSubjectDate"
SetDataPointVisible = "SetDataPointVisible"
SetSecondarySubjectName = "SetSecondarySubjectName"
SetFormRequiresSignature = "SetFormRequiresSignature"
SetFolderRequiresSignature = "SetFolderRequiresSignature"
SetSubjectRequiresSignature = "SetSubjectRequiresSignature"
SetDynamicSearchList = "SetDynamicSearchList"
ALL_ACTIONS = [
ActionType.OpenQuery,
ActionType.RequireReview,
ActionType.RequireVerification,
ActionType.AddComment,
ActionType.AddDeviation,
ActionType.CustomFunction,
ActionType.PlaceSticky,
ActionType.AddForm,
ActionType.AddMatrix,
ActionType.MrgMatrix,
ActionType.OldMrgMatrix,
ActionType.SetNonconformant,
ActionType.SendMessage,
ActionType.SetDataPoint,
ActionType.SetTimeZero,
ActionType.SetTimeForward,
ActionType.SetSubjectStatus,
ActionType.SetSubjectName,
ActionType.UpdateFormName,
ActionType.UpdateFolderName,
ActionType.SetRecordDate,
ActionType.SetDataPageDate,
ActionType.SetInstanceDate,
ActionType.SetSubjectDate,
ActionType.SetDataPointVisible,
ActionType.SetSecondarySubjectName,
ActionType.SetFormRequiresSignature,
ActionType.SetFolderRequiresSignature,
ActionType.SetSubjectRequiresSignature,
ActionType.SetDynamicSearchList
]
class RangeCheckComparatorType(enum.Enum):
LessThanEqualTo = 'LE'
GreaterThanEqualTo = 'GE'
class RangeCheckType(enum.Enum):
Soft = 'Soft'
Hard = 'Hard'
class ControlType(enum.Enum):
CheckBox = 'CheckBox'
Text = 'Text'
DateTime = 'DateTime'
DropDownList = 'DropDownList'
SearchList = 'SearchList'
RadioButton = 'RadioButton'
RadioButtonVertical = 'RadioButton (Vertical)'
FileUpload = 'File Upload'
LongText = 'LongText'
SignaturePage = 'Signature page'
SignatureFolder = 'Signature folder'
SignatureSubject = 'Signature subject'
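# Illustrative usage sketch (hypothetical helper, not part of the original
# module): a builder could reject step types that are not allowed in
# derivations by checking them against VALID_DERIVATION_STEPS first.
def _example_assert_valid_derivation_step(step):
    """Raise ValueError if the given StepType may not appear in a derivation."""
    if step not in VALID_DERIVATION_STEPS:
        raise ValueError("%s cannot be used in a derivation" % step.value)
# For instance, StepType.Add is accepted, while StepType.IsEmpty (an edit-check
# only step) raises ValueError.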
|