repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (string, 19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,222,525,072B) | line_mean (float64, 6.51-99.8) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
PaulWay/insights-core | insights/parsers/tests/test_tuned.py | 1 | 2414 |
import unittest
from insights.parsers.tuned import Tuned
from insights.tests import context_wrap
TUNED_OUTPUT = '''
Available profiles:
- balanced
- desktop
- latency-performance
- network-latency
- network-throughput
- powersave
- throughput-performance
- virtual-guest
- virtual-host
Current active profile: virtual-guest
'''.strip()
TUNED_OUTPUT2 = '''
Available profiles:
- balanced
- desktop
- latency-performance
- network-latency
- network-throughput
- powersave
- throughput-performance
- virtual-guest
- virtual-host
It seems that tuned daemon is not running, preset profile is not activated.
Preset profile: virtual-guest
'''.strip()
class CheckTuned(unittest.TestCase):
def test_active_profile(self):
tuned_output = Tuned(context_wrap(TUNED_OUTPUT))
assert len(tuned_output.data.get('available')) == 9
assert tuned_output.data.get('active') == 'virtual-guest'
assert tuned_output.data.get('available') == ['balanced',
'desktop',
'latency-performance',
'network-latency',
'network-throughput',
'powersave',
'throughput-performance',
'virtual-guest',
'virtual-host']
def test_preset_profile(self):
tuned_output = Tuned(context_wrap(TUNED_OUTPUT2))
assert len(tuned_output.data.get('available')) == 9
assert tuned_output.data.get('preset') == 'virtual-guest'
assert tuned_output.data.get('available') == ['balanced',
'desktop',
'latency-performance',
'network-latency',
'network-throughput',
'powersave',
'throughput-performance',
'virtual-guest',
'virtual-host']
| apache-2.0 | 982,681,857,497,023,200 | 37.31746 | 79 | 0.462303 | false |
namgivu/shared-model-FlaskSqlAlchemy-vs-SQLAlchemy | python-app/model/user.py | 1 | 1292 |
from base_model import BaseModel
import sqlalchemy as db
class User(BaseModel):
#table mapping
__tablename__ = "users"
##region column mapping
id = db.Column(db.Integer, primary_key=True)
user_name = db.Column(db.Text)
primary_email_id = db.Column(db.Integer, db.ForeignKey('user_emails.id') )
#Use model class instead of physical table name for db.ForeignKey() ref. http://stackoverflow.com/a/41633052/248616
from model.address import Address
billing_address_id = db.Column(db.Integer, db.ForeignKey(Address.__table__.c['id'] ))
shipping_address_id = db.Column(db.Integer, db.ForeignKey(Address.__table__.c['id'] ))
##endregion column mapping
##region relationship obj
emails = db.relationship('UserEmail',
primaryjoin='User.id==UserEmail.user_id',
back_populates='owner')
primaryEmail = db.relationship('UserEmail',
primaryjoin='User.primary_email_id==UserEmail.id')
billingAddress = db.relationship('Address',
primaryjoin='User.billing_address_id==Address.id')
shippingAddress = db.relationship('Address',
primaryjoin='User.shipping_address_id==Address.id')
##endregion relationship obj
| gpl-3.0 | 6,215,019,284,809,545,000 | 38 | 117 | 0.647059 | false |
JNU-Include/CNN | Test/lab-12-2-char-seq-rnn2.py | 1 | 1109 |
# Lab 12 Character Sequence RNN
from lib.rnn_core2 import RNNCore2
class XXX (RNNCore2):
def init_network(self):
self.set_placeholder(self.sequence_length) #15
hypothesis = self.rnn_lstm_cell(self.X, self.num_classes, self.hidden_size, self.batch_size)
self.set_hypothesis(hypothesis)
self.set_cost_function(self.batch_size, self.sequence_length)
self.set_optimizer(0.1)
gildong = XXX()
ms = " If you want you"
xd, yd = gildong.get_data(ms)
print(xd)
print(yd)
gildong.learn(xd, yd, 400, 20) #3000
gildong.predict(xd)
gildong.show_error()
'''
0 loss: 2.29895 Prediction: nnuffuunnuuuyuy
1 loss: 2.29675 Prediction: nnuffuunnuuuyuy
2 loss: 2.29459 Prediction: nnuffuunnuuuyuy
3 loss: 2.29247 Prediction: nnuffuunnuuuyuy
...
1413 loss: 1.3745 Prediction: if you want you
1414 loss: 1.3743 Prediction: if you want you
1415 loss: 1.3741 Prediction: if you want you
1416 loss: 1.3739 Prediction: if you want you
1417 loss: 1.3737 Prediction: if you want you
1418 loss: 1.37351 Prediction: if you want you
1419 loss: 1.37331 Prediction: if you want you
'''
| mit | -6,704,559,214,281,357,000 | 23.644444 | 100 | 0.719567 | false |
wakermahmud/sync-engine | tests/events/test_recurrence.py | 1 | 21964 |
import pytest
import arrow
from dateutil import tz
from dateutil.rrule import rrulestr
from datetime import timedelta
from inbox.models.event import Event, RecurringEvent, RecurringEventOverride
from inbox.models.when import Date, Time, DateSpan, TimeSpan
from inbox.events.remote_sync import handle_event_updates
from inbox.events.recurring import (link_events, get_start_times,
parse_exdate, rrule_to_json)
from inbox.log import get_logger
log = get_logger()
TEST_RRULE = ["RRULE:FREQ=WEEKLY;UNTIL=20140918T203000Z;BYDAY=TH"]
TEST_EXDATE = ["EXDATE;TZID=America/Los_Angeles:20140904T133000"]
ALL_DAY_RRULE = ["RRULE:FREQ=WEEKLY;UNTIL=20140911;BYDAY=TH"]
TEST_EXDATE_RULE = TEST_RRULE[:]
TEST_EXDATE_RULE.extend(TEST_EXDATE)
def recurring_event(db, account, calendar, rrule,
start=arrow.get(2014, 8, 7, 20, 30, 00),
end=arrow.get(2014, 8, 7, 21, 30, 00),
all_day=False, commit=True):
# commit: are we returning a committed instance object?
if commit:
ev = db.session.query(Event).filter_by(uid='myuid').first()
if ev:
db.session.delete(ev)
ev = Event(namespace_id=account.namespace.id,
calendar=calendar,
title='recurring',
description='',
uid='myuid',
location='',
busy=False,
read_only=False,
reminders='',
recurrence=rrule,
start=start,
end=end,
all_day=all_day,
is_owner=False,
participants=[],
provider_name='inbox',
raw_data='',
original_start_tz='America/Los_Angeles',
original_start_time=None,
master_event_uid=None,
source='local')
if commit:
db.session.add(ev)
db.session.commit()
return ev
def recurring_override(db, master, original_start, start, end):
# Returns an Override that is explicitly linked to master
ev = recurring_override_instance(db, master, original_start, start, end)
ev.master = master
db.session.commit()
return ev
def recurring_override_instance(db, master, original_start, start, end):
# Returns an Override that has the master's UID, but is not linked yet
override_uid = '{}_{}'.format(master.uid,
original_start.strftime("%Y%m%dT%H%M%SZ"))
ev = db.session.query(Event).filter_by(uid=override_uid).first()
if ev:
db.session.delete(ev)
db.session.commit()
ev = Event(original_start_time=original_start,
master_event_uid=master.uid,
namespace_id=master.namespace_id,
calendar_id=master.calendar_id)
ev.update(master)
ev.uid = override_uid
ev.start = start
ev.end = end
ev.master_event_uid = master.uid
db.session.add(ev)
return ev
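def _editor_override_uid_sketch():
    # Editor's illustrative sketch (not part of the original test module):
    # recurring_override_instance() above derives the override uid from the
    # master uid plus the original start formatted as basic ISO, which is the
    # key link_events() later matches on.
    original_start = arrow.get(2014, 9, 4, 20, 30)
    assert original_start.strftime("%Y%m%dT%H%M%SZ") == '20140904T203000Z'
    # => an override of this occurrence of master uid 'myuid' gets the uid
    #    'myuid_20140904T203000Z'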
def test_create_recurrence(db, default_account, calendar):
event = recurring_event(db, default_account, calendar, TEST_EXDATE_RULE)
assert isinstance(event, RecurringEvent)
assert event.rrule is not None
assert event.exdate is not None
assert event.until is not None
def test_link_events_from_override(db, default_account, calendar):
# Test that by creating a recurring event and override separately, we
# can link them together based on UID and namespace_id when starting
# from the override.
master = recurring_event(db, default_account, calendar, TEST_EXDATE_RULE)
original_start = parse_exdate(master)[0]
override = Event(original_start_time=original_start,
master_event_uid=master.uid,
namespace_id=master.namespace_id,
source='local')
assert isinstance(override, RecurringEventOverride)
link_events(db.session, override)
assert override.master == master
def test_link_events_from_master(db, default_account, calendar):
# Test that by creating a recurring event and override separately, we
# can link them together based on UID and namespace_id when starting
# from the master event.
master = recurring_event(db, default_account, calendar, TEST_EXDATE_RULE)
original_start = parse_exdate(master)[0]
override = recurring_override_instance(db, master, original_start,
master.start, master.end)
assert isinstance(master, RecurringEvent)
o = link_events(db.session, master)
assert len(o) == 1
assert override in master.overrides
assert override.uid in master.override_uids
def test_rrule_parsing(db, default_account, calendar):
# This test event starts on Aug 7 and recurs every Thursday at 20:30
# until Sept 18.
# There should be 7 total occurrences including Aug 7 and Sept 18.
event = recurring_event(db, default_account, calendar, TEST_RRULE)
g = get_start_times(event)
assert len(g) == 7
# Check we can supply an end date to cut off recurrence expansion
g = get_start_times(event, end=arrow.get(2014, 9, 12, 21, 30, 00))
assert len(g) == 6
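def _editor_rrule_expansion_sketch():
    # Editor's illustrative sketch (not part of the original test module):
    # expanding TEST_RRULE directly with dateutil reproduces the seven
    # Thursday occurrences asserted above. The dtstart mirrors the default
    # start used by recurring_event().
    rule = rrulestr(TEST_RRULE[0],
                    dtstart=arrow.get(2014, 8, 7, 20, 30).datetime)
    occurrences = list(rule)
    assert len(occurrences) == 7  # Aug 7, 14, 21, 28 and Sep 4, 11, 18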
def test_all_day_rrule_parsing(db, default_account, calendar):
event = recurring_event(db, default_account, calendar, ALL_DAY_RRULE,
start=arrow.get(2014, 8, 7),
end=arrow.get(2014, 8, 7),
all_day=True)
g = get_start_times(event)
assert len(g) == 6
def test_rrule_exceptions(db, default_account, calendar):
# This test event starts on Aug 7 and recurs every Thursday at 20:30
# until Sept 18, except on September 4.
event = recurring_event(db, default_account, calendar, TEST_EXDATE_RULE)
g = get_start_times(event)
assert len(g) == 6
assert arrow.get(2014, 9, 4, 13, 30, 00) not in g
def test_inflation(db, default_account, calendar):
event = recurring_event(db, default_account, calendar, TEST_RRULE)
infl = event.inflate()
for i in infl:
assert i.title == event.title
assert (i.end - i.start) == (event.end - event.start)
assert i.public_id.startswith(event.public_id)
# make sure the original event instance appears too
assert event.start in [e.start for e in infl]
def test_inflation_exceptions(db, default_account, calendar):
event = recurring_event(db, default_account, calendar, TEST_RRULE)
infl = event.inflate()
for i in infl:
assert i.title == event.title
assert (i.end - i.start) == (event.end - event.start)
assert i.start != arrow.get(2014, 9, 4, 13, 30, 00)
def test_inflate_across_DST(db, default_account, calendar):
# If we inflate a RRULE that covers a change to/from Daylight Savings Time,
# adjust the base time accordingly to account for the new UTC offset.
# Daylight Savings for US/PST: March 8, 2015 - Nov 1, 2015
dst_rrule = ["RRULE:FREQ=WEEKLY;BYDAY=TU"]
dst_event = recurring_event(db, default_account, calendar, dst_rrule,
start=arrow.get(2015, 03, 03, 03, 03, 03),
end=arrow.get(2015, 03, 03, 04, 03, 03))
g = get_start_times(dst_event, end=arrow.get(2015, 03, 21))
# In order for this event to occur at the same local time, the recurrence
# rule should be expanded to 03:03:03 before March 8, and 02:03:03 after,
# keeping the local time of the event consistent at 19:03.
# This is consistent with how Google returns recurring event instances.
local_tz = tz.gettz(dst_event.start_timezone)
for time in g:
if time < arrow.get(2015, 3, 8):
assert time.hour == 3
else:
assert time.hour == 2
# Test that localizing these times is consistent
assert time.astimezone(local_tz).hour == 19
# Test an event that starts during local daylight savings time
dst_event = recurring_event(db, default_account, calendar, dst_rrule,
start=arrow.get(2015, 10, 27, 02, 03, 03),
end=arrow.get(2015, 10, 27, 03, 03, 03))
g = get_start_times(dst_event, end=arrow.get(2015, 11, 11))
for time in g:
if time > arrow.get(2015, 11, 1):
assert time.hour == 3
else:
assert time.hour == 2
assert time.astimezone(local_tz).hour == 19
def test_inflate_all_day_event(db, default_account, calendar):
event = recurring_event(db, default_account, calendar, ALL_DAY_RRULE,
start=arrow.get(2014, 9, 4),
end=arrow.get(2014, 9, 4), all_day=True)
infl = event.inflate()
for i in infl:
assert i.all_day
assert isinstance(i.when, Date)
assert i.start in [arrow.get(2014, 9, 4), arrow.get(2014, 9, 11)]
def test_inflate_multi_day_event(db, default_account, calendar):
event = recurring_event(db, default_account, calendar, ALL_DAY_RRULE,
start=arrow.get(2014, 9, 4),
end=arrow.get(2014, 9, 5), all_day=True)
infl = event.inflate()
for i in infl:
assert i.all_day
assert isinstance(i.when, DateSpan)
assert i.start in [arrow.get(2014, 9, 4), arrow.get(2014, 9, 11)]
assert i.end in [arrow.get(2014, 9, 5), arrow.get(2014, 9, 12)]
def test_invalid_rrule_entry(db, default_account, calendar):
# If we don't know how to expand the RRULE, we treat the event as if
# it were a single instance.
event = recurring_event(db, default_account, calendar, 'INVALID_RRULE_YAY')
infl = event.inflate()
assert len(infl) == 1
assert infl[0].start == event.start
def test_invalid_parseable_rrule_entry(db, default_account, calendar):
event = recurring_event(db, default_account, calendar,
["RRULE:FREQ=CHRISTMAS;UNTIL=1984;BYDAY=QQ"])
infl = event.inflate()
assert len(infl) == 1
assert infl[0].start == event.start
def test_non_recurring_events_behave(db, default_account, calendar):
event = Event(namespace_id=default_account.namespace.id,
calendar=calendar,
title='not recurring',
description='',
uid='non_recurring_uid',
location='',
busy=False,
read_only=False,
reminders='',
recurrence=None,
start=arrow.get(2014, 07, 07, 13, 30),
end=arrow.get(2014, 07, 07, 13, 55),
all_day=False,
is_owner=False,
participants=[],
provider_name='inbox',
raw_data='',
original_start_tz='America/Los_Angeles',
original_start_time=None,
master_event_uid=None,
source='local')
assert isinstance(event, Event)
with pytest.raises(AttributeError):
event.inflate()
def test_inflated_events_cant_persist(db, default_account, calendar):
event = recurring_event(db, default_account, calendar, TEST_RRULE)
infl = event.inflate()
for i in infl:
db.session.add(i)
with pytest.raises(Exception) as excinfo:
# FIXME "No handlers could be found for logger" - ensure this is only
# a test issue or fix.
db.session.commit()
assert 'should not be committed' in str(excinfo.value)
def test_override_instantiated(db, default_account, calendar):
# Test that when a recurring event has overrides, they show up as
# RecurringEventOverrides, have links back to the parent, and don't
# appear twice in the event list.
event = recurring_event(db, default_account, calendar, TEST_EXDATE_RULE)
override = recurring_override(db, event,
arrow.get(2014, 9, 4, 20, 30, 00),
arrow.get(2014, 9, 4, 21, 30, 00),
arrow.get(2014, 9, 4, 22, 30, 00))
all_events = event.all_events()
assert len(all_events) == 7
assert override in all_events
def test_override_same_start(db, default_account, calendar):
# Test that when a recurring event has an override without a modified
# start date (ie. the RRULE has no EXDATE for that event), it doesn't
# appear twice in the all_events list.
event = recurring_event(db, default_account, calendar, TEST_RRULE)
override = recurring_override(db, event,
arrow.get(2014, 9, 4, 20, 30, 00),
arrow.get(2014, 9, 4, 20, 30, 00),
arrow.get(2014, 9, 4, 21, 30, 00))
all_events = event.all_events()
assert len(all_events) == 7
unique_starts = list(set([e.start for e in all_events]))
assert len(unique_starts) == 7
assert override in all_events
def test_override_updated(db, default_account, calendar):
# Test that when a recurring event override is created or updated
# remotely, we update our override links appropriately.
event = recurring_event(db, default_account, calendar, TEST_RRULE)
assert event is not None
# create a new Event, as if we just got it from Google
master_uid = event.uid
override_uid = master_uid + "_20140814T203000Z"
override = Event(title='new override from google',
description='',
uid=override_uid,
location='',
busy=False,
read_only=False,
reminders='',
recurrence=None,
start=arrow.get(2014, 8, 14, 22, 30, 00),
end=arrow.get(2014, 8, 14, 23, 30, 00),
all_day=False,
is_owner=False,
participants=[],
provider_name='inbox',
raw_data='',
original_start_tz='America/Los_Angeles',
original_start_time=arrow.get(2014, 8, 14, 21, 30, 00),
master_event_uid=master_uid,
source='local')
handle_event_updates(default_account.namespace.id,
calendar.id,
[override],
log,
db.session)
db.session.commit()
# Lets see if the event got saved with the right info
find_override = db.session.query(Event).filter_by(uid=override_uid).one()
assert find_override is not None
assert find_override.master_event_id == event.id
# Update the same override, making sure we don't create two
override = Event(title='new override from google',
description='',
uid=override_uid,
location='walk and talk',
busy=False,
read_only=False,
reminders='',
recurrence=None,
start=arrow.get(2014, 8, 14, 22, 15, 00),
end=arrow.get(2014, 8, 14, 23, 15, 00),
all_day=False,
is_owner=False,
participants=[],
provider_name='inbox',
raw_data='',
original_start_tz='America/Los_Angeles',
original_start_time=arrow.get(2014, 8, 14, 21, 30, 00),
master_event_uid=master_uid,
source='local')
handle_event_updates(default_account.namespace.id,
calendar.id,
[override], log, db.session)
db.session.commit()
# Let's see if the event got saved with the right info
find_override = db.session.query(Event).filter_by(uid=override_uid).one()
assert find_override is not None
assert find_override.master_event_id == event.id
assert find_override.location == 'walk and talk'
def test_override_cancelled(db, default_account, calendar):
# Test that overrides with status 'cancelled' are appropriately missing
# from the expanded event.
event = recurring_event(db, default_account, calendar, TEST_EXDATE_RULE)
override = recurring_override(db, event,
arrow.get(2014, 9, 4, 20, 30, 00),
arrow.get(2014, 9, 4, 21, 30, 00),
arrow.get(2014, 9, 4, 22, 30, 00))
override.cancelled = True
all_events = event.all_events()
assert len(all_events) == 6
assert override not in all_events
assert not any([e.start == arrow.get(2014, 9, 4, 20, 30, 00)
for e in all_events])
def test_new_instance_cancelled(db, default_account, calendar):
# Test that if we receive a cancelled override from Google, we save it
# as an override with cancelled status rather than deleting it.
event = recurring_event(db, default_account, calendar, TEST_EXDATE_RULE)
override_uid = event.uid + "_20140814T203000Z"
override = Event(title='CANCELLED',
description='',
uid=override_uid,
location='',
busy=False,
read_only=False,
reminders='',
recurrence=None,
start=arrow.get(2014, 8, 14, 22, 15, 00),
end=arrow.get(2014, 8, 14, 23, 15, 00),
all_day=False,
is_owner=False,
participants=[],
provider_name='inbox',
raw_data='',
original_start_tz='America/Los_Angeles',
original_start_time=arrow.get(2014, 8, 14, 21, 30, 00),
master_event_uid=event.uid,
cancelled=True,
source='local')
handle_event_updates(default_account.namespace.id,
calendar.id,
[override], log, db.session)
db.session.commit()
# Check the event got saved with the cancelled flag
find_override = db.session.query(Event).filter_by(
uid=override_uid, namespace_id=default_account.namespace.id).one()
assert find_override.cancelled is True
def test_when_delta():
# Test that the event length is calculated correctly
ev = Event(namespace_id=0)
# Time: minutes is 0 if start/end at same time
ev.start = arrow.get(2015, 01, 01, 10, 00, 00)
ev.end = arrow.get(2015, 01, 01, 10, 00, 00)
when = ev.when
assert isinstance(when, Time)
assert ev.length == timedelta(minutes=0)
# TimeSpan
ev.start = arrow.get(2015, 01, 01, 10, 00, 00)
ev.end = arrow.get(2015, 01, 01, 10, 30, 00)
when = ev.when
assert isinstance(when, TimeSpan)
assert ev.length == timedelta(minutes=30)
# Date: notice days is 0 if starts/ends on same day
ev.all_day = True
ev.start = arrow.get(2015, 01, 01, 00, 00, 00)
ev.end = arrow.get(2015, 01, 01, 00, 00, 00)
when = ev.when
assert isinstance(when, Date)
assert ev.length == timedelta(days=0)
# DateSpan
ev.all_day = True
ev.start = arrow.get(2015, 01, 01, 10, 00, 00)
ev.end = arrow.get(2015, 01, 02, 10, 00, 00)
when = ev.when
assert isinstance(when, DateSpan)
assert ev.length == timedelta(days=1)
def test_rrule_to_json():
# Generate more test cases!
# http://jakubroztocil.github.io/rrule/
r = 'RRULE:FREQ=WEEKLY;UNTIL=20140918T203000Z;BYDAY=TH'
r = rrulestr(r, dtstart=None)
j = rrule_to_json(r)
assert j.get('freq') == 'WEEKLY'
assert j.get('byweekday') == 'TH'
r = 'FREQ=HOURLY;COUNT=30;WKST=MO;BYMONTH=1;BYMINUTE=42;BYSECOND=24'
r = rrulestr(r, dtstart=None)
j = rrule_to_json(r)
assert j.get('until') is None
assert j.get('byminute') is 42
def test_master_cancelled(db, default_account, calendar):
# Test that when the master recurring event is cancelled, we cancel every
# override too.
event = recurring_event(db, default_account, calendar, TEST_EXDATE_RULE)
override = recurring_override(db, event,
arrow.get(2014, 9, 4, 20, 30, 00),
arrow.get(2014, 9, 4, 21, 30, 00),
arrow.get(2014, 9, 4, 22, 30, 00))
update = recurring_event(db, default_account, calendar, TEST_EXDATE_RULE,
commit=False)
update.status = 'cancelled'
updates = [update]
handle_event_updates(default_account.namespace.id,
calendar.id,
updates, log, db.session)
db.session.commit()
find_master = db.session.query(Event).filter_by(uid=event.uid).first()
assert find_master.status == 'cancelled'
find_override = db.session.query(Event).filter_by(uid=override.uid).first()
assert find_override.status == 'cancelled'
def test_made_recurring_then_cancelled(db, default_account, calendar):
# Test that when an event is updated with a recurrence and cancelled at
# the same time, we cancel it.
normal = recurring_event(db, default_account, calendar, None)
# Check this is specifically an Event, not a RecurringEvent
assert type(normal) == Event
# Update with a recurrence rule *and* cancellation
update = recurring_event(db, default_account, calendar, TEST_EXDATE_RULE,
commit=False)
update.status = 'cancelled'
updates = [update]
handle_event_updates(default_account.namespace.id,
calendar.id,
updates, log, db.session)
db.session.commit()
find_master = db.session.query(Event).filter_by(uid=normal.uid).first()
assert find_master.status == 'cancelled'
| agpl-3.0 | -8,230,936,768,372,044,000 | 39.825279 | 79 | 0.5886 | false |
rhyolight/nupic.son | app/soc/tasks/helper/timekeeper.py | 1 | 1977 |
# Copyright 2010 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper class for deadline-based batch processing using the Task API"""
import datetime
from google.appengine.runtime import DeadlineExceededError
class Timekeeper:
"""Raise a DeadlineExceededError on your schedule
Python raises this exception when your request handler is about to
exceed its 30 second deadline. But you have less than a second to handle
the exception, which is probably not enough time to reliably requeue a
task in the Task API, for example.
You can get some breathing room by setting your own artificial deadline
and leaving sufficient time to, e.g., interrupt and requeue your task
"""
def __init__(self, timelimit, starttime=None):
# Allow override for testing
if not starttime:
starttime = datetime.datetime.now()
# Calculate the deadline as an offset from starttime
self.deadline = starttime + datetime.timedelta(milliseconds=timelimit)
def ping(self, currtime=None):
"""Enforce the deadline, return time remaining"""
# Allow override for testing
if not currtime:
currtime = datetime.datetime.now()
# Raise artificial deadline
if currtime >= self.deadline:
raise DeadlineExceededError()
# Return time remaining
return self.deadline - currtime
def iterate(self, items):
"""Iterate a sequence, pinging for each"""
for item in items:
yield self.ping(), item
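def _editor_usage_sketch(items):
    """Editor's illustrative sketch, not part of the original module.

    Shows the intended pattern: set an artificial deadline well inside the
    30 second request limit, then interrupt once it trips. The requeue step
    is only represented by the returned remainder here; a real handler would
    re-enqueue it via the Task API.
    """
    timekeeper = Timekeeper(20000)  # 20,000 ms artificial deadline
    processed = []
    try:
        for time_remaining, item in timekeeper.iterate(items):
            processed.append(item)  # stand-in for real per-item work
    except DeadlineExceededError:
        return processed, items[len(processed):]  # done, still to requeue
    return processed, []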
| apache-2.0 | -3,785,768,671,208,194,600 | 30.887097 | 74 | 0.736975 | false |
klmitch/nova | nova/virt/hardware.py | 1 | 101347 |
# Copyright 2014 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import itertools
import math
import re
import typing as ty
import os_resource_classes as orc
import os_traits
from oslo_log import log as logging
from oslo_utils import strutils
from oslo_utils import units
import nova.conf
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import fields
from nova.pci import stats
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
MEMPAGES_SMALL = -1
MEMPAGES_LARGE = -2
MEMPAGES_ANY = -3
class VTPMConfig(ty.NamedTuple):
version: str
model: str
def get_vcpu_pin_set():
"""Parse ``vcpu_pin_set`` config.
:returns: A set of host CPU IDs that can be used for VCPU and PCPU
allocations.
"""
if not CONF.vcpu_pin_set:
return None
cpuset_ids = parse_cpu_spec(CONF.vcpu_pin_set)
if not cpuset_ids:
msg = _("No CPUs available after parsing 'vcpu_pin_set' config, %r")
raise exception.Invalid(msg % CONF.vcpu_pin_set)
return cpuset_ids
def get_cpu_dedicated_set():
"""Parse ``[compute] cpu_dedicated_set`` config.
:returns: A set of host CPU IDs that can be used for PCPU allocations.
"""
if not CONF.compute.cpu_dedicated_set:
return None
cpu_ids = parse_cpu_spec(CONF.compute.cpu_dedicated_set)
if not cpu_ids:
msg = _("No CPUs available after parsing '[compute] "
"cpu_dedicated_set' config, %r")
raise exception.Invalid(msg % CONF.compute.cpu_dedicated_set)
return cpu_ids
def get_cpu_shared_set():
"""Parse ``[compute] cpu_shared_set`` config.
:returns: A set of host CPU IDs that can be used for emulator threads and,
optionally, for VCPU allocations.
"""
if not CONF.compute.cpu_shared_set:
return None
shared_ids = parse_cpu_spec(CONF.compute.cpu_shared_set)
if not shared_ids:
msg = _("No CPUs available after parsing '[compute] cpu_shared_set' "
"config, %r")
raise exception.Invalid(msg % CONF.compute.cpu_shared_set)
return shared_ids
def parse_cpu_spec(spec: str) -> ty.Set[int]:
"""Parse a CPU set specification.
Each element in the list is either a single CPU number, a range of
CPU numbers, or a caret followed by a CPU number to be excluded
from a previous range.
:param spec: cpu set string eg "1-4,^3,6"
:returns: a set of CPU indexes
"""
cpuset_ids: ty.Set[int] = set()
cpuset_reject_ids: ty.Set[int] = set()
for rule in spec.split(','):
rule = rule.strip()
# Handle multi ','
if len(rule) < 1:
continue
# Note the count limit in the .split() call
range_parts = rule.split('-', 1)
if len(range_parts) > 1:
reject = False
if range_parts[0] and range_parts[0][0] == '^':
reject = True
range_parts[0] = str(range_parts[0][1:])
# So, this was a range; start by converting the parts to ints
try:
start, end = [int(p.strip()) for p in range_parts]
except ValueError:
raise exception.Invalid(_("Invalid range expression %r")
% rule)
# Make sure it's a valid range
if start > end:
raise exception.Invalid(_("Invalid range expression %r")
% rule)
# Add available CPU ids to set
if not reject:
cpuset_ids |= set(range(start, end + 1))
else:
cpuset_reject_ids |= set(range(start, end + 1))
elif rule[0] == '^':
# Not a range, the rule is an exclusion rule; convert to int
try:
cpuset_reject_ids.add(int(rule[1:].strip()))
except ValueError:
raise exception.Invalid(_("Invalid exclusion "
"expression %r") % rule)
else:
# OK, a single CPU to include; convert to int
try:
cpuset_ids.add(int(rule))
except ValueError:
raise exception.Invalid(_("Invalid inclusion "
"expression %r") % rule)
# Use sets to handle the exclusion rules for us
cpuset_ids -= cpuset_reject_ids
return cpuset_ids
def format_cpu_spec(
cpuset: ty.Set[int],
allow_ranges: bool = True,
) -> str:
"""Format a libvirt CPU range specification.
Format a set/list of CPU indexes as a libvirt CPU range
specification. If allow_ranges is true, it will try to detect
continuous ranges of CPUs, otherwise it will just list each CPU
index explicitly.
:param cpuset: set (or list) of CPU indexes
:param allow_ranges: Whether we should attempt to detect continuous ranges
of CPUs.
:returns: a formatted CPU range string
"""
# We attempt to detect ranges, but don't bother with
# trying to do range negations to minimize the overall
# spec string length
if allow_ranges:
ranges: ty.List[ty.List[int]] = []
previndex = None
for cpuindex in sorted(cpuset):
if previndex is None or previndex != (cpuindex - 1):
ranges.append([])
ranges[-1].append(cpuindex)
previndex = cpuindex
parts = []
for entry in ranges:
if len(entry) == 1:
parts.append(str(entry[0]))
else:
parts.append("%d-%d" % (entry[0], entry[len(entry) - 1]))
return ",".join(parts)
else:
return ",".join(str(id) for id in sorted(cpuset))
def get_number_of_serial_ports(flavor, image_meta):
"""Get the number of serial consoles from the flavor or image.
If flavor extra specs is not set, then any image meta value is
permitted. If flavor extra specs *is* set, then this provides the
default serial port count. The image meta is permitted to override
the extra specs, but *only* with a lower value, i.e.:
- flavor hw:serial_port_count=4
VM gets 4 serial ports
- flavor hw:serial_port_count=4 and image hw_serial_port_count=2
VM gets 2 serial ports
- image hw_serial_port_count=6
VM gets 6 serial ports
- flavor hw:serial_port_count=4 and image hw_serial_port_count=6
Abort guest boot - forbidden to exceed flavor value
:param flavor: Flavor object to read extra specs from
:param image_meta: nova.objects.ImageMeta object instance
:raises: exception.ImageSerialPortNumberInvalid if the serial port count
is not a valid integer
:raises: exception.ImageSerialPortNumberExceedFlavorValue if the serial
port count defined in image is greater than that of flavor
:returns: number of serial ports
"""
flavor_num_ports, image_num_ports = _get_flavor_image_meta(
'serial_port_count', flavor, image_meta)
if flavor_num_ports:
try:
flavor_num_ports = int(flavor_num_ports)
except ValueError:
raise exception.ImageSerialPortNumberInvalid(
num_ports=flavor_num_ports)
if flavor_num_ports and image_num_ports:
if image_num_ports > flavor_num_ports:
raise exception.ImageSerialPortNumberExceedFlavorValue()
return image_num_ports
return flavor_num_ports or image_num_ports or 1
class InstanceInfo(object):
def __init__(self, state, internal_id=None):
"""Create a new Instance Info object
:param state: Required. The running state, one of the power_state codes
:param internal_id: Optional. A unique ID for the instance. Need not be
related to the Instance.uuid.
"""
self.state = state
self.internal_id = internal_id
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.__dict__ == other.__dict__)
def _score_cpu_topology(topology, wanttopology):
"""Compare a topology against a desired configuration.
Calculate a score indicating how well a provided topology matches
against a preferred topology, where:
a score of 3 indicates an exact match for sockets, cores and
threads
a score of 2 indicates a match of sockets and cores, or sockets
and threads, or cores and threads
a score of 1 indicates a match of sockets or cores or threads
a score of 0 indicates no match
:param wanttopology: nova.objects.VirtCPUTopology instance for
preferred topology
:returns: score in range 0 (worst) to 3 (best)
"""
score = 0
if wanttopology.sockets and topology.sockets == wanttopology.sockets:
score = score + 1
if wanttopology.cores and topology.cores == wanttopology.cores:
score = score + 1
if wanttopology.threads and topology.threads == wanttopology.threads:
score = score + 1
return score
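def _editor_topology_score_sketch():
    # Editor's illustrative sketch (not part of the original module): the
    # scorer above simply counts how many of sockets, cores and threads
    # match the preferred topology.
    want = objects.VirtCPUTopology(sockets=2, cores=4, threads=1)
    got = objects.VirtCPUTopology(sockets=2, cores=4, threads=2)
    assert _score_cpu_topology(got, want) == 2  # sockets and cores match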
def get_cpu_topology_constraints(flavor, image_meta):
"""Get the topology constraints declared in flavor or image
Extracts the topology constraints from the configuration defined in
the flavor extra specs or the image metadata. In the flavor this
will look for:
hw:cpu_sockets - preferred socket count
hw:cpu_cores - preferred core count
hw:cpu_threads - preferred thread count
hw:cpu_max_sockets - maximum socket count
hw:cpu_max_cores - maximum core count
hw:cpu_max_threads - maximum thread count
In the image metadata this will look at:
hw_cpu_sockets - preferred socket count
hw_cpu_cores - preferred core count
hw_cpu_threads - preferred thread count
hw_cpu_max_sockets - maximum socket count
hw_cpu_max_cores - maximum core count
hw_cpu_max_threads - maximum thread count
The image metadata must be strictly lower than any values set in
the flavor. All values are, however, optional.
:param flavor: Flavor object to read extra specs from
:param image_meta: nova.objects.ImageMeta object instance
:raises: exception.ImageVCPULimitsRangeExceeded if the maximum
counts set against the image exceed the maximum counts
set against the flavor
:raises: exception.ImageVCPUTopologyRangeExceeded if the preferred
counts set against the image exceed the maximum counts set
against the image or flavor
:raises: exception.InvalidRequest if one of the provided flavor properties
is a non-integer
:returns: A two-tuple of objects.VirtCPUTopology instances. The
first element corresponds to the preferred topology,
while the latter corresponds to the maximum topology,
based on upper limits.
"""
flavor_max_sockets, image_max_sockets = _get_flavor_image_meta(
'cpu_max_sockets', flavor, image_meta, 0)
flavor_max_cores, image_max_cores = _get_flavor_image_meta(
'cpu_max_cores', flavor, image_meta, 0)
flavor_max_threads, image_max_threads = _get_flavor_image_meta(
'cpu_max_threads', flavor, image_meta, 0)
# image metadata is already of the correct type
try:
flavor_max_sockets = int(flavor_max_sockets)
flavor_max_cores = int(flavor_max_cores)
flavor_max_threads = int(flavor_max_threads)
except ValueError as e:
msg = _('Invalid flavor extra spec. Error: %s') % str(e)
raise exception.InvalidRequest(msg)
LOG.debug("Flavor limits %(sockets)d:%(cores)d:%(threads)d",
{"sockets": flavor_max_sockets,
"cores": flavor_max_cores,
"threads": flavor_max_threads})
LOG.debug("Image limits %(sockets)d:%(cores)d:%(threads)d",
{"sockets": image_max_sockets,
"cores": image_max_cores,
"threads": image_max_threads})
# Image limits are not permitted to exceed the flavor
# limits. ie they can only lower what the flavor defines
if ((flavor_max_sockets and image_max_sockets > flavor_max_sockets) or
(flavor_max_cores and image_max_cores > flavor_max_cores) or
(flavor_max_threads and image_max_threads > flavor_max_threads)):
raise exception.ImageVCPULimitsRangeExceeded(
image_sockets=image_max_sockets,
image_cores=image_max_cores,
image_threads=image_max_threads,
flavor_sockets=flavor_max_sockets,
flavor_cores=flavor_max_cores,
flavor_threads=flavor_max_threads)
max_sockets = image_max_sockets or flavor_max_sockets or 65536
max_cores = image_max_cores or flavor_max_cores or 65536
max_threads = image_max_threads or flavor_max_threads or 65536
flavor_sockets, image_sockets = _get_flavor_image_meta(
'cpu_sockets', flavor, image_meta, 0)
flavor_cores, image_cores = _get_flavor_image_meta(
'cpu_cores', flavor, image_meta, 0)
flavor_threads, image_threads = _get_flavor_image_meta(
'cpu_threads', flavor, image_meta, 0)
try:
flavor_sockets = int(flavor_sockets)
flavor_cores = int(flavor_cores)
flavor_threads = int(flavor_threads)
except ValueError as e:
msg = _('Invalid flavor extra spec. Error: %s') % str(e)
raise exception.InvalidRequest(msg)
LOG.debug("Flavor pref %(sockets)d:%(cores)d:%(threads)d",
{"sockets": flavor_sockets,
"cores": flavor_cores,
"threads": flavor_threads})
LOG.debug("Image pref %(sockets)d:%(cores)d:%(threads)d",
{"sockets": image_sockets,
"cores": image_cores,
"threads": image_threads})
# If the image limits have reduced the flavor limits we might need
# to discard the preferred topology from the flavor
if ((flavor_sockets > max_sockets) or
(flavor_cores > max_cores) or
(flavor_threads > max_threads)):
flavor_sockets = flavor_cores = flavor_threads = 0
# However, image topology is not permitted to exceed image/flavor
# limits
if ((image_sockets > max_sockets) or
(image_cores > max_cores) or
(image_threads > max_threads)):
raise exception.ImageVCPUTopologyRangeExceeded(
image_sockets=image_sockets,
image_cores=image_cores,
image_threads=image_threads,
max_sockets=max_sockets,
max_cores=max_cores,
max_threads=max_threads)
# If no preferred topology was set against the image then use the
# preferred topology from the flavor. We use 'not or' rather than
# 'not and', since if any value is set against the image this
# invalidates the entire set of values from the flavor
if not any((image_sockets, image_cores, image_threads)):
sockets = flavor_sockets
cores = flavor_cores
threads = flavor_threads
else:
sockets = image_sockets
cores = image_cores
threads = image_threads
LOG.debug('Chose sockets=%(sockets)d, cores=%(cores)d, '
'threads=%(threads)d; limits were sockets=%(maxsockets)d, '
'cores=%(maxcores)d, threads=%(maxthreads)d',
{"sockets": sockets, "cores": cores,
"threads": threads, "maxsockets": max_sockets,
"maxcores": max_cores, "maxthreads": max_threads})
return (objects.VirtCPUTopology(sockets=sockets, cores=cores,
threads=threads),
objects.VirtCPUTopology(sockets=max_sockets, cores=max_cores,
threads=max_threads))
def _get_possible_cpu_topologies(vcpus, maxtopology,
allow_threads):
"""Get a list of possible topologies for a vCPU count.
Given a total desired vCPU count and constraints on the maximum
number of sockets, cores and threads, return a list of
objects.VirtCPUTopology instances that represent every possible
topology that satisfies the constraints.
:param vcpus: total number of CPUs for guest instance
:param maxtopology: objects.VirtCPUTopology instance for upper
limits
:param allow_threads: True if the hypervisor supports CPU threads
:raises: exception.ImageVCPULimitsRangeImpossible if it is
impossible to achieve the total vcpu count given
the maximum limits on sockets, cores and threads
:returns: list of objects.VirtCPUTopology instances
"""
# Clamp limits to number of vcpus to prevent
# iterating over insanely large list
maxsockets = min(vcpus, maxtopology.sockets)
maxcores = min(vcpus, maxtopology.cores)
maxthreads = min(vcpus, maxtopology.threads)
if not allow_threads:
maxthreads = 1
LOG.debug("Build topologies for %(vcpus)d vcpu(s) "
"%(maxsockets)d:%(maxcores)d:%(maxthreads)d",
{"vcpus": vcpus, "maxsockets": maxsockets,
"maxcores": maxcores, "maxthreads": maxthreads})
# Figure out all possible topologies that match
# the required vcpus count and satisfy the declared
# limits. If the total vCPU count were very high
# it might be more efficient to factorize the vcpu
# count and then only iterate over its factors, but
# that's overkill right now
possible = []
for s in range(1, maxsockets + 1):
for c in range(1, maxcores + 1):
for t in range(1, maxthreads + 1):
if (t * c * s) != vcpus:
continue
possible.append(
objects.VirtCPUTopology(sockets=s,
cores=c,
threads=t))
# We want to
# - Minimize threads (ie larger sockets * cores is best)
# - Prefer sockets over cores
possible = sorted(possible, reverse=True,
key=lambda x: (x.sockets * x.cores,
x.sockets,
x.threads))
LOG.debug("Got %d possible topologies", len(possible))
if len(possible) == 0:
raise exception.ImageVCPULimitsRangeImpossible(vcpus=vcpus,
sockets=maxsockets,
cores=maxcores,
threads=maxthreads)
return possible
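def _editor_topology_enumeration_sketch():
    # Editor's illustrative sketch (not part of the original module): for a
    # 4-vCPU guest capped at 4 sockets, 4 cores and 2 threads, the helper
    # above enumerates every factorization and orders it to minimize threads
    # and prefer sockets over cores.
    maxtopology = objects.VirtCPUTopology(sockets=4, cores=4, threads=2)
    possible = _get_possible_cpu_topologies(4, maxtopology, allow_threads=True)
    assert [(t.sockets, t.cores, t.threads) for t in possible] == [
        (4, 1, 1), (2, 2, 1), (1, 4, 1), (2, 1, 2), (1, 2, 2)]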
def _filter_for_numa_threads(possible, wantthreads):
"""Filter topologies which closest match to NUMA threads.
Determine which topologies provide the closest match to the number
of threads desired by the NUMA topology of the instance.
The possible topologies may not have any entries which match the
desired thread count. This method will find the topologies which
have the closest matching count. For example, if 'wantthreads' is 4
and the possible topologies has entries with 6, 3, 2 or 1 threads,
the topologies which have 3 threads will be identified as the
closest match not greater than 4 and will be returned.
:param possible: list of objects.VirtCPUTopology instances
:param wantthreads: desired number of threads
:returns: list of objects.VirtCPUTopology instances
"""
# First figure out the largest available thread
# count which is not greater than wantthreads
mostthreads = 0
for topology in possible:
if topology.threads > wantthreads:
continue
if topology.threads > mostthreads:
mostthreads = topology.threads
# Now restrict to just those topologies which
# match the largest thread count
bestthreads = []
for topology in possible:
if topology.threads != mostthreads:
continue
bestthreads.append(topology)
return bestthreads
def _sort_possible_cpu_topologies(possible, wanttopology):
"""Sort the topologies in order of preference.
Sort the provided list of possible topologies such that the
configurations which most closely match the preferred topology are
first.
:param possible: list of objects.VirtCPUTopology instances
:param wanttopology: objects.VirtCPUTopology instance for preferred
topology
:returns: sorted list of nova.objects.VirtCPUTopology instances
"""
# Look at possible topologies and score them according
# to how well they match the preferred topologies
# We don't use python's sort(), since we want to
# preserve the sorting done when populating the
# 'possible' list originally
scores: ty.Dict[int, ty.List['objects.VirtCPUTopology']] = (
collections.defaultdict(list)
)
for topology in possible:
score = _score_cpu_topology(topology, wanttopology)
scores[score].append(topology)
# Build list of all possible topologies sorted
# by the match score, best match first
desired = []
desired.extend(scores[3])
desired.extend(scores[2])
desired.extend(scores[1])
desired.extend(scores[0])
return desired
def _get_desirable_cpu_topologies(flavor, image_meta, allow_threads=True,
numa_topology=None):
"""Identify desirable CPU topologies based for given constraints.
Look at the properties set in the flavor extra specs and the image
metadata and build up a list of all possible valid CPU topologies
that can be used in the guest. Then return this list sorted in
order of preference.
:param flavor: objects.Flavor instance to query extra specs from
:param image_meta: nova.objects.ImageMeta object instance
:param allow_threads: if the hypervisor supports CPU threads
:param numa_topology: objects.InstanceNUMATopology instance that
may contain additional topology constraints
(such as threading information) that should
be considered
:returns: sorted list of objects.VirtCPUTopology instances
"""
LOG.debug("Getting desirable topologies for flavor %(flavor)s "
"and image_meta %(image_meta)s, allow threads: %(threads)s",
{"flavor": flavor, "image_meta": image_meta,
"threads": allow_threads})
preferred, maximum = get_cpu_topology_constraints(flavor, image_meta)
LOG.debug("Topology preferred %(preferred)s, maximum %(maximum)s",
{"preferred": preferred, "maximum": maximum})
possible = _get_possible_cpu_topologies(flavor.vcpus,
maximum,
allow_threads)
LOG.debug("Possible topologies %s", possible)
if numa_topology:
min_requested_threads = None
cell_topologies = [cell.cpu_topology for cell in numa_topology.cells
if ('cpu_topology' in cell and cell.cpu_topology)]
if cell_topologies:
min_requested_threads = min(
topo.threads for topo in cell_topologies)
if min_requested_threads:
if preferred.threads:
min_requested_threads = min(preferred.threads,
min_requested_threads)
specified_threads = max(1, min_requested_threads)
LOG.debug("Filtering topologies best for %d threads",
specified_threads)
possible = _filter_for_numa_threads(possible,
specified_threads)
LOG.debug("Remaining possible topologies %s",
possible)
desired = _sort_possible_cpu_topologies(possible, preferred)
LOG.debug("Sorted desired topologies %s", desired)
return desired
def get_best_cpu_topology(flavor, image_meta, allow_threads=True,
numa_topology=None):
"""Identify best CPU topology for given constraints.
Look at the properties set in the flavor extra specs and the image
metadata and build up a list of all possible valid CPU topologies
that can be used in the guest. Then return the best topology to use
:param flavor: objects.Flavor instance to query extra specs from
:param image_meta: nova.objects.ImageMeta object instance
:param allow_threads: if the hypervisor supports CPU threads
:param numa_topology: objects.InstanceNUMATopology instance that
may contain additional topology constraints
(such as threading information) that should
be considered
:returns: an objects.VirtCPUTopology instance for best topology
"""
return _get_desirable_cpu_topologies(flavor, image_meta,
allow_threads, numa_topology)[0]
def _numa_cell_supports_pagesize_request(host_cell, inst_cell):
"""Determine whether the cell can accept the request.
:param host_cell: host cell to fit the instance cell onto
:param inst_cell: instance cell we want to fit
:raises: exception.MemoryPageSizeNotSupported if custom page
size not supported in host cell
:returns: the page size able to be handled by host_cell
"""
avail_pagesize = [page.size_kb for page in host_cell.mempages]
avail_pagesize.sort(reverse=True)
def verify_pagesizes(host_cell, inst_cell, avail_pagesize):
inst_cell_mem = inst_cell.memory * units.Ki
for pagesize in avail_pagesize:
if host_cell.can_fit_pagesize(pagesize, inst_cell_mem):
return pagesize
if inst_cell.pagesize == MEMPAGES_SMALL:
return verify_pagesizes(host_cell, inst_cell, avail_pagesize[-1:])
elif inst_cell.pagesize == MEMPAGES_LARGE:
return verify_pagesizes(host_cell, inst_cell, avail_pagesize[:-1])
elif inst_cell.pagesize == MEMPAGES_ANY:
return verify_pagesizes(host_cell, inst_cell, avail_pagesize)
else:
return verify_pagesizes(host_cell, inst_cell, [inst_cell.pagesize])
def _pack_instance_onto_cores(host_cell, instance_cell,
num_cpu_reserved=0):
"""Pack an instance onto a set of siblings.
Calculate the pinning for the given instance and its topology,
making sure that hyperthreads of the instance match up with those
of the host when the pinning takes effect. Also ensure that the
physical cores reserved for hypervisor on this host NUMA node do
not break any thread policies.
Currently the strategy for packing is to prefer siblings and try use
cores evenly by using emptier cores first. This is achieved by the
way we order cores in the sibling_sets structure, and the order in
which we iterate through it.
The main packing loop that iterates over the sibling_sets dictionary
will not currently try to look for a fit that maximizes number of
siblings, but will simply rely on the iteration ordering and picking
the first viable placement.
:param host_cell: objects.NUMACell instance - the host cell that
the instance should be pinned to
:param instance_cell: An instance of objects.InstanceNUMACell
describing the pinning requirements of the
instance
:param num_cpu_reserved: number of pCPUs reserved for hypervisor
:returns: An instance of objects.InstanceNUMACell containing the
pinning information, the physical cores reserved and
potentially a new topology to be exposed to the
instance. None if there is no valid way to satisfy the
sibling requirements for the instance.
"""
# get number of threads per core in host's cell
threads_per_core = max(map(len, host_cell.siblings)) or 1
LOG.debug('Packing an instance onto a set of siblings: '
' host_cell_free_siblings: %(siblings)s'
' instance_cell: %(cells)s'
' host_cell_id: %(host_cell_id)s'
' threads_per_core: %(threads_per_core)s'
' num_cpu_reserved: %(num_cpu_reserved)s',
{'siblings': host_cell.free_siblings,
'cells': instance_cell,
'host_cell_id': host_cell.id,
'threads_per_core': threads_per_core,
'num_cpu_reserved': num_cpu_reserved})
# We build up a data structure that answers the question: 'Given the
# number of threads I want to pack, give me a list of all the available
# sibling sets (or groups thereof) that can accommodate it'
sibling_sets: ty.Dict[int, ty.List[ty.Set[int]]] = (
collections.defaultdict(list)
)
for sib in host_cell.free_siblings:
for threads_no in range(1, len(sib) + 1):
sibling_sets[threads_no].append(sib)
LOG.debug('Built sibling_sets: %(siblings)s', {'siblings': sibling_sets})
pinning = None
threads_no = 1
def _orphans(instance_cell, threads_per_core):
"""Number of instance CPUs which will not fill up a host core.
Best explained by an example: consider set of free host cores as such:
[(0, 1), (3, 5), (6, 7, 8)]
This would be a case of 2 threads_per_core AKA an entry for 2 in the
sibling_sets structure.
If we attempt to pack a 5 core instance on it - due to the fact that we
iterate the list in order, we will end up with a single core of the
instance pinned to a thread "alone" (with id 6), and we would have one
'orphan' vcpu.
"""
return len(instance_cell) % threads_per_core
def _threads(instance_cell, threads_per_core):
"""Threads to expose to the instance via the VirtCPUTopology.
This is calculated by taking the GCD of the number of threads we are
considering at the moment, and the number of orphans. An example for
instance_cell = 6
threads_per_core = 4
So we can fit the instance as such:
[(0, 1, 2, 3), (4, 5, 6, 7), (8, 9, 10, 11)]
x x x x x x
We can't expose 4 threads, as that will not be a valid topology (all
cores exposed to the guest have to have an equal number of threads),
and 1 would be too restrictive, but we want all threads that guest sees
to be on the same physical core, so we take GCD of 4 (max number of
threads) and 2 (number of 'orphan' CPUs) and get 2 as the number of
threads.
"""
return math.gcd(threads_per_core, _orphans(instance_cell,
threads_per_core))
def _get_pinning(threads_no, sibling_set, instance_cores):
"""Determines pCPUs/vCPUs mapping
Determines the pCPUs/vCPUs mapping regarding the number of
threads which can be used per cores.
:param threads_no: Number of host threads per cores which can
be used to pin vCPUs according to the
policies.
:param sibling_set: List of available threads per host cores
on a specific host NUMA node.
:param instance_cores: Set of vCPUs requested.
NOTE: Depending on how host is configured (HT/non-HT) a thread can
be considered as an entire core.
"""
if threads_no * len(sibling_set) < (len(instance_cores)):
return None
# Determines usable cores according the "threads number"
# constraint.
#
# For a sibling_set=[(0, 1, 2, 3), (4, 5, 6, 7)] and thread_no 1:
# usable_cores=[[0], [4]]
#
# For a sibling_set=[(0, 1, 2, 3), (4, 5, 6, 7)] and thread_no 2:
# usable_cores=[[0, 1], [4, 5]]
usable_cores = list(map(lambda s: list(s)[:threads_no], sibling_set))
# Determines the mapping vCPUs/pCPUs based on the sets of
# usable cores.
#
# For an instance_cores=[2, 3], usable_cores=[[0], [4]]
# vcpus_pinning=[(2, 0), (3, 4)]
vcpus_pinning = list(zip(sorted(instance_cores),
itertools.chain(*usable_cores)))
msg = ("Computed NUMA topology CPU pinning: usable pCPUs: "
"%(usable_cores)s, vCPUs mapping: %(vcpus_pinning)s")
msg_args = {
'usable_cores': usable_cores,
'vcpus_pinning': vcpus_pinning,
}
LOG.info(msg, msg_args)
return vcpus_pinning
def _get_reserved(sibling_set, vcpus_pinning, num_cpu_reserved=0,
cpu_thread_isolate=False):
"""Given available sibling_set, returns the pCPUs reserved
for hypervisor.
:param sibling_set: List of available threads per host cores
on a specific host NUMA node.
:param vcpus_pinning: List of tuple of (pCPU, vCPU) mapping.
:param num_cpu_reserved: Number of additional host CPUs which
need to be reserved.
:param cpu_thread_isolate: True if CPUThreadAllocationPolicy
is ISOLATE.
"""
if not vcpus_pinning:
return None
cpuset_reserved = None
usable_cores = list(map(lambda s: list(s), sibling_set))
if num_cpu_reserved:
# Updates the pCPUs used based on vCPUs pinned to.
# For the case vcpus_pinning=[(0, 0), (1, 2)] and
# usable_cores=[[0, 1], [2, 3], [4, 5]],
# if CPUThreadAllocationPolicy is isolated, we want
# to update usable_cores=[[4, 5]].
# If CPUThreadAllocationPolicy is *not* isolated,
# we want to update usable_cores=[[1],[3],[4, 5]].
for vcpu, pcpu in vcpus_pinning:
for sib in usable_cores:
if pcpu in sib:
if cpu_thread_isolate:
usable_cores.remove(sib)
else:
sib.remove(pcpu)
# Determines the pCPUs reserved for hypervisor
#
# For usable_cores=[[1],[3],[4, 5]], num_cpu_reserved=1
# cpuset_reserved=set([1])
cpuset_reserved = set(list(
itertools.chain(*usable_cores))[:num_cpu_reserved])
msg = ("Computed NUMA topology reserved pCPUs: usable pCPUs: "
"%(usable_cores)s, reserved pCPUs: %(cpuset_reserved)s")
msg_args = {
'usable_cores': usable_cores,
'cpuset_reserved': cpuset_reserved,
}
LOG.info(msg, msg_args)
return cpuset_reserved or None
if (instance_cell.cpu_thread_policy ==
fields.CPUThreadAllocationPolicy.REQUIRE):
LOG.debug("Requested 'require' thread policy for %d cores",
len(instance_cell))
elif (instance_cell.cpu_thread_policy ==
fields.CPUThreadAllocationPolicy.PREFER):
LOG.debug("Requested 'prefer' thread policy for %d cores",
len(instance_cell))
elif (instance_cell.cpu_thread_policy ==
fields.CPUThreadAllocationPolicy.ISOLATE):
LOG.debug("Requested 'isolate' thread policy for %d cores",
len(instance_cell))
else:
LOG.debug("User did not specify a thread policy. Using default "
"for %d cores", len(instance_cell))
if (instance_cell.cpu_thread_policy ==
fields.CPUThreadAllocationPolicy.ISOLATE):
# make sure we have at least one fully free core
if threads_per_core not in sibling_sets:
LOG.debug('Host does not have any fully free thread sibling sets. '
'It is not possible to emulate a non-SMT behavior '
'for the isolate policy without this.')
return
# TODO(stephenfin): Drop this when we drop support for 'vcpu_pin_set'
# NOTE(stephenfin): This is total hack. We're relying on the fact that
# the libvirt driver, which is the only one that currently supports
# pinned CPUs, will set cpuset and pcpuset to the same value if using
# legacy configuration, i.e. 'vcpu_pin_set', as part of
# '_get_host_numa_topology'. They can't be equal otherwise since
# 'cpu_dedicated_set' and 'cpu_shared_set' must be disjoint. Therefore,
# if these are equal, the host that this NUMA cell corresponds to is
# using legacy configuration and it's okay to use the old, "pin a core
# and reserve its siblings" implementation of the 'isolate' policy. If
# they're not, the host is using new-style configuration and we've just
# hit bug #1889633
if threads_per_core != 1 and host_cell.pcpuset != host_cell.cpuset:
LOG.warning(
"Host supports hyperthreads, but instance requested no "
"hyperthreads. This should have been rejected by the "
"scheduler but we likely got here due to the fallback VCPU "
"query. Consider setting '[workarounds] "
"disable_fallback_pcpu_query' to 'True' once hosts are no "
"longer using 'vcpu_pin_set'. Refer to bug #1889633 for more "
"information."
)
return
pinning = _get_pinning(
1, # we only want to "use" one thread per core
sibling_sets[threads_per_core],
instance_cell.pcpuset)
cpuset_reserved = _get_reserved(
sibling_sets[1], pinning, num_cpu_reserved=num_cpu_reserved,
cpu_thread_isolate=True)
if not pinning or (num_cpu_reserved and not cpuset_reserved):
pinning, cpuset_reserved = (None, None)
else: # REQUIRE, PREFER (explicit, implicit)
if (instance_cell.cpu_thread_policy ==
fields.CPUThreadAllocationPolicy.REQUIRE):
# make sure we actually have some siblings to play with
if threads_per_core <= 1:
LOG.info("Host does not support hyperthreading or "
"hyperthreading is disabled, but 'require' "
"threads policy was requested.")
return
# NOTE(ndipanov): We iterate over the sibling sets in descending order
# of cores that can be packed. This is an attempt to evenly distribute
# instances among physical cores
for threads_no, sibling_set in sorted(
(t for t in sibling_sets.items()), reverse=True):
# NOTE(sfinucan): The key difference between the require and
# prefer policies is that require will not settle for non-siblings
# if this is all that is available. Enforce this by ensuring we're
# using sibling sets that contain at least one sibling
if (instance_cell.cpu_thread_policy ==
fields.CPUThreadAllocationPolicy.REQUIRE):
if threads_no <= 1:
LOG.debug('Skipping threads_no: %s, as it does not satisfy'
' the require policy', threads_no)
continue
pinning = _get_pinning(
threads_no, sibling_set,
instance_cell.pcpuset)
cpuset_reserved = _get_reserved(
sibling_sets[1], pinning, num_cpu_reserved=num_cpu_reserved)
if not pinning or (num_cpu_reserved and not cpuset_reserved):
continue
break
# NOTE(sfinucan): If siblings weren't available and we're using PREFER
# (implicitly or explicitly), fall back to linear assignment across
# cores
if (instance_cell.cpu_thread_policy !=
fields.CPUThreadAllocationPolicy.REQUIRE and
not pinning):
threads_no = 1
# we create a fake sibling set by splitting all sibling sets and
# treating each core as if it has no siblings. This is necessary
# because '_get_pinning' will normally only take the same amount of
# cores ('threads_no' cores) from each sibling set. This is rather
# desirable when we're seeking to apply a thread policy but it is
# less desirable when we only care about resource usage as we do
# here. By treating each core as independent, as we do here, we
# maximize resource usage for almost-full nodes at the expense of a
# possible performance impact to the guest.
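            # For example (illustrative values), host sibling sets
            # {{0, 4}, {1, 5}} are flattened into the independent "cores"
            # [{0}, {4}, {1}, {5}].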
sibling_set = [set([x]) for x in itertools.chain(*sibling_sets[1])]
pinning = _get_pinning(
threads_no, sibling_set,
instance_cell.pcpuset)
cpuset_reserved = _get_reserved(
sibling_set, pinning, num_cpu_reserved=num_cpu_reserved)
threads_no = _threads(instance_cell, threads_no)
if not pinning or (num_cpu_reserved and not cpuset_reserved):
return
LOG.debug('Selected cores for pinning: %s, in cell %s', pinning,
host_cell.id)
topology = objects.VirtCPUTopology(sockets=1,
cores=len(pinning) // threads_no,
threads=threads_no)
instance_cell.pin_vcpus(*pinning)
instance_cell.cpu_topology = topology
instance_cell.id = host_cell.id
instance_cell.cpuset_reserved = cpuset_reserved
return instance_cell
def _numa_fit_instance_cell(
host_cell: 'objects.NUMACell',
instance_cell: 'objects.InstanceNUMACell',
    limits: ty.Optional['objects.NUMATopologyLimits'] = None,
cpuset_reserved: int = 0,
) -> ty.Optional['objects.InstanceNUMACell']:
"""Ensure an instance cell can fit onto a host cell
Ensure an instance cell can fit onto a host cell and, if so, return
a new objects.InstanceNUMACell with the id set to that of the host.
Returns None if the instance cell exceeds the limits of the host.
:param host_cell: host cell to fit the instance cell onto
:param instance_cell: instance cell we want to fit
    :param limits: an objects.NUMATopologyLimits or None
:param cpuset_reserved: An int to indicate the number of CPUs overhead
:returns: objects.InstanceNUMACell with the id set to that of the
host, or None
"""
LOG.debug('Attempting to fit instance cell %(cell)s on host_cell '
'%(host_cell)s', {'cell': instance_cell, 'host_cell': host_cell})
if 'pagesize' in instance_cell and instance_cell.pagesize:
# The instance has requested a page size. Verify that the requested
# size is valid and that there are available pages of that size on the
# host.
pagesize = _numa_cell_supports_pagesize_request(
host_cell, instance_cell)
if not pagesize:
LOG.debug('Host does not support requested memory pagesize, '
'or not enough free pages of the requested size. '
'Requested: %d kB', instance_cell.pagesize)
return None
LOG.debug('Selected memory pagesize: %(selected_mem_pagesize)d kB. '
'Requested memory pagesize: %(requested_mem_pagesize)d '
'(small = -1, large = -2, any = -3)',
{'selected_mem_pagesize': pagesize,
'requested_mem_pagesize': instance_cell.pagesize})
instance_cell.pagesize = pagesize
else:
# The instance provides a NUMA topology but does not define any
# particular page size for its memory.
if host_cell.mempages:
# The host supports explicit page sizes. Use a pagesize-aware
# memory check using the smallest available page size.
pagesize = _get_smallest_pagesize(host_cell)
LOG.debug('No specific pagesize requested for instance, '
'selected pagesize: %d', pagesize)
# we want to allow overcommit in this case as we're not using
# hugepages
if not host_cell.can_fit_pagesize(pagesize,
instance_cell.memory * units.Ki,
use_free=False):
LOG.debug('Not enough available memory to schedule instance '
'with pagesize %(pagesize)d. Required: '
'%(required)s, available: %(available)s, total: '
'%(total)s.',
{'required': instance_cell.memory,
'available': host_cell.avail_memory,
'total': host_cell.memory,
'pagesize': pagesize})
return None
else:
# The host does not support explicit page sizes. Ignore pagesizes
# completely.
# NOTE(stephenfin): Do not allow an instance to overcommit against
# itself on any NUMA cell, i.e. with 'ram_allocation_ratio = 2.0'
# on a host with 1GB RAM, we should allow two 1GB instances but not
# one 2GB instance.
if instance_cell.memory > host_cell.memory:
LOG.debug('Not enough host cell memory to fit instance cell. '
'Required: %(required)d, actual: %(actual)d',
{'required': instance_cell.memory,
'actual': host_cell.memory})
return None
# NOTE(stephenfin): As with memory, do not allow an instance to overcommit
# against itself on any NUMA cell
if instance_cell.cpu_policy in (
fields.CPUAllocationPolicy.DEDICATED,
fields.CPUAllocationPolicy.MIXED,
):
required_cpus = len(instance_cell.pcpuset) + cpuset_reserved
if required_cpus > len(host_cell.pcpuset):
LOG.debug('Not enough host cell CPUs to fit instance cell; '
'required: %(required)d + %(cpuset_reserved)d as '
'overhead, actual: %(actual)d', {
'required': len(instance_cell.pcpuset),
'actual': len(host_cell.pcpuset),
'cpuset_reserved': cpuset_reserved
})
return None
else:
required_cpus = len(instance_cell.cpuset)
if required_cpus > len(host_cell.cpuset):
LOG.debug('Not enough host cell CPUs to fit instance cell; '
'required: %(required)d, actual: %(actual)d', {
'required': len(instance_cell.cpuset),
'actual': len(host_cell.cpuset),
})
return None
if instance_cell.cpu_policy in (
fields.CPUAllocationPolicy.DEDICATED,
fields.CPUAllocationPolicy.MIXED,
):
LOG.debug('Pinning has been requested')
required_cpus = len(instance_cell.pcpuset) + cpuset_reserved
if required_cpus > host_cell.avail_pcpus:
LOG.debug('Not enough available CPUs to schedule instance. '
'Oversubscription is not possible with pinned '
'instances. Required: %(required)d (%(vcpus)d + '
'%(num_cpu_reserved)d), actual: %(actual)d',
{'required': required_cpus,
'vcpus': len(instance_cell.pcpuset),
'actual': host_cell.avail_pcpus,
'num_cpu_reserved': cpuset_reserved})
return None
if instance_cell.memory > host_cell.avail_memory:
LOG.debug('Not enough available memory to schedule instance. '
'Oversubscription is not possible with pinned '
'instances. Required: %(required)s, available: '
'%(available)s, total: %(total)s. ',
{'required': instance_cell.memory,
'available': host_cell.avail_memory,
'total': host_cell.memory})
return None
# Try to pack the instance cell onto cores
instance_cell = _pack_instance_onto_cores(
host_cell, instance_cell, num_cpu_reserved=cpuset_reserved,
)
if not instance_cell:
LOG.debug('Failed to map instance cell CPUs to host cell CPUs')
return None
elif limits:
LOG.debug('No pinning requested, considering limitations on usable cpu'
' and memory')
cpu_usage = host_cell.cpu_usage + len(instance_cell.cpuset)
cpu_limit = len(host_cell.cpuset) * limits.cpu_allocation_ratio
if cpu_usage > cpu_limit:
LOG.debug('Host cell has limitations on usable CPUs. There are '
'not enough free CPUs to schedule this instance. '
'Usage: %(usage)d, limit: %(limit)d',
{'usage': cpu_usage, 'limit': cpu_limit})
return None
ram_usage = host_cell.memory_usage + instance_cell.memory
ram_limit = host_cell.memory * limits.ram_allocation_ratio
if ram_usage > ram_limit:
LOG.debug('Host cell has limitations on usable memory. There is '
'not enough free memory to schedule this instance. '
'Usage: %(usage)d, limit: %(limit)d',
{'usage': ram_usage, 'limit': ram_limit})
return None
instance_cell.id = host_cell.id
return instance_cell
def _get_flavor_image_meta(
key: str,
flavor: 'objects.Flavor',
image_meta: 'objects.ImageMeta',
default: ty.Any = None,
) -> ty.Tuple[ty.Any, ty.Any]:
"""Extract both flavor- and image-based variants of metadata."""
flavor_key = ':'.join(['hw', key])
image_key = '_'.join(['hw', key])
flavor_value = flavor.get('extra_specs', {}).get(flavor_key, default)
image_value = image_meta.properties.get(image_key, default)
return flavor_value, image_value
def _get_unique_flavor_image_meta(
key: str,
flavor: 'objects.Flavor',
image_meta: 'objects.ImageMeta',
default: ty.Any = None
) -> ty.Any:
"""A variant of '_get_flavor_image_meta' that errors out on conflicts."""
flavor_value, image_value = _get_flavor_image_meta(
key, flavor, image_meta, default,
)
if image_value and flavor_value and image_value != flavor_value:
msg = _(
"Flavor %(flavor_name)s has hw:%(key)s extra spec explicitly "
"set to %(flavor_val)s, conflicting with image %(image_name)s "
"which has hw_%(key)s explicitly set to %(image_val)s."
)
raise exception.FlavorImageConflict(
msg % {
'key': key,
'flavor_name': flavor.name,
'flavor_val': flavor_value,
'image_name': image_meta.name,
'image_val': image_value,
},
)
return flavor_value or image_value
def get_mem_encryption_constraint(
flavor: 'objects.Flavor',
image_meta: 'objects.ImageMeta',
machine_type: ty.Optional[str] = None,
) -> bool:
"""Return a boolean indicating whether encryption of guest memory was
requested, either via the hw:mem_encryption extra spec or the
hw_mem_encryption image property (or both).
Also watch out for contradictory requests between the flavor and
image regarding memory encryption, and raise an exception where
    encountered. These conflicts can arise in three different ways:
1) the flavor requests memory encryption but the image
explicitly requests *not* to have memory encryption, or
vice-versa
2) the flavor and/or image request memory encryption, but the
image is missing hw_firmware_type=uefi
3) the flavor and/or image request memory encryption, but the
machine type is set to a value which does not contain 'q35'
This can be called from the libvirt driver on the compute node, in
which case the driver should pass the result of
nova.virt.libvirt.utils.get_machine_type() as the machine_type
parameter, or from the API layer, in which case get_machine_type()
cannot be called since it relies on being run from the compute
node in order to retrieve CONF.libvirt.hw_machine_type.
    :param flavor: Flavor object
    :param image_meta: an ImageMeta object
:param machine_type: a string representing the machine type (optional)
:raises: nova.exception.FlavorImageConflict
:raises: nova.exception.InvalidMachineType
:returns: boolean indicating whether encryption of guest memory
was requested
"""
flavor_mem_enc_str, image_mem_enc = _get_flavor_image_meta(
'mem_encryption', flavor, image_meta)
flavor_mem_enc = None
if flavor_mem_enc_str is not None:
flavor_mem_enc = strutils.bool_from_string(flavor_mem_enc_str)
# Image property is a FlexibleBooleanField, so coercion to a
# boolean is handled automatically
if not flavor_mem_enc and not image_mem_enc:
return False
_check_for_mem_encryption_requirement_conflicts(
flavor_mem_enc_str, flavor_mem_enc, image_mem_enc, flavor, image_meta)
# If we get this far, either the extra spec or image property explicitly
# specified a requirement regarding memory encryption, and if both did,
# they are asking for the same thing.
requesters = []
if flavor_mem_enc:
requesters.append("hw:mem_encryption extra spec in %s flavor" %
flavor.name)
if image_mem_enc:
requesters.append("hw_mem_encryption property of image %s" %
image_meta.name)
_check_mem_encryption_uses_uefi_image(requesters, image_meta)
_check_mem_encryption_machine_type(image_meta, machine_type)
LOG.debug("Memory encryption requested by %s", " and ".join(requesters))
return True
def _check_for_mem_encryption_requirement_conflicts(
flavor_mem_enc_str, flavor_mem_enc, image_mem_enc, flavor, image_meta):
# Check for conflicts between explicit requirements regarding
# memory encryption.
if (flavor_mem_enc is not None and image_mem_enc is not None and
flavor_mem_enc != image_mem_enc):
emsg = _(
"Flavor %(flavor_name)s has hw:mem_encryption extra spec "
"explicitly set to %(flavor_val)s, conflicting with "
"image %(image_name)s which has hw_mem_encryption property "
"explicitly set to %(image_val)s"
)
data = {
'flavor_name': flavor.name,
'flavor_val': flavor_mem_enc_str,
'image_name': image_meta.name,
'image_val': image_mem_enc,
}
raise exception.FlavorImageConflict(emsg % data)
def _check_mem_encryption_uses_uefi_image(requesters, image_meta):
if image_meta.properties.get('hw_firmware_type') == 'uefi':
return
emsg = _(
"Memory encryption requested by %(requesters)s but image "
"%(image_name)s doesn't have 'hw_firmware_type' property set to 'uefi'"
)
data = {'requesters': " and ".join(requesters),
'image_name': image_meta.name}
raise exception.FlavorImageConflict(emsg % data)
def _check_mem_encryption_machine_type(image_meta, machine_type=None):
# NOTE(aspiers): As explained in the SEV spec, SEV needs a q35
# machine type in order to bind all the virtio devices to the PCIe
# bridge so that they use virtio 1.0 and not virtio 0.9, since
# QEMU's iommu_platform feature was added in virtio 1.0 only:
#
# http://specs.openstack.org/openstack/nova-specs/specs/train/approved/amd-sev-libvirt-support.html
#
# So if the image explicitly requests a machine type which is not
# in the q35 family, raise an exception.
#
# This check can be triggered both at API-level, at which point we
# can't check here what value of CONF.libvirt.hw_machine_type may
# have been configured on the compute node, and by the libvirt
# driver, in which case the driver can check that config option
# and will pass the machine_type parameter.
mach_type = machine_type or image_meta.properties.get('hw_machine_type')
# If hw_machine_type is not specified on the image and is not
# configured correctly on SEV compute nodes, then a separate check
# in the driver will catch that and potentially retry on other
# compute nodes.
if mach_type is None:
return
# Could be something like pc-q35-2.11 if a specific version of the
# machine type is required, so do substring matching.
if 'q35' not in mach_type:
raise exception.InvalidMachineType(
mtype=mach_type,
image_id=image_meta.id, image_name=image_meta.name,
reason=_("q35 type is required for SEV to work"))
def _get_numa_pagesize_constraint(
flavor: 'objects.Flavor',
image_meta: 'objects.ImageMeta',
) -> ty.Optional[int]:
"""Return the requested memory page size
:param flavor: a Flavor object to read extra specs from
:param image_meta: nova.objects.ImageMeta object instance
:raises: MemoryPageSizeInvalid if flavor extra spec or image
metadata provides an invalid hugepage value
:raises: MemoryPageSizeForbidden if flavor extra spec request
conflicts with image metadata request
:returns: a page size requested or MEMPAGES_*
"""
def check_and_return_pages_size(request):
if request == "any":
return MEMPAGES_ANY
elif request == "large":
return MEMPAGES_LARGE
elif request == "small":
return MEMPAGES_SMALL
elif request.isdigit():
return int(request)
try:
return strutils.string_to_bytes(
request, return_int=True) / units.Ki
except ValueError:
raise exception.MemoryPageSizeInvalid(pagesize=request) from None
flavor_request, image_request = _get_flavor_image_meta(
'mem_page_size', flavor, image_meta)
if not flavor_request and image_request:
raise exception.MemoryPageSizeForbidden(
pagesize=image_request,
against="<empty>")
if not flavor_request:
        # Nothing was specified for hugepages, so let the default
        # behaviour apply.
return None
pagesize = check_and_return_pages_size(flavor_request)
if image_request and (pagesize in (MEMPAGES_ANY, MEMPAGES_LARGE)):
return check_and_return_pages_size(image_request)
elif image_request:
raise exception.MemoryPageSizeForbidden(
pagesize=image_request,
against=flavor_request)
return pagesize
def _get_constraint_mappings_from_flavor(flavor, key, func):
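    # Walks numbered extra specs such as 'hw:numa_cpus.0', 'hw:numa_cpus.1',
    # ... in index order, stopping at the first missing index, and applies
    # 'func' to parse each raw value.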
hw_numa_map = []
extra_specs = flavor.get('extra_specs', {})
for cellid in range(objects.ImageMetaProps.NUMA_NODES_MAX):
prop = '%s.%d' % (key, cellid)
if prop not in extra_specs:
break
hw_numa_map.append(func(extra_specs[prop]))
return hw_numa_map or None
def _get_numa_cpu_constraint(
flavor: 'objects.Flavor',
image_meta: 'objects.ImageMeta',
) -> ty.Optional[ty.List[ty.Set[int]]]:
"""Validate and return the requested guest NUMA-guest CPU mapping.
Extract the user-provided mapping of guest CPUs to guest NUMA nodes. For
example, the flavor extra spec ``hw:numa_cpus.0=0-1,4`` will map guest
cores ``0``, ``1``, ``4`` to guest NUMA node ``0``.
:param flavor: ``nova.objects.Flavor`` instance
:param image_meta: ``nova.objects.ImageMeta`` instance
:raises: exception.ImageNUMATopologyForbidden if both image metadata and
flavor extra specs are defined.
:return: An ordered list of sets of CPU indexes to assign to each guest
NUMA node if matching extra specs or image metadata properties found,
else None.
"""
flavor_cpu_list = _get_constraint_mappings_from_flavor(
flavor, 'hw:numa_cpus', parse_cpu_spec)
image_cpu_list = image_meta.properties.get('hw_numa_cpus', None)
if flavor_cpu_list is None:
return image_cpu_list
if image_cpu_list is not None:
raise exception.ImageNUMATopologyForbidden(
name='hw_numa_cpus')
return flavor_cpu_list
def _get_numa_mem_constraint(
flavor: 'objects.Flavor',
image_meta: 'objects.ImageMeta',
) -> ty.Optional[ty.List[int]]:
"""Validate and return the requested guest NUMA-guest memory mapping.
Extract the user-provided mapping of guest memory to guest NUMA nodes. For
    example, the flavor extra spec ``hw:numa_mem.0=1024`` will map 1024 MB of
    guest memory to guest NUMA node ``0``.
:param flavor: ``nova.objects.Flavor`` instance
:param image_meta: ``nova.objects.ImageMeta`` instance
:raises: exception.ImageNUMATopologyForbidden if both image metadata and
flavor extra specs are defined
    :return: An ordered list of memory (in MB) to assign to each guest NUMA
node if matching extra specs or image metadata properties found, else
None.
"""
flavor_mem_list = _get_constraint_mappings_from_flavor(
flavor, 'hw:numa_mem', int)
image_mem_list = image_meta.properties.get('hw_numa_mem', None)
if flavor_mem_list is None:
return image_mem_list
if image_mem_list is not None:
raise exception.ImageNUMATopologyForbidden(
name='hw_numa_mem')
return flavor_mem_list
def _get_numa_node_count_constraint(
flavor: 'objects.Flavor',
image_meta: 'objects.ImageMeta',
) -> ty.Optional[int]:
"""Validate and return the requested NUMA nodes.
:param flavor: ``nova.objects.Flavor`` instance
:param image_meta: ``nova.objects.ImageMeta`` instance
:raises: exception.ImageNUMATopologyForbidden if both image metadata and
flavor extra specs are defined
:raises: exception.InvalidNUMANodesNumber if the number of NUMA
nodes is less than 1 or not an integer
:returns: The number of NUMA nodes requested in either the flavor or image,
else None.
"""
flavor_nodes, image_nodes = _get_flavor_image_meta(
'numa_nodes', flavor, image_meta)
if flavor_nodes and image_nodes:
raise exception.ImageNUMATopologyForbidden(name='hw_numa_nodes')
nodes = flavor_nodes or image_nodes
if nodes is not None and (not strutils.is_int_like(nodes) or
int(nodes) < 1):
raise exception.InvalidNUMANodesNumber(nodes=nodes)
return int(nodes) if nodes else nodes
# NOTE(stephenfin): This must be public as it's used elsewhere
def get_cpu_policy_constraint(
flavor: 'objects.Flavor',
image_meta: 'objects.ImageMeta',
) -> ty.Optional[str]:
"""Validate and return the requested CPU policy.
:param flavor: ``nova.objects.Flavor`` instance
:param image_meta: ``nova.objects.ImageMeta`` instance
:raises: exception.ImageCPUPinningForbidden if policy is defined on both
image and flavor and these policies conflict.
:raises: exception.InvalidCPUAllocationPolicy if policy is defined with
invalid value in image or flavor.
:returns: The CPU policy requested.
"""
flavor_policy, image_policy = _get_flavor_image_meta(
'cpu_policy', flavor, image_meta)
if flavor_policy and (flavor_policy not in fields.CPUAllocationPolicy.ALL):
raise exception.InvalidCPUAllocationPolicy(
source='flavor extra specs',
requested=flavor_policy,
available=str(fields.CPUAllocationPolicy.ALL))
if image_policy and (image_policy not in fields.CPUAllocationPolicy.ALL):
raise exception.InvalidCPUAllocationPolicy(
source='image properties',
requested=image_policy,
available=str(fields.CPUAllocationPolicy.ALL))
if flavor_policy == fields.CPUAllocationPolicy.DEDICATED:
cpu_policy = flavor_policy
elif flavor_policy == fields.CPUAllocationPolicy.MIXED:
if image_policy == fields.CPUAllocationPolicy.DEDICATED:
raise exception.ImageCPUPinningForbidden()
cpu_policy = flavor_policy
elif flavor_policy == fields.CPUAllocationPolicy.SHARED:
if image_policy in (
fields.CPUAllocationPolicy.MIXED,
fields.CPUAllocationPolicy.DEDICATED,
):
raise exception.ImageCPUPinningForbidden()
cpu_policy = flavor_policy
elif image_policy in fields.CPUAllocationPolicy.ALL:
cpu_policy = image_policy
else:
cpu_policy = None
return cpu_policy
# NOTE(stephenfin): This must be public as it's used elsewhere
def get_cpu_thread_policy_constraint(
flavor: 'objects.Flavor',
image_meta: 'objects.ImageMeta',
) -> ty.Optional[str]:
"""Validate and return the requested CPU thread policy.
:param flavor: ``nova.objects.Flavor`` instance
:param image_meta: ``nova.objects.ImageMeta`` instance
:raises: exception.ImageCPUThreadPolicyForbidden if policy is defined on
both image and flavor and these policies conflict.
:raises: exception.InvalidCPUThreadAllocationPolicy if policy is defined
with invalid value in image or flavor.
:returns: The CPU thread policy requested.
"""
flavor_policy, image_policy = _get_flavor_image_meta(
'cpu_thread_policy', flavor, image_meta)
if flavor_policy and (
flavor_policy not in fields.CPUThreadAllocationPolicy.ALL):
raise exception.InvalidCPUThreadAllocationPolicy(
source='flavor extra specs',
requested=flavor_policy,
available=str(fields.CPUThreadAllocationPolicy.ALL))
if image_policy and (
image_policy not in fields.CPUThreadAllocationPolicy.ALL):
raise exception.InvalidCPUThreadAllocationPolicy(
source='image properties',
requested=image_policy,
available=str(fields.CPUThreadAllocationPolicy.ALL))
if flavor_policy in [None, fields.CPUThreadAllocationPolicy.PREFER]:
policy = flavor_policy or image_policy
elif image_policy and image_policy != flavor_policy:
raise exception.ImageCPUThreadPolicyForbidden()
else:
policy = flavor_policy
return policy
def _get_numa_topology_auto(
nodes: int,
flavor: 'objects.Flavor',
vcpus: ty.Set[int],
pcpus: ty.Set[int],
) -> 'objects.InstanceNUMATopology':
"""Generate a NUMA topology automatically based on CPUs and memory.
This is "automatic" because there's no user-provided per-node configuration
here - it's all auto-generated based on the number of nodes.
:param nodes: The number of nodes required in the generated topology.
:param flavor: The flavor used for the instance, from which to extract the
CPU and memory count.
:param vcpus: A set of IDs for CPUs that should be shared.
:param pcpus: A set of IDs for CPUs that should be dedicated.
"""
if (flavor.vcpus % nodes) > 0 or (flavor.memory_mb % nodes) > 0:
raise exception.ImageNUMATopologyAsymmetric()
cells = []
for node in range(nodes):
ncpus = int(flavor.vcpus / nodes)
mem = int(flavor.memory_mb / nodes)
start = node * ncpus
cpus = set(range(start, start + ncpus))
cells.append(objects.InstanceNUMACell(
id=node, cpuset=cpus & vcpus, pcpuset=cpus & pcpus, memory=mem))
return objects.InstanceNUMATopology(cells=cells)
def _get_numa_topology_manual(
nodes: int,
flavor: 'objects.Flavor',
vcpus: ty.Set[int],
pcpus: ty.Set[int],
cpu_list: ty.List[ty.Set[int]],
mem_list: ty.List[int],
) -> 'objects.InstanceNUMATopology':
"""Generate a NUMA topology based on user-provided NUMA topology hints.
:param nodes: The number of nodes required in the generated topology.
:param flavor: The flavor used for the instance, from which to extract the
CPU and memory count.
:param vcpus: A set of IDs for CPUs that should be shared.
:param pcpus: A set of IDs for CPUs that should be dedicated.
:param cpu_list: A list of sets of ints; each set in the list corresponds
to the set of guest cores to assign to NUMA node $index.
:param mem_list: A list of ints; each int corresponds to the amount of
memory to assign to NUMA node $index.
:returns: The generated instance NUMA topology.
"""
cells = []
totalmem = 0
availcpus = set(range(flavor.vcpus))
for node in range(nodes):
mem = mem_list[node]
cpus = cpu_list[node]
for cpu in cpus:
if cpu > (flavor.vcpus - 1):
raise exception.ImageNUMATopologyCPUOutOfRange(
cpunum=cpu, cpumax=(flavor.vcpus - 1))
if cpu not in availcpus:
raise exception.ImageNUMATopologyCPUDuplicates(
cpunum=cpu)
availcpus.remove(cpu)
cells.append(objects.InstanceNUMACell(
id=node, cpuset=cpus & vcpus, pcpuset=cpus & pcpus, memory=mem))
totalmem = totalmem + mem
if availcpus:
raise exception.ImageNUMATopologyCPUsUnassigned(
cpuset=str(availcpus))
if totalmem != flavor.memory_mb:
raise exception.ImageNUMATopologyMemoryOutOfRange(
memsize=totalmem,
memtotal=flavor.memory_mb)
return objects.InstanceNUMATopology(cells=cells)
def is_realtime_enabled(flavor):
flavor_rt = flavor.get('extra_specs', {}).get("hw:cpu_realtime")
return strutils.bool_from_string(flavor_rt)
def _get_vcpu_pcpu_resources(
flavor: 'objects.Flavor',
) -> ty.Tuple[int, int]:
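    # Sums any granular resource requests from the flavor extra specs,
    # matching keys such as 'resources:VCPU' or 'resources1:PCPU'
    # (illustrative); non-integer values are skipped here and validated
    # elsewhere.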
requested_vcpu = 0
requested_pcpu = 0
for key, val in flavor.get('extra_specs', {}).items():
if re.match('resources([1-9][0-9]*)?:%s' % orc.VCPU, key):
try:
requested_vcpu += int(val)
except ValueError:
# this is handled elsewhere
pass
if re.match('resources([1-9][0-9]*)?:%s' % orc.PCPU, key):
try:
requested_pcpu += int(val)
except ValueError:
# this is handled elsewhere
pass
return requested_vcpu, requested_pcpu
def _get_hyperthreading_trait(
flavor: 'objects.Flavor',
image_meta: 'objects.ImageMeta',
) -> ty.Optional[str]:
for key, val in flavor.get('extra_specs', {}).items():
if re.match('trait([1-9][0-9]*)?:%s' % os_traits.HW_CPU_HYPERTHREADING,
key):
return val
if os_traits.HW_CPU_HYPERTHREADING in image_meta.properties.get(
'traits_required', []):
return 'required'
return None
# NOTE(stephenfin): This must be public as it's used elsewhere
def get_dedicated_cpu_constraint(
flavor: 'objects.Flavor',
) -> ty.Optional[ty.Set[int]]:
"""Validate and return the requested dedicated CPU mask.
:param flavor: ``nova.objects.Flavor`` instance
:returns: The dedicated CPUs requested, else None.
"""
mask = flavor.get('extra_specs', {}).get('hw:cpu_dedicated_mask')
if not mask:
return None
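    # The mask uses parse_cpu_spec syntax; a leading '^' excludes CPUs from
    # the full vCPU range, e.g. with flavor.vcpus=4 a mask of '^0-1' marks
    # CPUs 2 and 3 as dedicated (illustrative example).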
if mask.strip().startswith('^'):
pcpus = parse_cpu_spec("0-%d,%s" % (flavor.vcpus - 1, mask))
else:
pcpus = parse_cpu_spec("%s" % (mask))
cpus = set(range(flavor.vcpus))
vcpus = cpus - pcpus
if not pcpus or not vcpus:
raise exception.InvalidMixedInstanceDedicatedMask()
if not pcpus.issubset(cpus):
msg = _('Mixed instance dedicated vCPU(s) mask is not a subset of '
'vCPUs in the flavor. See "hw:cpu_dedicated_mask"')
raise exception.InvalidMixedInstanceDedicatedMask(msg)
return pcpus
# NOTE(stephenfin): This must be public as it's used elsewhere
def get_realtime_cpu_constraint(
flavor: 'objects.Flavor',
image_meta: 'objects.ImageMeta',
) -> ty.Optional[ty.Set[int]]:
"""Validate and return the requested realtime CPU mask.
:param flavor: ``nova.objects.Flavor`` instance
:param image_meta: ``nova.objects.ImageMeta`` instance
:returns: The realtime CPU set requested, else None.
"""
if not is_realtime_enabled(flavor):
return None
flavor_mask, image_mask = _get_flavor_image_meta(
'cpu_realtime_mask', flavor, image_meta)
# Image masks are used ahead of flavor masks as they will have more
# specific requirements
mask = image_mask or flavor_mask
vcpus_set = set(range(flavor.vcpus))
if mask:
if mask.strip().startswith('^'):
vcpus_rt = parse_cpu_spec("0-%d,%s" % (flavor.vcpus - 1, mask))
else:
vcpus_rt = parse_cpu_spec("%s" % (mask))
else:
vcpus_rt = set(range(flavor.vcpus))
if not vcpus_rt:
raise exception.RealtimeMaskNotFoundOrInvalid()
# TODO(stephenfin): Do this check in numa_get_constraints instead
emu_policy = get_emulator_thread_policy_constraint(flavor)
if vcpus_set == vcpus_rt and not emu_policy:
raise exception.RealtimeMaskNotFoundOrInvalid()
if not vcpus_rt.issubset(vcpus_set):
msg = _("Realtime policy vCPU(s) mask is configured with RT vCPUs "
"that are not a subset of the vCPUs in the flavor. See "
"hw:cpu_realtime_mask or hw_cpu_realtime_mask")
raise exception.RealtimeMaskNotFoundOrInvalid(msg)
return vcpus_rt
# NOTE(stephenfin): This must be public as it's used elsewhere
def get_emulator_thread_policy_constraint(
flavor: 'objects.Flavor',
) -> ty.Optional[str]:
"""Validate and return the requested emulator threads policy.
:param flavor: ``nova.objects.Flavor`` instance
:raises: exception.InvalidEmulatorThreadsPolicy if mask was not found or
is invalid.
:returns: The emulator thread policy requested, else None.
"""
emu_threads_policy = flavor.get('extra_specs', {}).get(
'hw:emulator_threads_policy')
if not emu_threads_policy:
return None
if emu_threads_policy not in fields.CPUEmulatorThreadsPolicy.ALL:
raise exception.InvalidEmulatorThreadsPolicy(
requested=emu_threads_policy,
available=str(fields.CPUEmulatorThreadsPolicy.ALL))
return emu_threads_policy
def get_pci_numa_policy_constraint(flavor, image_meta):
"""Return pci numa affinity policy or None.
:param flavor: a flavor object to read extra specs from
:param image_meta: nova.objects.ImageMeta object instance
:raises: nova.exception.ImagePCINUMAPolicyForbidden
:raises: nova.exception.InvalidPCINUMAAffinity
"""
flavor_policy, image_policy = _get_flavor_image_meta(
'pci_numa_affinity_policy', flavor, image_meta)
if flavor_policy and image_policy and flavor_policy != image_policy:
raise exception.ImagePCINUMAPolicyForbidden()
policy = flavor_policy or image_policy
if policy and policy not in fields.PCINUMAAffinityPolicy.ALL:
raise exception.InvalidPCINUMAAffinity(policy=policy)
return policy
def get_vtpm_constraint(
flavor: 'objects.Flavor',
image_meta: 'objects.ImageMeta',
) -> ty.Optional[VTPMConfig]:
"""Validate and return the requested vTPM configuration.
:param flavor: ``nova.objects.Flavor`` instance
:param image_meta: ``nova.objects.ImageMeta`` instance
:raises: nova.exception.FlavorImageConflict if a value is specified in both
the flavor and the image, but the values do not match
:raises: nova.exception.Invalid if a value or combination of values is
invalid
:returns: A named tuple containing the vTPM version and model, else None.
"""
version = _get_unique_flavor_image_meta('tpm_version', flavor, image_meta)
if version is None:
return None
if version not in fields.TPMVersion.ALL:
raise exception.Invalid(
"Invalid TPM version %(version)r. Allowed values: %(valid)s." %
{'version': version, 'valid': ', '.join(fields.TPMVersion.ALL)}
)
model = _get_unique_flavor_image_meta('tpm_model', flavor, image_meta)
if model is None:
# model defaults to TIS
model = fields.TPMModel.TIS
elif model not in fields.TPMModel.ALL:
raise exception.Invalid(
"Invalid TPM model %(model)r. Allowed values: %(valid)s." %
{'model': model, 'valid': ', '.join(fields.TPMModel.ALL)}
)
elif model == fields.TPMModel.CRB and version != fields.TPMVersion.v2_0:
raise exception.Invalid(
"TPM model CRB is only valid with TPM version 2.0."
)
return VTPMConfig(version, model)
def numa_get_constraints(flavor, image_meta):
"""Return topology related to input request.
:param flavor: a flavor object to read extra specs from
:param image_meta: nova.objects.ImageMeta object instance
:raises: exception.InvalidNUMANodesNumber if the number of NUMA
nodes is less than 1 or not an integer
:raises: exception.ImageNUMATopologyForbidden if an attempt is made
to override flavor settings with image properties
:raises: exception.MemoryPageSizeInvalid if flavor extra spec or
image metadata provides an invalid hugepage value
:raises: exception.MemoryPageSizeForbidden if flavor extra spec
request conflicts with image metadata request
:raises: exception.ImageNUMATopologyIncomplete if the image
properties are not correctly specified
:raises: exception.ImageNUMATopologyAsymmetric if the number of
NUMA nodes is not a factor of the requested total CPUs or
memory
:raises: exception.ImageNUMATopologyCPUOutOfRange if an instance
CPU given in a NUMA mapping is not valid
:raises: exception.ImageNUMATopologyCPUDuplicates if an instance
CPU is specified in CPU mappings for two NUMA nodes
:raises: exception.ImageNUMATopologyCPUsUnassigned if an instance
CPU given in a NUMA mapping is not assigned to any NUMA node
:raises: exception.ImageNUMATopologyMemoryOutOfRange if sum of memory from
each NUMA node is not equal with total requested memory
:raises: exception.ImageCPUPinningForbidden if a CPU policy
specified in a flavor conflicts with one defined in image
metadata
:raises: exception.RealtimeConfigurationInvalid if realtime is
requested but dedicated CPU policy is not also requested
:raises: exception.RealtimeMaskNotFoundOrInvalid if realtime is
requested but no mask provided
:raises: exception.CPUThreadPolicyConfigurationInvalid if a CPU thread
policy conflicts with CPU allocation policy
:raises: exception.ImageCPUThreadPolicyForbidden if a CPU thread policy
specified in a flavor conflicts with one defined in image metadata
:raises: exception.BadRequirementEmulatorThreadsPolicy if CPU emulator
threads policy conflicts with CPU allocation policy
:raises: exception.InvalidCPUAllocationPolicy if policy is defined with
invalid value in image or flavor.
:raises: exception.InvalidCPUThreadAllocationPolicy if policy is defined
with invalid value in image or flavor.
:raises: exception.InvalidRequest if there is a conflict between explicitly
and implicitly requested resources of hyperthreading traits
:raises: exception.RequiredMixedInstancePolicy if dedicated CPU mask is
provided in flavor while CPU policy is not 'mixed'.
:raises: exception.RequiredMixedOrRealtimeCPUMask the mixed policy instance
dedicated CPU mask can only be specified through either
'hw:cpu_realtime_mask' or 'hw:cpu_dedicated_mask', not both.
:raises: exception.InvalidMixedInstanceDedicatedMask if specify an invalid
CPU mask for 'hw:cpu_dedicated_mask'.
:returns: objects.InstanceNUMATopology, or None
"""
cpu_policy = get_cpu_policy_constraint(flavor, image_meta)
cpu_thread_policy = get_cpu_thread_policy_constraint(flavor, image_meta)
realtime_cpus = get_realtime_cpu_constraint(flavor, image_meta)
dedicated_cpus = get_dedicated_cpu_constraint(flavor)
emu_threads_policy = get_emulator_thread_policy_constraint(flavor)
# handle explicit VCPU/PCPU resource requests and the HW_CPU_HYPERTHREADING
# trait
requested_vcpus, requested_pcpus = _get_vcpu_pcpu_resources(flavor)
if cpu_policy and (requested_vcpus or requested_pcpus):
raise exception.InvalidRequest(
"It is not possible to use the 'resources:VCPU' or "
"'resources:PCPU' extra specs in combination with the "
"'hw:cpu_policy' extra spec or 'hw_cpu_policy' image metadata "
"property; use one or the other")
if requested_vcpus and requested_pcpus:
raise exception.InvalidRequest(
"It is not possible to specify both 'resources:VCPU' and "
"'resources:PCPU' extra specs; use one or the other")
if requested_pcpus:
if (emu_threads_policy == fields.CPUEmulatorThreadsPolicy.ISOLATE and
flavor.vcpus + 1 != requested_pcpus):
raise exception.InvalidRequest(
"You have requested 'hw:emulator_threads_policy=isolate' but "
"have not requested sufficient PCPUs to handle this policy; "
"you must allocate exactly flavor.vcpus + 1 PCPUs.")
if (emu_threads_policy != fields.CPUEmulatorThreadsPolicy.ISOLATE and
flavor.vcpus != requested_pcpus):
raise exception.InvalidRequest(
"There is a mismatch between the number of PCPUs requested "
"via 'resourcesNN:PCPU' and the flavor); you must allocate "
"exactly flavor.vcpus PCPUs")
cpu_policy = fields.CPUAllocationPolicy.DEDICATED
if requested_vcpus:
# NOTE(stephenfin): It would be nice if we could error out if
# flavor.vcpus != resources:PCPU, but that would be a breaking change.
# Better to wait until we remove flavor.vcpus or something
cpu_policy = fields.CPUAllocationPolicy.SHARED
hyperthreading_trait = _get_hyperthreading_trait(flavor, image_meta)
if cpu_thread_policy and hyperthreading_trait:
raise exception.InvalidRequest(
"It is not possible to use the 'trait:HW_CPU_HYPERTHREADING' "
"extra spec in combination with the 'hw:cpu_thread_policy' "
"extra spec or 'hw_cpu_thread_policy' image metadata property; "
"use one or the other")
if hyperthreading_trait == 'forbidden':
cpu_thread_policy = fields.CPUThreadAllocationPolicy.ISOLATE
elif hyperthreading_trait == 'required':
cpu_thread_policy = fields.CPUThreadAllocationPolicy.REQUIRE
# sanity checks
if cpu_policy in (fields.CPUAllocationPolicy.SHARED, None):
if cpu_thread_policy:
raise exception.CPUThreadPolicyConfigurationInvalid()
if emu_threads_policy == fields.CPUEmulatorThreadsPolicy.ISOLATE:
raise exception.BadRequirementEmulatorThreadsPolicy()
# 'hw:cpu_dedicated_mask' should not be defined in a flavor with
# 'shared' policy.
if dedicated_cpus:
raise exception.RequiredMixedInstancePolicy()
if realtime_cpus:
raise exception.RealtimeConfigurationInvalid()
elif cpu_policy == fields.CPUAllocationPolicy.DEDICATED:
# 'hw:cpu_dedicated_mask' should not be defined in a flavor with
# 'dedicated' policy.
if dedicated_cpus:
raise exception.RequiredMixedInstancePolicy()
else: # MIXED
if realtime_cpus and dedicated_cpus:
raise exception.RequiredMixedOrRealtimeCPUMask()
if not (realtime_cpus or dedicated_cpus):
raise exception.RequiredMixedOrRealtimeCPUMask()
    # NOTE(huaqiang): If using mixed with realtime, then cores listed in
# the realtime mask are dedicated and everything else is shared.
dedicated_cpus = dedicated_cpus or realtime_cpus
nodes = _get_numa_node_count_constraint(flavor, image_meta)
pagesize = _get_numa_pagesize_constraint(flavor, image_meta)
vpmems = get_vpmems(flavor)
    # If 'hw:cpu_dedicated_mask' is not found in the flavor extra specs, the
    # 'dedicated_cpus' variable is None, whereas we want it to be an empty set.
dedicated_cpus = dedicated_cpus or set()
if cpu_policy == fields.CPUAllocationPolicy.DEDICATED:
        # But for an instance with the 'dedicated' CPU allocation policy, all
        # CPUs are 'dedicated' CPUs, each pinned 1:1 to a host CPU.
dedicated_cpus = set(range(flavor.vcpus))
# NOTE(stephenfin): There are currently four things that will configure a
# NUMA topology for an instance:
#
# - The user explicitly requesting one
# - The use of CPU pinning
# - The use of hugepages
# - The use of vPMEM
if nodes or pagesize or vpmems or cpu_policy in (
fields.CPUAllocationPolicy.DEDICATED,
fields.CPUAllocationPolicy.MIXED,
):
# NOTE(huaqiang): Here we build the instance dedicated CPU set and the
# shared CPU set, through 'pcpus' and 'vcpus' respectively,
# which will be used later to calculate the per-NUMA-cell CPU set.
cpus = set(range(flavor.vcpus))
pcpus = dedicated_cpus
vcpus = cpus - pcpus
nodes = nodes or 1
cpu_list = _get_numa_cpu_constraint(flavor, image_meta)
mem_list = _get_numa_mem_constraint(flavor, image_meta)
if cpu_list is None and mem_list is None:
numa_topology = _get_numa_topology_auto(
nodes, flavor, vcpus, pcpus,
)
elif cpu_list is not None and mem_list is not None:
# If any node has data set, all nodes must have data set
if len(cpu_list) != nodes or len(mem_list) != nodes:
raise exception.ImageNUMATopologyIncomplete()
numa_topology = _get_numa_topology_manual(
nodes, flavor, vcpus, pcpus, cpu_list, mem_list
)
else:
# If one property list is specified both must be
raise exception.ImageNUMATopologyIncomplete()
# We currently support the same pagesize, CPU policy and CPU thread
# policy for all cells, but these are still stored on a per-cell
# basis :(
for c in numa_topology.cells:
setattr(c, 'pagesize', pagesize)
setattr(c, 'cpu_policy', cpu_policy)
setattr(c, 'cpu_thread_policy', cpu_thread_policy)
# ...but emulator threads policy is not \o/
numa_topology.emulator_threads_policy = emu_threads_policy
else:
numa_topology = None
return numa_topology
def _numa_cells_support_network_metadata(
host_topology: 'objects.NUMATopology',
chosen_host_cells: ty.List['objects.NUMACell'],
network_metadata: 'objects.NetworkMetadata',
) -> bool:
"""Determine whether the cells can accept the network requests.
:param host_topology: The entire host topology, used to find non-chosen
host cells.
:param chosen_host_cells: List of NUMACells to extract possible network
NUMA affinity from.
:param network_metadata: The combined summary of physnets and tunneled
networks required by this topology or None.
:return: True if any NUMA affinity constraints for requested networks can
be satisfied, else False
"""
if not network_metadata:
return True
required_physnets: ty.Set[str] = set()
if 'physnets' in network_metadata:
# use set() to avoid modifying the original data structure
required_physnets = set(network_metadata.physnets)
required_tunnel: bool = False
if 'tunneled' in network_metadata:
required_tunnel = network_metadata.tunneled
if required_physnets:
# identify requested physnets that have an affinity to any of our
# chosen host NUMA cells
for host_cell in chosen_host_cells:
if 'network_metadata' not in host_cell:
continue
# if one of these cells provides affinity for one or more physnets,
# drop said physnet(s) from the list we're searching for
required_physnets -= required_physnets.intersection(
host_cell.network_metadata.physnets)
# however, if we still require some level of NUMA affinity, we need
# to make sure one of the other NUMA cells isn't providing that; note
# that NUMA affinity might not be provided for all physnets so we are
# in effect skipping these
for host_cell in host_topology.cells:
if 'network_metadata' not in host_cell:
continue
# if one of these cells provides affinity for one or more physnets,
# we need to fail because we should be using that node and are not
if required_physnets.intersection(
host_cell.network_metadata.physnets):
return False
if required_tunnel:
# identify if tunneled networks have an affinity to any of our chosen
# host NUMA cells
for host_cell in chosen_host_cells:
if 'network_metadata' not in host_cell:
continue
if host_cell.network_metadata.tunneled:
return True
# however, if we still require some level of NUMA affinity, we need to
# make sure one of the other NUMA cells isn't providing that; note
# that, as with physnets, NUMA affinity might not be defined for
# tunneled networks and we'll simply continue if this is the case
for host_cell in host_topology.cells:
if 'network_metadata' not in host_cell:
continue
if host_cell.network_metadata.tunneled:
return False
return True
def numa_fit_instance_to_host(
host_topology: 'objects.NUMATopology',
instance_topology: 'objects.InstanceNUMATopology',
    limits: ty.Optional['objects.NUMATopologyLimits'] = None,
pci_requests: ty.Optional['objects.InstancePCIRequests'] = None,
pci_stats: ty.Optional[stats.PciDeviceStats] = None,
):
"""Fit the instance topology onto the host topology.
Given a host, instance topology, and (optional) limits, attempt to
fit instance cells onto all permutations of host cells by calling
the _fit_instance_cell method, and return a new InstanceNUMATopology
with its cell ids set to host cell ids of the first successful
permutation, or None.
:param host_topology: objects.NUMATopology object to fit an
instance on
:param instance_topology: objects.InstanceNUMATopology to be fitted
:param limits: objects.NUMATopologyLimits that defines limits
:param pci_requests: instance pci_requests
:param pci_stats: pci_stats for the host
:returns: objects.InstanceNUMATopology with its cell IDs set to host
cell ids of the first successful permutation, or None
"""
if not (host_topology and instance_topology):
LOG.debug("Require both a host and instance NUMA topology to "
"fit instance on host.")
return
elif len(host_topology) < len(instance_topology):
LOG.debug("There are not enough NUMA nodes on the system to schedule "
"the instance correctly. Required: %(required)s, actual: "
"%(actual)s",
{'required': len(instance_topology),
'actual': len(host_topology)})
return
emulator_threads_policy = None
if 'emulator_threads_policy' in instance_topology:
emulator_threads_policy = instance_topology.emulator_threads_policy
network_metadata = None
if limits and 'network_metadata' in limits:
network_metadata = limits.network_metadata
host_cells = host_topology.cells
# If PCI device(s) are not required, prefer host cells that don't have
# devices attached. Presence of a given numa_node in a PCI pool is
# indicative of a PCI device being associated with that node
if not pci_requests and pci_stats:
# TODO(stephenfin): pci_stats can't be None here but mypy can't figure
# that out for some reason
host_cells = sorted(host_cells, key=lambda cell: cell.id in [
pool['numa_node'] for pool in pci_stats.pools]) # type: ignore
for host_cell_perm in itertools.permutations(
host_cells, len(instance_topology)):
chosen_instance_cells: ty.List['objects.InstanceNUMACell'] = []
chosen_host_cells: ty.List['objects.NUMACell'] = []
for host_cell, instance_cell in zip(
host_cell_perm, instance_topology.cells):
try:
cpuset_reserved = 0
if (instance_topology.emulator_threads_isolated and
len(chosen_instance_cells) == 0):
                    # For the case of isolated emulator threads, to make it
                    # predictable where that CPU overhead is located, we
                    # always configure it to be on the host NUMA node
                    # associated with guest NUMA node 0.
cpuset_reserved = 1
got_cell = _numa_fit_instance_cell(
host_cell, instance_cell, limits, cpuset_reserved)
except exception.MemoryPageSizeNotSupported:
                # This exception will be raised if the instance cell's
# custom pagesize is not supported with host cell in
# _numa_cell_supports_pagesize_request function.
break
if got_cell is None:
break
chosen_host_cells.append(host_cell)
chosen_instance_cells.append(got_cell)
if len(chosen_instance_cells) != len(host_cell_perm):
continue
if pci_requests and pci_stats and not pci_stats.support_requests(
pci_requests, chosen_instance_cells):
continue
if network_metadata and not _numa_cells_support_network_metadata(
host_topology, chosen_host_cells, network_metadata):
continue
return objects.InstanceNUMATopology(
cells=chosen_instance_cells,
emulator_threads_policy=emulator_threads_policy)
def numa_get_reserved_huge_pages():
"""Returns reserved memory pages from host option.
Based from the compute node option reserved_huge_pages, generate
a well formatted list of dict which can be used to build a valid
NUMATopology.
:raises: exception.InvalidReservedMemoryPagesOption when
reserved_huge_pages option is not correctly set.
:returns: A dict of dicts keyed by NUMA node IDs; keys of child dict
are pages size and values of the number reserved.
"""
if not CONF.reserved_huge_pages:
return {}
try:
bucket: ty.Dict[int, ty.Dict[int, int]] = collections.defaultdict(dict)
for cfg in CONF.reserved_huge_pages:
try:
pagesize = int(cfg['size'])
except ValueError:
pagesize = strutils.string_to_bytes(
cfg['size'], return_int=True) / units.Ki
bucket[int(cfg['node'])][pagesize] = int(cfg['count'])
except (ValueError, TypeError, KeyError):
raise exception.InvalidReservedMemoryPagesOption(
conf=CONF.reserved_huge_pages)
return bucket
def _get_smallest_pagesize(host_cell):
"""Returns the smallest available page size based on hostcell"""
avail_pagesize = [page.size_kb for page in host_cell.mempages]
avail_pagesize.sort()
return avail_pagesize[0]
def _numa_pagesize_usage_from_cell(host_cell, instance_cell, sign):
if 'pagesize' in instance_cell and instance_cell.pagesize:
pagesize = instance_cell.pagesize
else:
pagesize = _get_smallest_pagesize(host_cell)
topo = []
for pages in host_cell.mempages:
if pages.size_kb == pagesize:
topo.append(objects.NUMAPagesTopology(
size_kb=pages.size_kb,
total=pages.total,
used=max(0, pages.used +
instance_cell.memory * units.Ki /
pages.size_kb * sign),
reserved=pages.reserved if 'reserved' in pages else 0))
else:
topo.append(pages)
return topo
def numa_usage_from_instance_numa(host_topology, instance_topology,
free=False):
"""Update the host topology usage.
Update the host NUMA topology based on usage by the provided instance NUMA
topology.
:param host_topology: objects.NUMATopology to update usage information
:param instance_topology: objects.InstanceNUMATopology from which to
retrieve usage information.
:param free: If true, decrease, rather than increase, host usage based on
instance usage.
:returns: Updated objects.NUMATopology for host
"""
if not host_topology or not instance_topology:
return host_topology
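    # 'sign' below is +1 when accounting a new instance and -1 when freeing
    # one, so the same arithmetic handles both claiming and releasing usage.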
cells = []
sign = -1 if free else 1
for host_cell in host_topology.cells:
memory_usage = host_cell.memory_usage
shared_cpus_usage = host_cell.cpu_usage
new_cell = objects.NUMACell(
id=host_cell.id,
cpuset=host_cell.cpuset,
pcpuset=host_cell.pcpuset,
memory=host_cell.memory,
cpu_usage=0,
memory_usage=0,
mempages=host_cell.mempages,
pinned_cpus=host_cell.pinned_cpus,
siblings=host_cell.siblings)
if 'network_metadata' in host_cell:
new_cell.network_metadata = host_cell.network_metadata
for cellid, instance_cell in enumerate(instance_topology.cells):
if instance_cell.id != host_cell.id:
continue
new_cell.mempages = _numa_pagesize_usage_from_cell(
new_cell, instance_cell, sign)
memory_usage = memory_usage + sign * instance_cell.memory
shared_cpus_usage += sign * len(instance_cell.cpuset)
if instance_cell.cpu_policy in (
None, fields.CPUAllocationPolicy.SHARED,
):
continue
pinned_cpus = set(instance_cell.cpu_pinning.values())
if instance_cell.cpuset_reserved:
pinned_cpus |= instance_cell.cpuset_reserved
if free:
if (instance_cell.cpu_thread_policy ==
fields.CPUThreadAllocationPolicy.ISOLATE):
new_cell.unpin_cpus_with_siblings(pinned_cpus)
else:
new_cell.unpin_cpus(pinned_cpus)
else:
if (instance_cell.cpu_thread_policy ==
fields.CPUThreadAllocationPolicy.ISOLATE):
new_cell.pin_cpus_with_siblings(pinned_cpus)
else:
new_cell.pin_cpus(pinned_cpus)
# NOTE(stephenfin): We don't need to set 'pinned_cpus' here since that
# was done in the above '(un)pin_cpus(_with_siblings)' functions
new_cell.memory_usage = max(0, memory_usage)
new_cell.cpu_usage = max(0, shared_cpus_usage)
cells.append(new_cell)
return objects.NUMATopology(cells=cells)
def get_vpmems(flavor):
"""Return vpmems related to input request.
:param flavor: a flavor object to read extra specs from
:returns: a vpmem label list
"""
vpmems_info = flavor.get('extra_specs', {}).get('hw:pmem')
if not vpmems_info:
return []
vpmem_labels = vpmems_info.split(',')
formed_labels = []
for label in vpmem_labels:
formed_label = label.strip()
if formed_label:
formed_labels.append(formed_label)
return formed_labels
def check_hw_rescue_props(image_meta):
"""Confirm that hw_rescue_* image properties are present.
"""
hw_rescue_props = ['hw_rescue_device', 'hw_rescue_bus']
return any(key in image_meta.properties for key in hw_rescue_props)
|
apache-2.0
| -370,589,048,876,975,900 | 39.832796 | 103 | 0.630892 | false |
maulik13/django-user-registration
|
user_registration/backends/default.py
|
1
|
2007
|
from django.contrib.sites.models import Site
from django.contrib.sites.models import RequestSite
from user_registration.models import UserRegistration
class DefaultRegistrationBackend(object):
"""
    A backend that defines how the registration and activation processes work.
    @register: What to do after valid registration form data is received
@activate: Activation process for a user based on registration data
@is_registration_open: defines if registration is open
"""
def register(self, request, **kwargs):
"""
        The registration process is defined in this method. It should do the
        following:
        1. Store the appropriate data based on your logic
        2. Send an email / SMS or perform any other action required by the
           registration process
        'kwargs' should contain all the required parameters to create a user;
        we can confirm that by using the REQUIRED_FIELDS list + USERNAME_FIELD
        in the User model.
"""
# create the user and registration data for this user
new_user, reg_data = UserRegistration.objects.register_user(**kwargs)
# Send an email
if Site._meta.installed:
site = Site.objects.get_current()
else:
site = RequestSite(request)
reg_data.send_activation_email(site)
return new_user
def activate(self, request, activation_key):
"""
        The activation process should be defined here. By default, it only
        checks the activation key when the user accesses this URL.
        You could also check against a secret code that should be provided by
        the user in addition to the key. This code can be sent to the user
        during the registration process by email, SMS, etc.
"""
activated = UserRegistration.objects.activate_user(activation_key)
return activated
def is_registration_open(self):
"""
Override this method to add logic for deciding when registration is allowed
"""
return True
|
mit
| -1,950,066,381,843,887,600 | 38.352941 | 92 | 0.683109 | false |
vojtechtrefny/anaconda
|
pyanaconda/anaconda.py
|
1
|
8728
|
# anaconda: The Red Hat Linux Installation program
#
# Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
# Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): Brent Fox <[email protected]>
# Mike Fulbright <[email protected]>
# Jakub Jelinek <[email protected]>
# Jeremy Katz <[email protected]>
# Chris Lumens <[email protected]>
# Paul Nasrat <[email protected]>
# Erik Troan <[email protected]>
# Matt Wilson <[email protected]>
#
import os
import sys
import stat
from glob import glob
from tempfile import mkstemp
import threading
from pyanaconda.bootloader import get_bootloader
from pyanaconda import constants
from pyanaconda import iutil
from pyanaconda import addons
import logging
log = logging.getLogger("anaconda")
stdoutLog = logging.getLogger("anaconda.stdout")
class Anaconda(object):
def __init__(self):
from pyanaconda import desktop
self._bootloader = None
self.canReIPL = False
self.desktop = desktop.Desktop()
self.dir = None
self.displayMode = None
self.id = None
self._instClass = None
self._intf = None
self.isHeadless = False
self.ksdata = None
self.mediaDevice = None
self.methodstr = None
self.opts = None
self._payload = None
self.proxy = None
self.proxyUsername = None
self.proxyPassword = None
self.reIPLMessage = None
self.rescue_mount = True
self.rootParts = None
self.stage2 = None
self._storage = None
self.updateSrc = None
self.mehConfig = None
# *sigh* we still need to be able to write this out
self.xdriver = None
# Data for inhibiting the screensaver
self.dbus_session_connection = None
self.dbus_inhibit_id = None
# This is used to synchronize Gtk.main calls between the graphical
# interface and error dialogs. Whoever gets to their initialization code
# first will lock gui_initializing
self.gui_initialized = threading.Lock()
@property
def bootloader(self):
if not self._bootloader:
self._bootloader = get_bootloader()
return self._bootloader
@property
def instClass(self):
if not self._instClass:
from pyanaconda.installclass import DefaultInstall
self._instClass = DefaultInstall()
return self._instClass
def _getInterface(self):
return self._intf
def _setInterface(self, v):
# "lambda cannot contain assignment"
self._intf = v
def _delInterface(self):
del self._intf
intf = property(_getInterface, _setInterface, _delInterface)
@property
def payload(self):
# Try to find the packaging payload class. First try the install
# class. If it doesn't give us one, fall back to the default.
if not self._payload:
klass = self.instClass.getBackend()
if not klass:
from pyanaconda.flags import flags
if self.ksdata.ostreesetup.seen:
from pyanaconda.packaging.rpmostreepayload import RPMOSTreePayload
klass = RPMOSTreePayload
elif flags.livecdInstall:
from pyanaconda.packaging.livepayload import LiveImagePayload
klass = LiveImagePayload
elif self.ksdata.method.method == "liveimg":
from pyanaconda.packaging.livepayload import LiveImageKSPayload
klass = LiveImageKSPayload
else:
from pyanaconda.packaging.dnfpayload import DNFPayload
klass = DNFPayload
self._payload = klass(self.ksdata)
return self._payload
@property
def protected(self):
specs = []
if os.path.exists("/run/initramfs/livedev") and \
stat.S_ISBLK(os.stat("/run/initramfs/livedev")[stat.ST_MODE]):
specs.append(os.readlink("/run/initramfs/livedev"))
if self.methodstr and self.methodstr.startswith("hd:"):
specs.append(self.methodstr[3:].split(":", 3)[0])
if self.stage2 and self.stage2.startswith("hd:"):
specs.append(self.stage2[3:].split(":", 3)[0])
# zRAM swap devices need to be protected
for zram_dev in glob("/dev/zram*"):
specs.append(zram_dev)
return specs
@property
def storage(self):
if not self._storage:
import blivet
self._storage = blivet.Blivet(ksdata=self.ksdata)
if self.instClass.defaultFS:
self._storage.setDefaultFSType(self.instClass.defaultFS)
return self._storage
def dumpState(self):
from meh import ExceptionInfo
from meh.dump import ReverseExceptionDump
from inspect import stack as _stack
from traceback import format_stack
# Skip the frames for dumpState and the signal handler.
stack = _stack()[2:]
stack.reverse()
exn = ReverseExceptionDump(ExceptionInfo(None, None, stack),
self.mehConfig)
# gather up info on the running threads
threads = "\nThreads\n-------\n"
for thread_id, frame in sys._current_frames().items():
threads += "\nThread %s\n" % (thread_id,)
threads += "".join(format_stack(frame))
# dump to a unique file
(fd, filename) = mkstemp(prefix="anaconda-tb-", dir="/tmp")
dump_text = exn.traceback_and_object_dump(self)
dump_text += threads
# Keep the str form for the text log below; only the encoded bytes go to the fd.
dump_text_bytes = dump_text.encode("utf-8")
iutil.eintr_retry_call(os.write, fd, dump_text_bytes)
iutil.eintr_retry_call(os.close, fd)
# append to a given file
with open("/tmp/anaconda-tb-all.log", "a+") as f:
f.write("--- traceback: %s ---\n" % filename)
f.write(dump_text + "\n")
def initInterface(self, addon_paths=None):
if self._intf:
raise RuntimeError("Second attempt to initialize the InstallInterface")
if self.displayMode == 'g':
from pyanaconda.ui.gui import GraphicalUserInterface
# Run the GUI in non-fullscreen mode, so live installs can still
# use the window manager
self._intf = GraphicalUserInterface(self.storage, self.payload,
self.instClass, gui_lock=self.gui_initialized,
fullscreen=False)
# needs to be refreshed now we know if gui or tui will take place
addon_paths = addons.collect_addon_paths(constants.ADDON_PATHS,
ui_subdir="gui")
elif self.displayMode in ['t', 'c']: # text and command line are the same
from pyanaconda.ui.tui import TextUserInterface
self._intf = TextUserInterface(self.storage, self.payload,
self.instClass)
# needs to be refreshed now we know if gui or tui will take place
addon_paths = addons.collect_addon_paths(constants.ADDON_PATHS,
ui_subdir="tui")
else:
raise RuntimeError("Unsupported displayMode: %s" % self.displayMode)
if addon_paths:
self._intf.update_paths(addon_paths)
def writeXdriver(self, root=None):
# this should go away at some point, but until it does, we
# need to keep it around.
if self.xdriver is None:
return
if root is None:
root = iutil.getSysroot()
if not os.path.isdir("%s/etc/X11" %(root,)):
os.makedirs("%s/etc/X11" %(root,), mode=0o755)
f = open("%s/etc/X11/xorg.conf" %(root,), 'w')
f.write('Section "Device"\n\tIdentifier "Videocard0"\n\tDriver "%s"\nEndSection\n' % self.xdriver)
f.close()
|
gpl-2.0
| 8,755,333,330,063,547,000 | 35.066116 | 106 | 0.601169 | false |
driver-pete/driver-pete-python-sandbox
|
driver_pete_python_sandbox/test_process.py
|
1
|
7363
|
import tempfile
from driver_pete_python_sandbox.download import S3
from driver_pete_python_sandbox.filter_gps_processor import filter_gps_data,\
FilterChain, DuplicateTimeFilter,\
VelocityOutliersFilter, apply_filter
from driver_pete_python_sandbox.trajectory_reader import read_compressed_trajectory
from driver_pete_python_sandbox.find_routes import find_routes, RoutesFinder
from driver_pete_python_sandbox.find_enpoints import find_endpoints,\
FindEndpoints
import numpy as np
from driver_pete_python_sandbox.gmaps import trajectory_point_to_str
from driver_pete_python_sandbox.process import Path
def _extract_indices(data, paths):
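# For each extracted path, recover the row indices of its first and last points
# in the full trajectory by matching on the first column (timestamps), data[:, 0].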
result = []
for p in paths:
indices = [(data[:, 0] == p[0, 0]).nonzero()[0][0],
(data[:, 0] == p[p.shape[0]-1, 0]).nonzero()[0][0]]
result.append(indices)
return result
def _get_point_index(data, point):
return (data[:, 0] == point[0]).nonzero()[0][0]
def test_finding_paths():
folder = tempfile.mkdtemp()
s3 = S3('driverpete-storage')
filename = s3.download("_testing/testing_merged_1", folder)
data = filter_gps_data(read_compressed_trajectory(filename))
endpoints = find_endpoints(data)
assert(len(endpoints) == 2)
print(trajectory_point_to_str(data, _get_point_index(data, endpoints[0])))
print(trajectory_point_to_str(data, _get_point_index(data, endpoints[1])))
assert(_get_point_index(data, endpoints[0]) == 479)
assert(_get_point_index(data, endpoints[1]) == 670)
AtoB_paths, BtoA_paths = find_routes(data, endpoints, verbose=False)
AtoB_paths_indices = _extract_indices(data, AtoB_paths)
BtoA_paths_indices = _extract_indices(data, BtoA_paths)
print(AtoB_paths_indices)
print(BtoA_paths_indices)
assert(AtoB_paths_indices == [[488, 656], [947, 1117], [1364, 1549], [2216, 2401], [2630, 2898], [4400, 4526]])
assert(BtoA_paths_indices == [[134, 455], [683, 887], [1141, 1316], [1582, 1783], [2429, 2597], [3975, 4170]])
def test_finding_paths_with_state():
folder = tempfile.mkdtemp()
s3 = S3('driverpete-storage')
filename = s3.download("_testing/testing_merged_1", folder)
full_trajectory = read_compressed_trajectory(filename)
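# Process the trajectory piece by piece, carrying filter and endpoint-finder
# state from one piece to the next, to exercise the stateful API below.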
pieces = []
pieces.append(full_trajectory[0:480])
pieces.append(full_trajectory[480:2000])
pieces.append(full_trajectory[2000:3000])
pieces.append(full_trajectory[3000:4000])
pieces.append(full_trajectory[4000:])
filtered_pieces = []
endpoints = []
findendpoints_state = None
filter_state = None
for piece in pieces:
vel_filter = VelocityOutliersFilter()
if filter_state is not None:
vel_filter.set_state(filter_state)
filter = FilterChain([DuplicateTimeFilter(),
vel_filter])
filtered_piece = apply_filter(piece, filter)
filter_state = vel_filter.get_state()
filtered_pieces.append(filtered_piece)
finder = FindEndpoints(endpoints=endpoints)
if findendpoints_state is not None:
finder.set_state(findendpoints_state)
for d in filtered_piece:
finder.process(d)
endpoints = finder.get_endpoints()
findendpoints_state = finder.get_state()
data = np.vstack(filtered_pieces)
assert(_get_point_index(data, endpoints[0]) == 479)
assert(_get_point_index(data, endpoints[1]) == 670)
AtoB_paths = []
BtoA_paths = []
finder_current_route = []
finder_endpoint_index = None
for piece in filtered_pieces:
finder = RoutesFinder(endpoints)
finder.set_state(finder_current_route, finder_endpoint_index)
for d in piece:
finder.process(d)
finder_current_route, finder_endpoint_index = finder.get_state()
AtoB_paths_piece, BtoA_paths_piece = finder.get_routes()
AtoB_paths += AtoB_paths_piece
BtoA_paths += BtoA_paths_piece
AtoB_paths_indices = _extract_indices(data, AtoB_paths)
BtoA_paths_indices = _extract_indices(data, BtoA_paths)
print(AtoB_paths_indices)
print(BtoA_paths_indices)
assert(AtoB_paths_indices == [[488, 656], [947, 1117], [1364, 1549], [2216, 2401], [2630, 2898], [4400, 4526]])
assert(BtoA_paths_indices == [[134, 455], [683, 887], [1141, 1316], [1582, 1783], [2429, 2597], [3975, 4170]])
def test_finding_paths_with_state_2():
folder = tempfile.mkdtemp()
s3 = S3('driverpete-storage')
pieces = []
pieces_keys = [
"_testing/testing_sequence0/data/14-09-2015_09-15-01_PDT",
"_testing/testing_sequence0/data/14-09-2015_11-03-24_PDT",
"_testing/testing_sequence0/data/14-09-2015_13-49-55_PDT",
"_testing/testing_sequence0/data/14-09-2015_18-20-13_PDT",
"_testing/testing_sequence0/data/14-09-2015_19-59-23_PDT",
"_testing/testing_sequence0/data/15-09-2015_09-32-15_PDT",
"_testing/testing_sequence0/data/15-09-2015_22-31-21_PDT"
]
for k in pieces_keys:
filename = s3.download(k, folder)
pieces.append(read_compressed_trajectory(filename))
filtered_pieces = []
endpoints = []
findendpoints_state = None
filter_state = None
for pi, piece in enumerate(pieces):
vel_filter = VelocityOutliersFilter()
if filter_state is not None:
vel_filter.set_state(filter_state)
filter = FilterChain([DuplicateTimeFilter(),
vel_filter])
filtered_piece = apply_filter(piece, filter)
filter_state = vel_filter.get_state()
filtered_pieces.append(filtered_piece)
finder = FindEndpoints(endpoints=endpoints)
if findendpoints_state is not None:
finder.set_state(findendpoints_state)
for i, d in enumerate(filtered_piece):
finder.process(d)
endpoints = finder.get_endpoints()
findendpoints_state = finder.get_state()
data = np.vstack(filtered_pieces)
print(trajectory_point_to_str(data, _get_point_index(data, endpoints[0])))
print(trajectory_point_to_str(data, _get_point_index(data, endpoints[1])))
assert(len(endpoints) == 2)
assert(_get_point_index(data, endpoints[0]) == 5)
assert(_get_point_index(data, endpoints[1]) == 122)
AtoB_paths = []
BtoA_paths = []
finder_current_route = []
finder_endpoint_index = None
for piece in filtered_pieces:
finder = RoutesFinder(endpoints)
finder.set_state(finder_current_route, finder_endpoint_index)
for d in piece:
finder.process(d)
finder_current_route, finder_endpoint_index = finder.get_state()
AtoB_paths_piece, BtoA_paths_piece = finder.get_routes()
AtoB_paths += AtoB_paths_piece
BtoA_paths += BtoA_paths_piece
AtoB_paths_indices = _extract_indices(data, AtoB_paths)
BtoA_paths_indices = _extract_indices(data, BtoA_paths)
print(AtoB_paths_indices)
print(BtoA_paths_indices)
assert(AtoB_paths_indices == [[11, 111], [556, 730]])
assert(BtoA_paths_indices == [[288, 387]])
if __name__ == '__main__':
test_finding_paths()
test_finding_paths_with_state()
test_finding_paths_with_state_2()
|
apache-2.0
| -457,592,966,986,342,300 | 34.917073 | 115 | 0.636969 | false |
michaelBenin/sqlalchemy
|
lib/sqlalchemy/testing/plugin/noseplugin.py
|
1
|
2735
|
# plugin/noseplugin.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Enhance nose with extra options and behaviors for running SQLAlchemy tests.
Must be run via ./sqla_nose.py so that it is imported in the expected
way (e.g. as a package-less import).
"""
import os
import sys
from nose.plugins import Plugin
fixtures = None
# no package imports yet! this prevents us from tripping coverage
# too soon.
path = os.path.join(os.path.dirname(__file__), "plugin_base.py")
if sys.version_info >= (3,3):
from importlib import machinery
plugin_base = machinery.SourceFileLoader("plugin_base", path).load_module()
else:
import imp
plugin_base = imp.load_source("plugin_base", path)
class NoseSQLAlchemy(Plugin):
enabled = True
name = 'sqla_testing'
score = 100
def options(self, parser, env=os.environ):
Plugin.options(self, parser, env)
opt = parser.add_option
def make_option(name, **kw):
callback_ = kw.pop("callback", None)
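# optparse invokes callbacks as (option, opt_str, value, parser), while
# plugin_base supplies them as (opt_str, value, parser); wrap to drop `option`.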
if callback_:
def wrap_(option, opt_str, value, parser):
callback_(opt_str, value, parser)
kw["callback"] = wrap_
opt(name, **kw)
plugin_base.setup_options(make_option)
plugin_base.read_config()
def configure(self, options, conf):
super(NoseSQLAlchemy, self).configure(options, conf)
plugin_base.pre_begin(options)
plugin_base.set_coverage_flag(options.enable_plugin_coverage)
global fixtures
from sqlalchemy.testing import fixtures
def begin(self):
plugin_base.post_begin()
def describeTest(self, test):
return ""
def wantFunction(self, fn):
if fn.__module__ is None:
return False
if fn.__module__.startswith('sqlalchemy.testing'):
return False
def wantClass(self, cls):
return plugin_base.want_class(cls)
def beforeTest(self, test):
plugin_base.before_test(test,
test.test.cls.__module__,
test.test.cls, test.test.method.__name__)
def afterTest(self, test):
plugin_base.after_test(test)
def startContext(self, ctx):
if not isinstance(ctx, type) \
or not issubclass(ctx, fixtures.TestBase):
return
plugin_base.start_test_class(ctx)
def stopContext(self, ctx):
if not isinstance(ctx, type) \
or not issubclass(ctx, fixtures.TestBase):
return
plugin_base.stop_test_class(ctx)
|
mit
| 1,261,401,323,931,881,700 | 28.095745 | 84 | 0.626325 | false |
vmalloc/dessert
|
tests/test_dessert.py
|
1
|
3611
|
from _pytest.assertion.rewrite import AssertionRewritingHook as PytestRewriteHook
import os
import shutil
import sys
from contextlib import contextmanager
from tempfile import mkdtemp
import emport
import dessert
import pytest
def test_dessert(module):
with pytest.raises(AssertionError) as error:
module.func()
assert 'dessert*' in str(error.value)
assert "where" in str(error.value)
assert "+" in str(error.value)
def test_disable_introspection(add_assert_message, module, assert_message):
with _disable_introspection():
with pytest.raises(AssertionError) as error:
module.func()
if not add_assert_message:
assert 'dessert*' in str(error.value)
assert "where" in str(error.value)
assert "+" in str(error.value)
else:
assert assert_message in str(error.value)
assert "+" not in str(error.value)
def test_warnings_from_rewrite(source_filename):
tmp_dir = os.path.dirname(source_filename)
full_path = os.path.join(tmp_dir, 'file_with_warnings.py')
with open(full_path, "w") as f:
f.write(r"""
import warnings
warnings.simplefilter('always')
warnings.warn('Some import warning')
def func():
assert True
""")
with dessert.rewrite_assertions_context():
with _disable_pytest_rewriting():
with pytest.warns(None) as caught:
emport.import_file(full_path)
[warning] = caught.list
assert warning.filename == full_path
@pytest.fixture(scope='session', autouse=True)
def mark_dessert():
# pylint: disable=protected-access
assert not dessert.rewrite._MARK_ASSERTION_INTROSPECTION
dessert.rewrite._MARK_ASSERTION_INTROSPECTION = True
@pytest.fixture
def module(request, source_filename):
with dessert.rewrite_assertions_context():
with _disable_pytest_rewriting():
module = emport.import_file(source_filename)
@request.addfinalizer
def drop_from_sys_modules(): # pylint: disable=unused-variable
sys.modules.pop(module.__name__)
return module
@contextmanager
def _disable_pytest_rewriting():
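# Temporarily remove pytest's own assertion-rewriting import hook from
# sys.meta_path so that only dessert's rewriting applies to the imported module.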
old_meta_path = sys.meta_path[:]
try:
for index, plugin in reversed(list(enumerate(sys.meta_path))):
if isinstance(plugin, PytestRewriteHook):
sys.meta_path.pop(index)
yield
finally:
sys.meta_path[:] = old_meta_path
@contextmanager
def _disable_introspection():
dessert.disable_message_introspection()
try:
yield
finally:
dessert.enable_message_introspection()
@pytest.fixture(params=[
"assert x() + y()",
"assert f(1) > g(100)",
"assert f(g(2)) == f(g(1))",
])
def assertion_line(request):
return request.param
@pytest.fixture(params=[True, False])
def add_assert_message(request):
return request.param
@pytest.fixture
def assert_message(request):
return 'msg'
@pytest.fixture
def source(assertion_line, add_assert_message, assert_message):
if add_assert_message:
assertion_line += ", '{}'".format(assert_message)
returned = """def f(x):
return x
x = lambda: 1
y = lambda: -1
g = h = f
def func():
variable = False
{0}
""".format(assertion_line)
return returned
@pytest.fixture
def source_filename(request, source):
path = mkdtemp()
@request.addfinalizer
def delete(): # pylint: disable=unused-variable
shutil.rmtree(path)
filename = os.path.join(path, "sourcefile.py")
with open(filename, "w") as f:
f.write(source)
return filename
|
mit
| -7,717,314,021,297,973,000 | 24.076389 | 81 | 0.656328 | false |
Galarzaa90/NabBot
|
cogs/tracking.py
|
1
|
83628
|
# Copyright 2019 Allan Galarza
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import datetime as dt
import logging
import pickle
import re
import time
from collections import defaultdict
from typing import List, NamedTuple, Union, Optional, Dict
import asyncpg
import discord
import tibiapy
from discord.ext import commands
from tibiapy import Death, Guild, OnlineCharacter, OtherCharacter, World
from nabbot import NabBot
from .utils import CogUtils, EMBED_LIMIT, FIELD_VALUE_LIMIT, checks, config, get_user_avatar, is_numeric, join_list, \
online_characters, safe_delete_message, split_params
from .utils.context import NabCtx
from .utils.database import DbChar, DbDeath, DbLevelUp, get_affected_count, get_server_property, PoolConn
from .utils.errors import CannotPaginate, NetworkError
from .utils.messages import death_messages_monster, death_messages_player, format_message, level_messages, \
split_message, weighed_choice, DeathMessageCondition, LevelCondition, SIMPLE_LEVEL, SIMPLE_DEATH, SIMPLE_PVP_DEATH
from .utils.pages import Pages, VocationPages
from .utils.tibia import HIGHSCORE_CATEGORIES, NabChar, get_character, get_current_server_save_time, get_guild, \
get_highscores, get_share_range, get_voc_abb, get_voc_emoji, get_world, tibia_worlds, normalize_vocation
log = logging.getLogger("nabbot")
# Storage used to keep a cache of guilds for watchlists
GUILD_CACHE = defaultdict(dict) # type: defaultdict[str, Dict[str, Guild]]
WATCHLIST_SEPARATOR = "·"
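# Separator used when appending the online count to a watchlist channel's name,
# e.g. a channel named "hunted" is renamed to "hunted·3" when 3 entries are online.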
class CharactersResult(NamedTuple):
skipped: List[OtherCharacter]
no_user: List[DbChar]
same_owner: List[DbChar]
different_user: List[DbChar]
new: List[NabChar]
all_skipped: bool
# region Database Helper classes
class Watchlist:
"""Represents a Watchlist from the database"""
def __init__(self, **kwargs):
self.server_id: int = kwargs.get("server_id")
self.channel_id: int = kwargs.get("channel_id")
self.message_id: int = kwargs.get("message_id")
self.user_id: int = kwargs.get("user_id")
self.show_count: bool = kwargs.get("show_count", True)
self.created: dt.datetime = kwargs.get("created")
# Not columns
self.entries: List['WatchlistEntry'] = []
self.world = None
self.content = ""
self.online_characters: List[OnlineCharacter] = []
self.online_guilds: List[Guild] = []
self.disbanded_guilds: List[str] = []
self.description = ""
@property
def online_count(self) -> int:
"""Total number of online characters across entries."""
return len(self.online_characters) + sum(g.online_count for g in self.online_guilds)
def __repr__(self):
return "<{0.__class__.__name__} server_id={0.server_id} channel_id={0.channel_id} message_id={0.message_id}>"\
.format(self)
async def add_entry(self, conn: PoolConn, name: str, is_guild: bool, user_id: int, reason: Optional[str]) ->\
Optional['WatchlistEntry']:
""" Adds an entry to the watchlist.
:param conn: Connection to the database.
:param name: Name of the character or guild.
:param is_guild: Whether the entry is a guild or not.
:param user_id: The user that created the entry.
:param reason: The reason for the entry.
:return: The new created entry or None if it already exists.
"""
try:
return await WatchlistEntry.insert(conn, self.channel_id, name, is_guild, user_id, reason)
except asyncpg.UniqueViolationError:
return None
async def get_entries(self, conn: PoolConn) -> List['WatchlistEntry']:
"""Gets all entries in this watchlist.
:param conn: Connection to the database.
:return: List of entries if any.
"""
return await WatchlistEntry.get_entries_by_channel(conn, self.channel_id)
async def update_message_id(self, conn: PoolConn, message_id: int):
"""Update's the message id.
:param conn: Connection to the database.
:param message_id: The new message id.
"""
await conn.execute("UPDATE watchlist SET message_id = $1 WHERE channel_id = $2", message_id, self.channel_id)
self.message_id = message_id
async def update_show_count(self, conn: PoolConn, show_count: bool):
"""Update's the show_count property.
If the property is True, the number of online entries will be shown in the channel's name.
:param conn: Connection to the database.
:param show_count: The property's new value.
"""
await conn.execute("UPDATE watchlist SET show_count = $1 WHERE channel_id = $2", show_count, self.channel_id)
self.show_count = show_count
@classmethod
async def insert(cls, conn: PoolConn, server_id: int, channel_id: int, user_id: int) -> 'Watchlist':
"""Adds a new watchlist to the database.
:param conn: Connection to the database.
:param server_id: The discord guild's id.
:param channel_id: The channel's id.
:param user_id: The user that created the watchlist.
:return: The created watchlist.
"""
row = await conn.fetchrow("INSERT INTO watchlist(server_id, channel_id, user_id) VALUES($1,$2,$3) RETURNING *",
server_id, channel_id, user_id)
return cls(**row)
@classmethod
async def get_by_channel_id(cls, conn: PoolConn, channel_id: int) -> Optional['Watchlist']:
"""Gets a watchlist corresponding to the channel id.
:param conn: Connection to the database.
:param channel_id: The id of the channel.
:return: The found watchlist, if any."""
row = await conn.fetchrow("SELECT * FROM watchlist WHERE channel_id = $1", channel_id)
if row is None:
return None
return cls(**row)
@classmethod
async def get_by_world(cls, conn: PoolConn, world: str) -> List['Watchlist']:
"""
Gets all watchlist from a Tibia world.
:param conn: Connection to the database.
:param world: The name of the world.
:return: A list of watchlists from the world.
"""
query = """SELECT t0.* FROM watchlist t0
LEFT JOIN server_property t1 ON t1.server_id = t0.server_id AND key = 'world'
WHERE value ? $1"""
rows = await conn.fetch(query, world)
return [cls(**row) for row in rows]
@classmethod
def sort_by_voc_and_level(cls):
"""Sorting function to order by vocation and then by level."""
return lambda char: (normalize_vocation(char.vocation), -char.level)
class WatchlistEntry:
"""Represents a watchlist entry."""
def __init__(self, **kwargs):
self.channel_id: int = kwargs.get("channel_id")
self.name: str = kwargs.get("name")
self.is_guild: bool = kwargs.get("is_guild", False)
self.reason: Optional[str] = kwargs.get("reason")
self.user_id: int = kwargs.get("user_id")
self.created: dt.datetime = kwargs.get("created")
async def remove(self, conn: PoolConn):
"""Removes a watchlist entry from the database.
:param conn: Connection to the database.
"""
await self.delete(conn, self.channel_id, self.name, self.is_guild)
@classmethod
async def delete(cls, conn: PoolConn, channel_id: int, name: str, is_guild: bool):
"""
:param conn: Connection to the databse.
:param channel_id: The id of the watchlist's channel.
:param name: The name of the entry.
:param is_guild: Whether the entry is a guild or a character.
"""
await conn.execute("DELETE FROM watchlist_entry WHERE channel_id = $1 AND lower(name) = $2 AND is_guild = $3",
channel_id, name.lower().strip(), is_guild)
@classmethod
async def get_by_name(cls, conn: PoolConn, channel_id: int, name: str, is_guild: bool) -> \
Optional['WatchlistEntry']:
"""Gets an entry by its name.
:param conn: Connection to the database.
:param channel_id: The id of the channel.
:param name: Name of the entry.
:param is_guild: Whether the entry is a guild or a character.
:return: The entry if found.
"""
row = await conn.fetchrow("SELECT * FROM watchlist_entry "
"WHERE channel_id = $1 AND lower(name) = $2 AND is_guild = $3",
channel_id, name.lower().strip(), is_guild)
if row is None:
return None
return cls(**row)
@classmethod
async def get_entries_by_channel(cls, conn, channel_id) -> List['WatchlistEntry']:
"""Gets entries related to a watchlist channel.
:param conn: Connection to the database.
:param channel_id: Id of the channel.
:return: A list of entries corresponding to the channel.
"""
rows = await conn.fetch("SELECT * FROM watchlist_entry WHERE channel_id = $1", channel_id)
return [cls(**row) for row in rows]
@classmethod
async def insert(cls, conn: PoolConn, channel_id: int, name: str, is_guild: bool, user_id: int, reason=None)\
-> Optional['WatchlistEntry']:
"""Inserts a watchlist entry into the database.
:param conn: Connection to the database.
:param channel_id: The id of the watchlist's channel.
:param name: Name of the entry.
:param is_guild: Whether the entry is a guild or a character.
:param user_id: The id of the user that added the entry.
:param reason: The reason for the entry.
:return: The inserted entry.
"""
row = await conn.fetchrow("INSERT INTO watchlist_entry(channel_id, name, is_guild, reason, user_id) "
"VALUES($1, $2, $3, $4, $5) RETURNING *", channel_id, name, is_guild, reason, user_id)
if row is None:
return None
return cls(**row)
# endregion
class Tracking(commands.Cog, CogUtils):
"""Commands related to NabBot's tracking system."""
def __init__(self, bot: NabBot):
self.bot = bot
self.scan_online_chars_task = bot.loop.create_task(self.scan_online_chars())
self.scan_highscores_task = bot.loop.create_task(self.scan_highscores())
self.world_tasks = {}
self.world_times = {}
# region Tasks
async def scan_deaths(self, world):
"""Iterates through online characters, checking if they have new deaths.
This task is created for every tracked world.
On every iteration, the last element is checked and reinserted at the beginning."""
#################################################
# Nezune's cave #
# Do not touch anything, enter at your own risk #
#################################################
tag = f"{self.tag}[{world}][scan_deaths]"
await self.bot.wait_until_ready()
log.info(f"{tag} Started")
while not self.bot.is_closed():
try:
await asyncio.sleep(config.death_scan_interval)
if len(online_characters[world]) == 0:
await asyncio.sleep(0.5)
continue
skip = False
# Pop last char in queue, reinsert it at the beginning
current_char = online_characters[world].pop()
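# Skip the Tibia.com lookup if this character was already checked in the last ~45 seconds.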
if hasattr(current_char, "last_check") and time.time() - current_char.last_check < 45:
skip = True
current_char.last_check = time.time()
online_characters[world].insert(0, current_char)
if not skip:
# Check for new death
char = await get_character(self.bot, current_char.name)
await self.compare_deaths(char)
else:
await asyncio.sleep(0.5)
except NetworkError:
await asyncio.sleep(0.3)
continue
except asyncio.CancelledError:
# Task was cancelled, so this is fine
break
except KeyError:
continue
except Exception as e:
log.exception(f"{tag} Exception: {e}")
continue
async def scan_highscores(self):
"""Scans the highscores, storing the results in the database.
The task checks if the last stored data is from the current server save or not."""
#################################################
# Nezune's cave #
# Do not touch anything, enter at your own risk #
#################################################
tag = f"{self.tag}[scan_highscores]"
await self.bot.wait_until_ready()
log.info(f"{tag} Started")
while not self.bot.is_closed():
if len(self.bot.tracked_worlds_list) == 0:
# If no worlds are tracked, just sleep, worlds might get registered later
await asyncio.sleep(10*60)
continue
for world in self.bot.tracked_worlds_list:
tag = f"{self.tag}[{world}](scan_highscores)"
world_count = 0
if world not in tibia_worlds:
log.warning(f"{tag} Tracked world is no longer a valid world.")
await asyncio.sleep(0.1)
try:
for key, values in HIGHSCORE_CATEGORIES.items():
# Check the last scan time, highscores are updated every server save
last_scan = await self.bot.pool.fetchval(
"SELECT last_scan FROM highscores WHERE world = $1 AND category = $2", world, key)
if last_scan:
last_scan_ss = get_current_server_save_time(last_scan)
current_ss = get_current_server_save_time()
# If the saved results are from the current server save, saving is skipped
if last_scan_ss >= current_ss:
log.debug(f"{tag} {values[0].name} | {values[1].name} | Already saved")
await asyncio.sleep(0.1)
continue
try:
highscores = await get_highscores(world, *values)
except NetworkError:
continue
await self.save_highscores(world, key, highscores)
except asyncio.CancelledError:
# Task was cancelled, so this is fine
break
except Exception:
log.exception(f"{tag}")
continue
if world_count:
log.info(f"{tag} {world_count:,} entries saved.")
await asyncio.sleep(5)
await asyncio.sleep(60*30)
async def scan_online_chars(self):
"""Scans tibia.com's character lists to store them locally.
An online list per world is created, with the online registered characters.
When a character enters the online list, their deaths are checked.
On every cycle, their levels are compared.
When a character leaves the online list, their levels and deaths are compared."""
#################################################
# Nezune's cave #
# Do not touch anything, enter at your own risk #
#################################################
await self.bot.wait_until_ready()
tag = f"{self.tag}[scan_online_chars]"
log.info(f"{tag} Task started")
try:
with open("data/online_list.dat", "rb") as f:
saved_list, timestamp = pickle.load(f)
if (time.time() - timestamp) < config.online_list_expiration:
online_characters.clear()
online_characters.update(saved_list)
count = len([c for v in online_characters.values() for c in v])
log.info(f"{tag} Loaded cached online list | {count:,} players")
else:
log.info(f"{tag} Cached online list is too old, discarding")
except FileNotFoundError:
pass
except (ValueError, pickle.PickleError):
log.info(f"{tag} Couldn't read cached online list.")
while not self.bot.is_closed():
try:
# Pop last server in queue, reinsert it at the beginning
current_world = tibia_worlds.pop()
tibia_worlds.insert(0, current_world)
if current_world.capitalize() not in self.bot.tracked_worlds_list:
await asyncio.sleep(0.1)
continue
if time.time() - self.world_times.get(current_world.capitalize(), 0) < config.online_scan_interval:
await asyncio.sleep(0.2)
continue
tag = f"{self.tag}[{current_world}][scan_online_chars]"
log.debug(f"{tag} Checking online list")
# Get online list for this server
try:
world = await get_world(current_world)
if world is None:
await asyncio.sleep(0.1)
continue
log.debug(f"{tag} {world.online_count} players online")
except NetworkError:
await asyncio.sleep(0.1)
continue
current_world_online = world.online_players
if len(current_world_online) == 0:
await asyncio.sleep(0.1)
continue
self.world_times[world.name] = time.time()
self.bot.dispatch("world_scanned", world)
# Save the online list in file
with open("data/online_list.dat", "wb") as f:
pickle.dump((online_characters, time.time()), f, protocol=pickle.HIGHEST_PROTOCOL)
if current_world not in online_characters:
online_characters[current_world] = []
# List of characters that are now offline
offline_list = [c for c in online_characters[current_world] if c not in current_world_online]
for offline_char in offline_list:
# Check if characters got level ups when they went offline
log.debug(f"{tag} Character no longer online | {offline_char.name}")
online_characters[current_world].remove(offline_char)
try:
_char = await get_character(self.bot, offline_char.name)
await self.compare_levels(_char)
await self.compare_deaths(_char)
except NetworkError:
continue
# Add new online chars and announce level differences
for server_char in current_world_online:
db_char = await DbChar.get_by_name(self.bot.pool, server_char.name)
if db_char:
try:
if server_char not in online_characters[current_world]:
# If the character wasn't in the online list we add them
# (We insert them at the beginning of the list to avoid messing with the checks order)
server_char.last_check = time.time()
log.debug(f"{tag} Character added to online list | {server_char.name}")
online_characters[current_world].insert(0, server_char)
_char = await get_character(self.bot, server_char.name)
await self.compare_deaths(_char)
# Only update level up, but don't count it as a level up
await self.compare_levels(_char, True)
else:
await self.compare_levels(server_char)
# Update character in the list
_char_index = online_characters[current_world].index(server_char)
online_characters[current_world][_char_index].level = server_char.level
except NetworkError:
continue
except (ValueError, IndexError):
continue
except asyncio.CancelledError:
# Task was cancelled, so this is fine
break
except Exception:
log.exception("scan_online_chars")
continue
# endregion
# region Custom Events
@commands.Cog.listener()
async def on_world_scanned(self, scanned_world: World):
"""Event called each time a world is checked.
Updates the watchlists
:param scanned_world: The scanned world's information.
"""
# Schedule Scan Deaths task for this world
if scanned_world.name not in self.world_tasks:
self.world_tasks[scanned_world.name] = self.bot.loop.create_task(self.scan_deaths(scanned_world.name))
GUILD_CACHE[scanned_world.name].clear()
await self._run_watchlist(scanned_world)
async def _run_watchlist(self, scanned_world: World):
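# For every watchlist tied to this world: load its entries, check which
# characters and guilds are online, then rebuild and edit the watchlist message.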
watchlists = await Watchlist.get_by_world(self.bot.pool, scanned_world.name)
for watchlist in watchlists:
watchlist.world = scanned_world.name
log.debug(f"{self.tag}[{scanned_world.name}] Checking entries for watchlist | "
f"Guild ID: {watchlist.server_id} | Channel ID: {watchlist.channel_id} "
f"| World: {scanned_world.name}")
guild: discord.Guild = self.bot.get_guild(watchlist.server_id)
if guild is None:
await asyncio.sleep(0.01)
continue
discord_channel: discord.TextChannel = guild.get_channel(watchlist.channel_id)
if discord_channel is None:
await asyncio.sleep(0.1)
continue
watchlist.entries = await watchlist.get_entries(self.bot.pool)
if not watchlist.entries:
await asyncio.sleep(0.1)
continue
await self._watchlist_scan_entries(watchlist, scanned_world)
await self._watchlist_build_content(watchlist)
await self._watchlist_update_content(watchlist, discord_channel)
async def _watchlist_scan_entries(self, watchlist: Watchlist, scanned_world: World):
for entry in watchlist.entries:
if entry.is_guild:
await self._watchlist_check_guild(watchlist, entry)
# If it is a character, check if he's in the online list
else:
self._watchlist_add_characters(watchlist, entry, scanned_world)
watchlist.online_characters.sort(key=Watchlist.sort_by_voc_and_level())
@classmethod
async def _watchlist_check_guild(cls, watchlist, watched_guild: WatchlistEntry):
try:
tibia_guild = await cls.cached_get_guild(watched_guild.name, watchlist.world)
except NetworkError:
return
# Save disbanded guilds separately
if tibia_guild is None:
watchlist.disbanded_guilds.append(watched_guild.name)
return
# If there's at least one member online, add guild to list
if tibia_guild.online_count:
watchlist.online_guilds.append(tibia_guild)
@staticmethod
def _watchlist_add_characters(watchlist, watched_char: WatchlistEntry, scanned_world: World):
for online_char in scanned_world.online_players:
if online_char.name == watched_char.name:
# Add to online list
watchlist.online_characters.append(online_char)
return
@staticmethod
def _watchlist_get_msg_entries(characters):
return [f"\t{char.name} - Level {char.level} {get_voc_emoji(char.vocation)}" for char in characters]
async def _watchlist_build_content(self, watchlist):
if watchlist.online_count > 0:
msg_entries = self._watchlist_get_msg_entries(watchlist.online_characters)
watchlist.content = "\n".join(msg_entries)
self._watchlist_build_guild_content(watchlist)
else:
watchlist.description = "There are no watched characters online."
def _watchlist_build_guild_content(self, watchlist):
for guild_name in watchlist.disbanded_guilds:
watchlist.content += f"\n__Guild: **{guild_name}**__\n"
watchlist.content += "\t*Guild was disbanded.*"
for tibia_guild in watchlist.online_guilds:
watchlist.content += f"\n__Guild: **{tibia_guild.name}**__\n"
online_members = tibia_guild.online_members[:]
online_members.sort(key=Watchlist.sort_by_voc_and_level())
watchlist.content += "\n".join(self._watchlist_get_msg_entries(online_members))
async def _watchlist_update_content(self, watchlist: Watchlist, channel: discord.TextChannel):
# Send new watched message or edit last one
embed = discord.Embed(description=watchlist.description, timestamp=dt.datetime.utcnow())
embed.set_footer(text="Last updated")
if watchlist.content:
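# Embeds have an overall size cap; trim the content if needed, mark the
# truncation, then split it into fields that fit the per-field value limit.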
if len(watchlist.content) >= EMBED_LIMIT - 50:
watchlist.content = split_message(watchlist.content, EMBED_LIMIT - 50)[0]
watchlist.content += "\n*And more...*"
fields = split_message(watchlist.content, FIELD_VALUE_LIMIT)
for s, split_field in enumerate(fields):
name = "Watchlist" if s == 0 else "\u200F"
embed.add_field(name=name, value=split_field, inline=False)
try:
await self._watchlist_update_message(self.bot.pool, watchlist, channel, embed)
await self._watchlist_update_name(watchlist, channel)
except discord.HTTPException:
# log.exception(f"{self.tag}[_watchlist_update_content] {watchlist}")
pass
@staticmethod
async def _watchlist_update_name(watchlist: Watchlist, channel: discord.TextChannel):
try:
original_name = channel.name.split(WATCHLIST_SEPARATOR, 1)[0]
if original_name != channel.name and not watchlist.show_count:
await channel.edit(name=original_name, reason="Removing online count")
elif watchlist.show_count:
new_name = f"{original_name}{WATCHLIST_SEPARATOR}{watchlist.online_count}"
# Reduce unnecessary API calls and Audit log spam
if new_name != channel.name:
await channel.edit(name=new_name, reason="Online count changed")
except discord.Forbidden:
pass
@staticmethod
async def _watchlist_update_message(conn, watchlist, channel, embed):
# We try to get the watched message, if the bot can't find it, we just create a new one
# This may be because the old message was deleted or this is the first time the list is checked
try:
message = await channel.fetch_message(watchlist.message_id)
except discord.HTTPException:
message = None
if message is None:
new_message = await channel.send(embed=embed)
await watchlist.update_message_id(conn, new_message.id)
else:
await message.edit(embed=embed)
# endregion
# region Discord Events
@commands.Cog.listener()
async def on_guild_channel_delete(self, channel: discord.abc.GuildChannel):
"""Called when a guild channel is deleted.
Deletes associated watchlist and entries."""
if not isinstance(channel, discord.TextChannel):
return
result = await self.bot.pool.execute("DELETE FROM watchlist_entry WHERE channel_id = $1", channel.id)
deleted_entries = get_affected_count(result)
result = await self.bot.pool.execute("DELETE FROM watchlist WHERE channel_id = $1", channel.id)
deleted = get_affected_count(result)
if deleted:
# Dispatch event so ServerLog cog can handle it.
log.info(f"{self.tag} Watchlist channel deleted | Channel {channel.id} | Guild {channel.guild.id}")
self.bot.dispatch("watchlist_deleted", channel, deleted_entries)
# endregion
# region Commands
@checks.server_mod_only()
@checks.tracking_world_only()
@commands.command(name="addchar", aliases=["registerchar"], usage="<user>,<character>")
async def add_char(self, ctx: NabCtx, *, params):
"""Register a character and optionally all other visible characters to a discord user.
This command can only be used by server moderators.
If a character is hidden, only that character will be added. Characters in other worlds are skipped."""
params = params.split(",")
if len(params) != 2:
raise commands.BadArgument()
target_name, char_name = params
target_name = target_name.strip()
target = self.bot.get_member(target_name, ctx.guild)
if target is None:
return await ctx.error(f"I couldn't find any users named `{target_name}`")
if target.bot:
return await ctx.error("You can't register characters to discord bots!")
msg = await ctx.send(f"{config.loading_emoji} Fetching characters...")
try:
char = await get_character(ctx.bot, char_name)
if char is None:
return await msg.edit(content="That character doesn't exist.")
except NetworkError:
return await msg.edit(content="I couldn't fetch the character, please try again.")
check_other = False
if len(char.other_characters) > 1:
message = await ctx.send("Do you want to attempt to add the other visible characters in this account?")
check_other = await ctx.react_confirm(message, timeout=60, delete_after=True)
if check_other is None:
await safe_delete_message(msg)
return await ctx.error("You ran out of time, try again."
"Remember you have to react or click on the reactions.")
if check_other:
await safe_delete_message(msg)
msg = await ctx.send(f"{config.loading_emoji} Fetching characters...")
try:
results = await self.check_char_availability(ctx, ctx.author.id, char, [ctx.world], check_other)
except NetworkError:
return await msg.edit("I'm having network issues, please try again.")
if results.all_skipped:
await safe_delete_message(msg)
await ctx.error(f"Sorry, I couldn't find any characters in **{ctx.world}**.")
return
reply = await self.process_character_assignment(ctx, results, target, ctx.author)
await safe_delete_message(msg)
await ctx.send(reply)
@commands.command()
@checks.tracking_world_somewhere()
async def claim(self, ctx: NabCtx, *, char_name: str = None):
"""Claims a character registered as yours.
Claims a character as yours, even if it is already registered to someone else.
In order for this to work, you have to put a special code in the character's comment.
You can see this code by using the command with no parameters. The code looks like this: `/NB-23FC13AC7400000/`
Once you have set the code, you can use the command with that character; if the code matches,
it will be reassigned to you.
Note that it may take some time for the code to be visible to NabBot because of caching.
This code is unique for your discord user, so the code will only work for your discord account and no one else.
No one can claim a character of yours unless you put **their** code on your character's comment.
"""
user = ctx.author
claim_pattern = re.compile(r"/NB-([^/]+)/")
user_code = hex(user.id)[2:].upper()
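# e.g. a user id of 123456789 yields the code 75BCD15 (uppercase hex, no "0x" prefix)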
# List of Tibia worlds tracked in the servers the user is
if ctx.is_private:
user_tibia_worlds = [ctx.world]
else:
user_tibia_worlds = ctx.bot.get_user_worlds(user.id)
if not ctx.is_private and self.bot.tracked_worlds.get(ctx.guild.id) is None:
return await ctx.send("This server is not tracking any tibia worlds.")
if len(user_tibia_worlds) == 0:
return
if char_name is None:
await ctx.send(f"To use this command, add `/NB-{user_code}/` to the comment of the character you want to"
f"claim, and then use `/claim character_name`.")
return
msg = await ctx.send(f"{config.loading_emoji} Fetching character...")
try:
char = await get_character(ctx.bot, char_name)
if char is None:
return await msg.edit(content=f"{ctx.tick(False)} That character doesn't exist.")
except NetworkError:
return await msg.edit(content=f"{ctx.tick(False)} I couldn't fetch the character, please try again.")
match = claim_pattern.search(char.comment if char.comment is not None else "")
if not match:
await ctx.error(f"Couldn't find verification code on character's comment.\n"
f"Add `/NB-{user_code}/` to the comment to authenticate.")
return
code = match.group(1)
if code != user_code:
await ctx.error(f"The verification code on the character's comment doesn't match yours.\n"
f"Use `/NB-{user_code}/` to authenticate.")
return
check_other = False
if len(char.other_characters) > 1:
message = await ctx.send("Do you want to attempt to add the other visible characters in this account?")
check_other = await ctx.react_confirm(message, timeout=60, delete_after=True)
if check_other is None:
await safe_delete_message(msg)
return await ctx.send("You ran out of time, try again."
"Remember you have to react or click on the reactions.")
if check_other:
await safe_delete_message(msg)
msg = await ctx.send(f"{config.loading_emoji} Fetching characters...")
try:
results = await self.check_char_availability(ctx, ctx.author.id, char, user_tibia_worlds, check_other)
except NetworkError:
return await msg.edit("I'm having network issues, please try again.")
if results.all_skipped:
reply = "Sorry, I couldn't find any characters from the worlds in the context ({0})."
return await msg.edit(content=reply.format(join_list(user_tibia_worlds)))
reply = await self.process_character_assignment(ctx, results, ctx.author, claim=True)
await safe_delete_message(msg)
await ctx.send(reply)
@checks.tracking_world_somewhere()
@commands.command(aliases=["i'm", "iam"])
async def im(self, ctx: NabCtx, *, char_name: str):
"""Lets you add your tibia character(s) for the bot to track.
If there are other visible characters, the bot will ask for confirmation to add them too.
Characters in worlds other than the currently tracked world are skipped.
If it finds a character owned by another user, the whole process will be stopped.
If a character is already registered to someone else, `claim` can be used."""
# List of Tibia worlds tracked in the servers the user is
if ctx.is_private:
user_tibia_worlds = [ctx.world]
else:
user_tibia_worlds = ctx.bot.get_user_worlds(ctx.author.id)
msg = await ctx.send(f"{config.loading_emoji} Fetching character...")
try:
char = await get_character(ctx.bot, char_name)
if char is None:
return await msg.edit(content=f"{ctx.tick(False)} That character doesn't exist.")
except NetworkError:
return await msg.edit(content=f"{ctx.tick(False)} I couldn't fetch the character, please try again.")
check_other = False
if len(char.other_characters) > 1:
await msg.edit(content="Do you want to attempt to add the other visible characters in this account?")
check_other = await ctx.react_confirm(msg, timeout=60, delete_after=True)
if check_other is None:
await safe_delete_message(msg)
return await ctx.send("You didn't reply in time, try again."
"Remember that you have to react or click on the icons.")
if check_other:
await safe_delete_message(msg)
msg = await ctx.send(f"{config.loading_emoji} Fetching characters...")
try:
results = await self.check_char_availability(ctx, ctx.author.id, char, user_tibia_worlds, check_other)
except NetworkError:
return await msg.edit("I'm having network issues, please try again.")
if results.all_skipped:
reply = "Sorry, I couldn't find any characters from the worlds in the context ({0})."
return await msg.edit(content=reply.format(join_list(user_tibia_worlds)))
reply = await self.process_character_assignment(ctx, results, ctx.author)
await safe_delete_message(msg)
await ctx.send(reply)
@checks.tracking_world_somewhere()
@commands.command(aliases=["i'mnot"])
async def imnot(self, ctx: NabCtx, *, name):
"""Removes a character assigned to you.
All registered level ups and deaths will be lost forever."""
db_char = await DbChar.get_by_name(ctx.pool, name)
if db_char is None or db_char.user_id == 0:
return await ctx.error("There's no character registered with that name.")
if db_char.user_id != ctx.author.id:
return await ctx.error(f"The character **{db_char.name}** is not registered to you.")
message = await ctx.send(f"Are you sure you want to unregister "
f"**{db_char.name}** ({abs(db_char.level)} {db_char.vocation})?")
confirm = await ctx.react_confirm(message, timeout=50)
if confirm is None:
return await ctx.send("I guess you changed your mind.")
if not confirm:
return await ctx.send("No then? Ok.")
await db_char.update_user(ctx.pool, 0)
await ctx.success(f"**{db_char.name}** is no longer registered to you.")
self.bot.dispatch("character_change", ctx.author.id)
self.bot.dispatch("character_unregistered", ctx.author, db_char)
@checks.can_embed()
@checks.tracking_world_only()
@commands.command()
async def online(self, ctx: NabCtx):
"""Tells you which users are online on Tibia.
This list is updated from Tibia.com's online list, so it can take a couple of minutes to refresh."""
world = ctx.world
per_page = 20 if await ctx.is_long() else 5
now = dt.datetime.utcnow()
uptime = (now - self.bot.start_time).total_seconds()
count = 0
entries = []
vocations = []
for char in online_characters.get(world, []):
name = char.name
db_char = await DbChar.get_by_name(ctx.pool, name)
if not db_char:
continue
# Skip characters of members not in the server
owner = ctx.guild.get_member(db_char.user_id)
if owner is None:
continue
owner = owner.display_name
emoji = get_voc_emoji(char.vocation)
vocations.append(char.vocation.value)
vocation = get_voc_abb(char.vocation)
entries.append(f"{char.name} (Lvl {char.level} {vocation}{emoji}, **@{owner}**)")
count += 1
if count == 0:
if uptime < 90:
await ctx.send("I just started, give me some time to check online lists...⌛")
else:
await ctx.send("There is no one online from Discord.")
return
pages = VocationPages(ctx, entries=entries, vocations=vocations, per_page=per_page)
pages.embed.title = "Users online"
try:
await pages.paginate()
except CannotPaginate as e:
await ctx.send(e)
@commands.command(name="searchteam", aliases=["whereteam", "findteam"], usage="<params>")
@checks.tracking_world_only()
@checks.can_embed()
async def search_team(self, ctx: NabCtx, *, params=None):
"""Searches for a registered character that meets the criteria
There are 3 ways to use this command:
- Show characters in share range with a specific character. (`searchteam <name>`)
- Show characters in share range with a specific level. (`searchteam <level>`)
- Show characters in a level range. (`searchteam <min>,<max>`)
Online characters are shown first on the list, they also have an icon."""
permissions = ctx.bot_permissions
if not permissions.embed_links:
await ctx.send("Sorry, I need `Embed Links` permission for this command.")
return
invalid_arguments = "Invalid arguments used, examples:\n" \
"```/searchteam charname\n" \
"/searchteam level\n" \
"/searchteam minlevel,maxlevel```"
if ctx.world is None:
await ctx.send("This server is not tracking any tibia worlds.")
return
if params is None:
await ctx.send(invalid_arguments)
return
entries = []
vocations = []
online_entries = []
online_vocations = []
per_page = 20 if await ctx.is_long() else 5
char = None
params = split_params(params)
if len(params) < 1 or len(params) > 2:
await ctx.send(invalid_arguments)
return
# params[0] could be a character's name, a character's level or one of the level ranges
# If it's not a number, it should be a player's name
if not is_numeric(params[0]):
# We shouldn't have another parameter if a character name was specified
if len(params) == 2:
await ctx.send(invalid_arguments)
return
char = await get_character(ctx.bot, params[0])
if char is None:
await ctx.send("I couldn't find a character with that name.")
return
low, high = get_share_range(char.level)
title = f"Characters in share range with {char.name}({low}-{high}):"
empty = f"I didn't find anyone in share range with **{char.name}**({low}-{high})"
else:
# Check if we have another parameter, meaning this is a level range
if len(params) == 2:
try:
level1 = int(params[0])
level2 = int(params[1])
except ValueError:
await ctx.send(invalid_arguments)
return
if level1 <= 0 or level2 <= 0:
await ctx.send("You entered an invalid level.")
return
low = min(level1, level2)
high = max(level1, level2)
title = f"Characters between level {low} and {high}"
empty = f"I didn't find anyone between levels **{low}** and **{high}**"
# We only got a level, so we get the share range for it
else:
if int(params[0]) <= 0:
await ctx.send("You entered an invalid level.")
return
low, high = get_share_range(int(params[0]))
title = f"Characters in share range with level {params[0]} ({low}-{high})"
empty = f"I didn't find anyone in share range with level **{params[0]}** ({low}-{high})"
async with ctx.pool.acquire() as conn:
count = 0
online_list = [x.name for v in online_characters.values() for x in v]
async for db_char in DbChar.get_chars_in_range(conn, low, high, ctx.world):
if char is not None and char.name == db_char.name:
continue
owner = ctx.guild.get_member(db_char.user_id)
if owner is None:
continue
count += 1
owner = owner.display_name
emoji = get_voc_emoji(db_char.vocation)
voc_abb = get_voc_abb(db_char.vocation)
entry = f"**{db_char.name}** - Level {abs(db_char.level)} {voc_abb}{emoji} - @**{owner}**"
if db_char.name in online_list:
entry = f"{config.online_emoji}{entry}"
online_entries.append(entry)
online_vocations.append(db_char.vocation)
else:
entries.append(entry)
vocations.append(db_char.vocation)
if count < 1:
await ctx.send(empty)
return
pages = VocationPages(ctx, entries=online_entries + entries, per_page=per_page,
vocations=online_vocations + vocations)
pages.embed.title = title
try:
await pages.paginate()
except CannotPaginate as e:
await ctx.send(e)
@checks.server_mod_only()
@checks.tracking_world_only()
@commands.command(name="removechar", aliases=["deletechar", "unregisterchar"])
async def remove_char(self, ctx: NabCtx, *, name):
"""Removes a registered character from someone.
This can only be used by server moderators.
Note that you can only remove characters from users who are exclusively in your server.
You can't remove characters if doing so would affect other servers NabBot is in."""
# This could be used to remove deleted chars so we don't need to check anything
# Except if the char exists in the database...
db_char = await DbChar.get_by_name(ctx.pool, name.strip())
if db_char is None or db_char.user_id == 0:
return await ctx.error("There's no character with that name registered.")
if db_char.world != ctx.world:
return await ctx.error(f"The character **{db_char.name}** is in a different world.")
user = self.bot.get_user(db_char.user_id)
if user is not None:
user_guilds = self.bot.get_user_guilds(user.id)
# Iterating every world where the user is, to check if it wouldn't affect other admins.
for guild in user_guilds:
if guild == ctx.guild:
continue
if self.bot.tracked_worlds.get(guild.id, None) != ctx.world:
continue
author: discord.Member = guild.get_member(ctx.author.id)
if author is None or not author.guild_permissions.manage_guild:
await ctx.error(f"The user of this server is also in another server tracking "
f"**{ctx.world}**, where you are not an admin. You can't alter other servers.")
return
username = "unknown" if user is None else user.display_name
await db_char.update_user(ctx.pool, 0)
await ctx.send("**{0}** was removed successfully from **@{1}**.".format(db_char.name, username))
self.bot.dispatch("character_unregistered", user, db_char, ctx.author)
@checks.server_mod_only()
@checks.tracking_world_only()
@commands.group(invoke_without_command=True, case_insensitive=True, aliases=["huntedlist"])
async def watchlist(self, ctx: NabCtx):
"""Create or manage watchlists.
Watchlists are channels where the online status of selected characters are shown.
You can create multiple watchlists and add characters and guilds to each one separately.
Try the subcommands."""
await ctx.send("To manage watchlists, use one of the subcommands.\n"
f"Try `{ctx.clean_prefix}help {ctx.invoked_with}`.")
@checks.tracking_world_only()
@checks.channel_mod_somewhere()
@watchlist.command(name="add", aliases=["addplayer", "addchar"], usage="<channel> <name>[,reason]")
async def watchlist_add(self, ctx: NabCtx, channel: discord.TextChannel, *, params):
"""Adds a character to a watchlist.
A reason can be specified by adding it after the character's name, separated by a comma."""
watchlist = await Watchlist.get_by_channel_id(ctx.pool, channel.id)
if not watchlist:
return await ctx.error(f"{channel.mention} is not a watchlist channel.")
if not channel.permissions_for(ctx.author).manage_channels:
return await ctx.error(f"You need `Manage Channel` permissions in {channel.mention} to add entries.")
params = params.split(",", 1)
name = params[0]
reason = None
if len(params) > 1:
reason = params[1]
char = await get_character(ctx.bot, name)
if char is None:
await ctx.error("A character with that name doesn't exist.")
return
world = ctx.world
if char.world != world:
await ctx.error(f"This character is not in **{world}**.")
return
message = await ctx.send(f"Do you want to add **{char.name}** (Level {char.level} {char.vocation}) "
f"to the watchlist {channel.mention}")
confirm = await ctx.react_confirm(message, delete_after=True)
if confirm is None:
await ctx.send("You took too long!")
return
if not confirm:
await ctx.send("Ok then, guess you changed your mind.")
return
entry = await watchlist.add_entry(ctx.pool, char.name, False, ctx.author.id, reason)
if entry:
await ctx.success(f"Character **{char.name}** added to the watchlist {channel.mention}.")
else:
await ctx.error(f"**{char.name}** is already registered in {channel.mention}")
@checks.tracking_world_only()
@checks.channel_mod_somewhere()
@watchlist.command(name="addguild", usage="<channel> <name>[,reason]")
async def watchlist_addguild(self, ctx: NabCtx, channel: discord.TextChannel, *, params):
"""Adds an entire guild to a watchlist.
Guilds are displayed in the watchlist as a group."""
watchlist = await Watchlist.get_by_channel_id(ctx.pool, channel.id)
if not watchlist:
return await ctx.error(f"{channel.mention} is not a watchlist channel.")
if not channel.permissions_for(ctx.author).manage_channels:
return await ctx.error(f"You need `Manage Channel` permissions in {channel.mention} to add entries.")
params = params.split(",", 1)
name = params[0]
reason = None
if len(params) > 1:
reason = params[1]
guild = await get_guild(name)
if guild is None:
await ctx.error("There's no guild with that name.")
return
if guild.world != ctx.world:
await ctx.error(f"This guild is not in **{ctx.world}**.")
return
message = await ctx.send(f"Do you want to add the guild **{guild.name}** to the watchlist {channel.mention}?")
confirm = await ctx.react_confirm(message, delete_after=True)
if confirm is None:
await ctx.send("You took too long!")
return
if not confirm:
await ctx.send("Ok then, guess you changed your mind.")
return
entry = await watchlist.add_entry(ctx.pool, guild.name, True, ctx.author.id, reason)
if entry:
await ctx.success(f"Guild **{guild.name}** added to the watchlist {channel.mention}.")
else:
await ctx.error(f"**{guild.name}** is already registered in {channel.mention}")
@checks.tracking_world_only()
@checks.channel_mod_somewhere()
@watchlist.command(name="adduser", usage="<channel> <user>[,reason]")
async def watchlist_adduser(self, ctx: NabCtx, channel: discord.TextChannel, *, params):
"""Adds the currently registered characters of a user to the watchlist.
A reason can be specified by adding it after the character's name, separated by a comma."""
watchlist = await Watchlist.get_by_channel_id(ctx.pool, channel.id)
if not watchlist:
return await ctx.error(f"{channel.mention} is not a watchlist channel.")
if not channel.permissions_for(ctx.author).manage_channels:
return await ctx.error(
f"You need `Manage Channel` permissions in {channel.mention} to add entries.")
params = params.split(",", 1)
name = params[0]
reason = None
if len(params) > 1:
reason = params[1]
user = ctx.bot.get_member(name, ctx.guild)
if user is None:
return await ctx.error("I don't see any users with that name or id.")
characters = await DbChar.get_chars_by_user(ctx.pool, user.id, worlds=ctx.world)
if not characters:
await ctx.error(f"This user doesn't have any registered characters in {ctx.world}.")
return
char_list = "\n".join(f"• {c.name}" for c in characters)
message = await ctx.send(f"Do you want to add currently registered characters of `{user}` to this watchlist?\n"
f"{char_list}")
confirm = await ctx.react_confirm(message)
if confirm is None:
await ctx.send("You took too long!")
return
if not confirm:
await ctx.send("Ok then, guess you changed your mind.")
return
results = ""
for char in characters:
entry = await watchlist.add_entry(ctx.pool, char.name, False, ctx.author.id, reason)
if entry:
results += f"\n• {char.name}"
if results:
            await ctx.success(f"I added the following characters to the list {channel.mention}, "
                              f"duplicates were skipped:{results}")
else:
            await ctx.error("No characters were added, as they were all duplicates.")
@checks.server_mod_only()
@checks.tracking_world_only()
@watchlist.command(name="create")
async def watchlist_create(self, ctx: NabCtx, *, name):
"""Creates a watchlist channel.
Creates a new text channel for the watchlist to be posted.
The watch list shows which characters from it are online. Entire guilds can be added too.
        The channel can be renamed at any time. If the channel is deleted, all its entries are deleted too.
"""
if WATCHLIST_SEPARATOR in name:
await ctx.error(f"Channel name cannot contain the special character **{WATCHLIST_SEPARATOR}**")
return
if not ctx.bot_permissions.manage_channels:
return await ctx.error(f"I need `Manage Channels` permission in the server to use this command.")
message = await ctx.send(f"Do you want to create a new watchlist named `{name}`?")
confirm = await ctx.react_confirm(message, delete_after=True)
if not confirm:
return
try:
overwrites = {
ctx.guild.default_role: discord.PermissionOverwrite(send_messages=False, read_messages=True),
ctx.guild.me: discord.PermissionOverwrite(send_messages=True, read_messages=True, manage_channels=True)
}
channel = await ctx.guild.create_text_channel(name, overwrites=overwrites, category=ctx.channel.category)
except discord.Forbidden:
await ctx.error(f"Sorry, I don't have permissions to create channels.")
except discord.HTTPException:
await ctx.error(f"Something went wrong, the channel name you chose is probably invalid.")
else:
log.info(f"Watchlist created (Channel ID: {channel.id}, Guild ID: {channel.guild.id})")
await ctx.success(f"Channel created successfully: {channel.mention}\n")
await channel.send("This is where I will post a list of online watched characters.\n"
"Edit this channel's permissions to allow the roles you want.\n"
"This channel can be renamed freely.\n"
"Anyone with `Manage Channel` permission here can add entries.\n"
f"Example: {ctx.clean_prefix}{ctx.command.full_parent_name} add {channel.mention} "
f"Galarzaa Fidera\n"
"If this channel is deleted, all related entries will be lost.\n"
"**It is important to not allow anyone to write in here**\n"
"*This message can be deleted now.*")
watchlist = await Watchlist.insert(ctx.pool, ctx.guild.id, channel.id, ctx.author.id)
log.debug(f"{self.tag} Watchlist created | {watchlist}")
@checks.channel_mod_somewhere()
@checks.tracking_world_only()
@watchlist.command(name="info", aliases=["details", "reason"])
async def watchlist_info(self, ctx: NabCtx, channel: discord.TextChannel, *, name: str):
"""Shows information about a watchlist entry.
This shows who added the player, when, and if there's a reason why they were added."""
if not await Watchlist.get_by_channel_id(ctx.pool, channel.id):
return await ctx.error(f"{channel.mention} is not a watchlist.")
entry = await WatchlistEntry.get_by_name(ctx.pool, channel.id, name, False)
if not entry:
return await ctx.error(f"There's no character with that name registered to {channel.mention}.")
embed = discord.Embed(title=entry.name, url=tibiapy.Character.get_url(entry.name), timestamp=entry.created,
description=f"**Reason:** {entry.reason}" if entry.reason else "No reason provided.")
embed.set_author(name=f"In #{channel}")
author = ctx.guild.get_member(entry.user_id)
if author:
embed.set_footer(text=f"Added by {author.name}#{author.discriminator}",
icon_url=get_user_avatar(author))
await ctx.send(embed=embed)
@checks.channel_mod_somewhere()
@checks.tracking_world_only()
@watchlist.command(name="infoguild", aliases=["detailsguild", "reasonguild"])
async def watchlist_infoguild(self, ctx: NabCtx, channel: discord.TextChannel, *, name: str):
""""Shows details about a guild entry in a watchlist.
This shows who added the player, when, and if there's a reason why they were added."""
if not await Watchlist.get_by_channel_id(ctx.pool, channel.id):
return await ctx.error(f"{channel.mention} is not a watchlist.")
entry = await WatchlistEntry.get_by_name(ctx.pool, channel.id, name, True)
if not entry:
return await ctx.error(f"There's no guild with that name registered to {channel.mention}.")
embed = discord.Embed(title=entry.name, timestamp=entry.created, url=tibiapy.Guild.get_url(entry.name),
description=f"**Reason:** {entry.reason}" if entry.reason else "No reason provided.")
embed.set_author(name=f"In #{channel}")
author = ctx.guild.get_member(entry.user_id)
if author:
embed.set_footer(text=f"Added by {author.name}#{author.discriminator}",
icon_url=get_user_avatar(author))
await ctx.send(embed=embed)
@checks.tracking_world_only()
@watchlist.command(name="list")
async def watchlist_list(self, ctx: NabCtx, channel: discord.TextChannel):
"""Shows characters belonging to that watchlist.
Note that this lists all characters, not just online characters."""
if not await Watchlist.get_by_channel_id(ctx.pool, channel.id):
return await ctx.error(f"{channel.mention} is not a watchlist.")
if not channel.permissions_for(ctx.author).read_messages:
            return await ctx.error("You can't view the entries of a watchlist channel you don't have access to.")
entries = await WatchlistEntry.get_entries_by_channel(ctx.pool, channel.id)
entries = [entry for entry in entries if not entry.is_guild]
if not entries:
return await ctx.error(f"This watchlist has no registered characters.")
pages = Pages(ctx, entries=[f"[{r.name}]({NabChar.get_url(r.name)})" for r in entries])
pages.embed.title = f"Watched Characters in #{channel.name}"
try:
await pages.paginate()
except CannotPaginate as e:
await ctx.error(e)
@checks.tracking_world_only()
@watchlist.command(name="listguilds", aliases=["guilds", "guildlist"])
async def watchlist_list_guild(self, ctx: NabCtx, channel: discord.TextChannel):
"""Shows a list of guilds in the watchlist."""
if not await Watchlist.get_by_channel_id(ctx.pool, channel.id):
return await ctx.error(f"{channel.mention} is not a watchlist.")
entries = await WatchlistEntry.get_entries_by_channel(ctx.pool, channel.id)
entries = [entry for entry in entries if entry.is_guild]
if not channel.permissions_for(ctx.author).read_messages:
            return await ctx.error("You can't view the entries of a watchlist channel you don't have access to.")
if not entries:
            return await ctx.error(f"This watchlist has no registered guilds.")
pages = Pages(ctx, entries=[f"[{r.name}]({Guild.get_url(r.name)})" for r in entries])
pages.embed.title = f"Watched Guilds in #{channel.name}"
try:
await pages.paginate()
except CannotPaginate as e:
await ctx.error(e)
@checks.channel_mod_somewhere()
@checks.tracking_world_only()
@watchlist.command(name="remove", aliases=["removeplayer", "removechar"])
async def watchlist_remove(self, ctx: NabCtx, channel: discord.TextChannel, *, name):
"""Removes a character from a watchlist."""
if not await Watchlist.get_by_channel_id(ctx.pool, channel.id):
return await ctx.error(f"{channel.mention} is not a watchlist.")
entry = await WatchlistEntry.get_by_name(ctx.pool, channel.id, name, False)
if entry is None:
return await ctx.error(f"There's no character with that name registered in {channel.mention}.")
message = await ctx.send(f"Do you want to remove **{name}** from this watchlist?")
confirm = await ctx.react_confirm(message)
if confirm is None:
await ctx.send("You took too long!")
return
if not confirm:
await ctx.send("Ok then, guess you changed your mind.")
return
await entry.remove(ctx.pool)
await ctx.success("Character removed from the watchlist.")
@checks.channel_mod_somewhere()
@checks.tracking_world_only()
@watchlist.command(name="removeguild")
async def watchlist_removeguild(self, ctx: NabCtx, channel: discord.TextChannel, *, name):
"""Removes a guild from the watchlist."""
if not await Watchlist.get_by_channel_id(ctx.pool, channel.id):
return await ctx.error(f"{channel.mention} is not a watchlist.")
entry = await WatchlistEntry.get_by_name(ctx.pool, channel.id, name, True)
if entry is None:
return await ctx.error(f"There's no guild with that name registered in {channel.mention}.")
message = await ctx.send(f"Do you want to remove **{name}** from this watchlist?")
confirm = await ctx.react_confirm(message)
if confirm is None:
await ctx.send("You took too long!")
return
if not confirm:
await ctx.send("Ok then, guess you changed your mind.")
return
await entry.remove(ctx.pool)
await ctx.success("Guild removed from the watchlist.")
@checks.channel_mod_somewhere()
@checks.tracking_world_only()
@watchlist.command(name="showcount", usage="<channel> <yes|no>")
async def watchlist_showcount(self, ctx: NabCtx, channel: discord.TextChannel, yes_no):
"""Changes whether the online count will be displayed in the watchlist's channel's name or not."""
watchlist = await Watchlist.get_by_channel_id(ctx.pool, channel.id)
if not watchlist:
return await ctx.error(f"{channel.mention} is not a watchlist.")
if yes_no.lower().strip() in ["yes", "true"]:
await watchlist.update_show_count(ctx.pool, True)
await ctx.success("Showing online count is now enabled. The name will be updated on the next cycle.")
elif yes_no.lower().strip() in ["no", "false"]:
await watchlist.update_show_count(ctx.pool, False)
await ctx.success("Showing online count is now disabled. The name will be updated on the next cycle.")
else:
await ctx.error("That's not a valid option, try `yes` or `no`.")
# endregion
# region Methods
async def announce_death(self, char: NabChar, death: Death, levels_lost=0):
"""Announces a level up on the corresponding servers."""
log_msg = f"{self.tag}[{char.world}] announce_death: {char.name} | {death.level} | {death.killer.name}"
# Find killer article (a/an)
killer_article = ""
if not death.by_player:
killer_article = death.killer.name.split(" ", 1)
if killer_article[0] in ["a", "an"] and len(killer_article) > 1:
death.killer.name = killer_article[1]
killer_article = killer_article[0] + " "
else:
killer_article = ""
if death.killer.name.lower() in ["death", "energy", "earth", "fire", "pit battler", "pit berserker",
"pit blackling",
"pit brawler", "pit condemned", "pit demon", "pit destroyer", "pit fiend",
"pit groveller", "pit grunt", "pit lord", "pit maimer", "pit overlord",
"pit reaver",
"pit scourge"] and levels_lost == 0:
# Skip element damage deaths unless player lost a level to avoid spam from arena deaths
# This will cause a small amount of deaths to not be announced but it's probably worth the tradeoff
log.debug(f"{log_msg} | Skipping arena death")
return
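        # Announce the death in every Discord server that tracks this world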
guilds = [s for s, w in self.bot.tracked_worlds.items() if w == char.world]
for guild_id in guilds:
guild = self.bot.get_guild(guild_id)
if guild is None:
continue
min_level = await get_server_property(self.bot.pool, guild_id, "announce_level", config.announce_threshold)
if death.level < min_level:
log.debug(f"{log_msg} | Guild skipped {guild_id} | Level under limit")
continue
if guild.get_member(char.owner_id) is None:
log.debug(f"{log_msg} | Guild skipped {guild_id} | Owner not in server")
continue
simple_messages = await get_server_property(self.bot.pool, guild_id, "simple_messages", False)
condition = DeathMessageCondition(char=char, death=death, levels_lost=levels_lost, min_level=min_level)
# Select a message
if death.by_player:
message = weighed_choice(death_messages_player, condition) if not simple_messages else SIMPLE_DEATH
else:
message = weighed_choice(death_messages_monster, condition) if not simple_messages else SIMPLE_PVP_DEATH
# Format message with death information
message = message.format(**{'name': char.name, 'level': death.level, 'killer': death.killer.name,
'killer_article': killer_article, 'he_she': char.he_she.lower(),
'his_her': char.his_her.lower(), 'him_her': char.him_her.lower()})
# Format extra stylization
message = f"{config.pvpdeath_emoji if death.by_player else config.death_emoji} {format_message(message)}"
channel_id = await get_server_property(self.bot.pool, guild.id, "levels_channel")
channel = self.bot.get_channel_or_top(guild, channel_id)
try:
await channel.send(message[:1].upper() + message[1:])
log.debug(f"{log_msg} | Announced in {guild_id}")
except discord.Forbidden:
log.warning(f"{log_msg} | Forbidden error | Channel {channel.id} | Server {guild.id}")
except discord.HTTPException:
log.exception(f"{log_msg}")
async def announce_level(self, char: NabChar, level: int):
"""Announces a level up on corresponding servers."""
        log_msg = f"{self.tag}[{char.world}] announce_level: {char.name} | {level}"
guilds = [s for s, w in self.bot.tracked_worlds.items() if w == char.world]
for guild_id in guilds:
guild: discord.Guild = self.bot.get_guild(guild_id)
if guild is None:
continue
min_level = await get_server_property(self.bot.pool, guild_id, "announce_level", config.announce_threshold)
if char.level < min_level:
log.debug(f"{log_msg} | Guild skipped {guild_id} | Level under limit")
continue
if guild.get_member(char.owner_id) is None:
log.debug(f"{log_msg} | Guild skipped {guild_id} | Owner not in server")
continue
channel_id = await get_server_property(self.bot.pool, guild.id, "levels_channel")
simple_messages = await get_server_property(self.bot.pool, guild_id, "simple_messages", False)
channel = self.bot.get_channel_or_top(guild, channel_id)
try:
# Select a message
if not simple_messages:
message = weighed_choice(level_messages, LevelCondition(char=char, level=level,
min_level=min_level))
else:
message = SIMPLE_LEVEL
# Format message with level information
message = message.format(**{'name': char.name, 'level': level, 'he_she': char.he_she.lower(),
'his_her': char.his_her.lower(), 'him_her': char.him_her.lower()})
# Format extra stylization
message = f"{config.levelup_emoji} {format_message(message)}"
await channel.send(message)
log.debug(f"{log_msg} | Announced in {guild_id}")
except discord.Forbidden:
log.warning(f"{log_msg} | Forbidden error | Channel {channel.id} | Server {guild.id}")
except discord.HTTPException:
log.exception(f"{log_msg}")
@staticmethod
async def cached_get_guild(guild_name: str, world: str) -> Optional[Guild]:
"""
Used to cache guild info, to avoid fetching the same guild multiple times if they are in multiple lists
"""
if guild_name in GUILD_CACHE[world]:
return GUILD_CACHE[world][guild_name]
guild = await get_guild(guild_name)
GUILD_CACHE[world][guild_name] = guild
return guild
@classmethod
async def check_char_availability(cls, ctx: NabCtx, user_id: int, char: NabChar, worlds: List[str],
check_other=False):
"""Checks the availability of a character and other visible characters optionally.
:param ctx: The command context where this is called.
:param user_id: The id of the user against which the characters will be checked for.
:param char: The character to be checked.
:param worlds: The worlds to filter characters from.
        :param check_other: Whether other characters in the same account should be processed too or not.
:return: A named tuple containing the different categories of characters found.
"""
skipped = [] # type: List[OtherCharacter]
"""Characters that were skipped due to being in another world or scheduled for deletion."""
no_user = [] # type: List[DbChar]
"""Characters that belong to users no longer visible to NabBot, most of the time abandoned temporal users."""
same_owner = [] # type: List[DbChar]
"""Characters that already belong to the user."""
different_user = [] # type: List[DbChar]
"""Characters belonging to a different user."""
unregistered = [] # type: List[NabChar]
"""Characters that have never been registered."""
if check_other and not char.hidden:
chars: List[Union[OtherCharacter, NabChar]] = char.other_characters
_char = next((x for x in chars if x.name == char.name))
chars[chars.index(_char)] = char
else:
chars = [char]
for char in chars:
if char.world not in worlds or char.deleted:
skipped.append(char)
continue
db_char = await DbChar.get_by_name(ctx.pool, char.name)
if db_char:
owner = ctx.bot.get_user(db_char.user_id)
if owner is None:
no_user.append(db_char)
continue
elif db_char.user_id == user_id:
same_owner.append(db_char)
continue
different_user.append(db_char)
continue
if isinstance(char, OtherCharacter):
char = await get_character(ctx.bot, char.name)
unregistered.append(char)
return CharactersResult._make((skipped, no_user, same_owner, different_user, unregistered,
len(skipped) == len(chars)))
async def compare_deaths(self, char: NabChar):
"""Checks if the player has new deaths.
New deaths are announced if they are not older than 30 minutes."""
if char is None:
return
async with self.bot.pool.acquire() as conn:
db_char = await DbChar.get_by_name(conn, char.name)
if db_char is None:
return
pending_deaths = []
for death in char.deaths:
# Check if we have a death that matches the time
exists = await DbDeath.exists(conn, db_char.id, death.level, death.time)
if exists:
# We already have this death, we're assuming we already have older deaths
break
pending_deaths.append(death)
# Announce and save deaths from older to new
for death in reversed(pending_deaths):
db_death = DbDeath.from_tibiapy(death)
db_death.character_id = db_char.id
await db_death.save(conn)
log_msg = f"{self.tag}[{char.world}] Death detected: {char.name} | {death.level} |" \
f" {death.killer.name}"
                if (dt.datetime.now(dt.timezone.utc) - death.time) >= dt.timedelta(minutes=30):
log.info(f"{log_msg} | Too old to announce.")
# Only try to announce if character has an owner
elif char.owner_id:
log.info(log_msg)
await self.announce_death(char, death, max(death.level - char.level, 0))
async def compare_levels(self, char: Union[NabChar, OnlineCharacter], update_only=False):
"""Compares the character's level with the stored level in database.
This should only be used on online characters or characters that just became offline."""
if char is None:
return
async with self.bot.pool.acquire() as conn:
db_char = await DbChar.get_by_name(conn, char.name)
if not db_char:
return
# OnlineCharacter has no sex attribute, so we get it from database and convert to NabChar
if isinstance(char, OnlineCharacter):
char = NabChar.from_online(char, db_char.sex, db_char.user_id)
level_before = db_char.level
if level_before != char.level:
await db_char.update_level(conn, char.level)
log.debug(f"{self.tag}[{char.world}][compare_level] {char.name}'s level updated:"
f" {level_before} -> {char.level}")
if not (char.level > level_before > 0) or update_only:
return
# Saving level up date in database
await DbLevelUp.insert(conn, db_char.id, char.level)
# Announce the level up
log.info(f"{self.tag}[{char.world}] Level up detected: {char.name} | {char.level}")
# Only try to announce level if char has an owner.
if char.owner_id:
await self.announce_level(char, char.level)
else:
log.debug(f"{self.tag}[{char.world}] Character has no owner, skipping")
@classmethod
async def process_character_assignment(cls, ctx: NabCtx, results: CharactersResult, user: discord.User,
author: discord.User = None, claim=False):
"""Processes the results of a character check and applies the changes
:param ctx: The command context
:param results: The character results
:param user: The user that will get the characters assigned.
:param author: The user that did the action, None if it was the same user.
:param claim: Whether the operation is a claim.
:return: A summary of the applied actions.
"""
recipient = f"**@{user.display_name}**" if author else "you"
author_log = f"| By {author}" if author else ""
reply = ""
if results.different_user and not claim:
first = results.different_user[0].name
reply = f"{ctx.tick(False)} Sorry, a character in that account ({first}) is already registered to " \
f"someone else.\n" \
f"If the character really belongs to {recipient}, `{ctx.clean_prefix}claim {first}` should be used."
return reply
if results.same_owner:
existent_names = [e.name for e in results.same_owner]
reply += f"\n⚫ The following characters were already registered to {recipient}: {join_list(existent_names)}"
if results.new:
added_names = [a.name for a in results.new]
reply += f"\n🔵 The following characters were added to {recipient}: {join_list(added_names)}"
if results.no_user:
updated_names = [r.name for r in results.no_user]
reply += f"\n⚪ The following characters were reassigned to {recipient}: {join_list(updated_names)}"
if results.different_user:
reclaimed_chars = [c.name for c in results.different_user]
reply += f"\n🔴 The following characters were reclaimed by you: {join_list(reclaimed_chars)}"
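        # Persist the reassignments and new registrations in the database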
async with ctx.pool.acquire() as conn:
for char in results.different_user:
await char.update_user(conn, user.id)
log.info(f"{cls.get_tag()} Character Claimed | {char.name} | {user} ({user.id}){author_log}")
for char in results.no_user:
await char.update_user(conn, user.id)
log.info(f"{cls.get_tag()} Character Reassigned | {char.name} | {user} ({user.id}){author_log}")
for char in results.new:
db_char = await DbChar.insert(conn, char.name, char.level, char.vocation.value, user.id, char.world,
char.guild_name)
char.id = db_char.id
log.info(f"{cls.get_tag()} Character Registered | {char.name} | {user} ({user.id}){author_log}")
# If we are claiming, different user characters are also passed
if claim:
results.no_user.extend(results.different_user)
ctx.bot.dispatch("characters_registered", user, results.new, results.no_user, author)
ctx.bot.dispatch("character_change", user.id)
return reply
async def save_highscores(self, world: str, key: str, highscores: tibiapy.Highscores) -> int:
"""Saves the highscores of a world and category to the database."""
if highscores is None:
return 0
rows = [(e.rank, key, world, e.name, e.vocation.value, e.value) for e in highscores.entries]
async with self.bot.pool.acquire() as conn: # type: asyncpg.Connection
async with conn.transaction():
# Delete old records
await conn.execute("DELETE FROM highscores_entry WHERE category = $1 AND world = $2", key, world)
# Add current entries
await conn.copy_records_to_table("highscores_entry", records=rows,
columns=["rank", "category", "world", "name", "vocation", "value"])
log.debug(f"{self.tag}[{world}][save_highscores] {key} | {len(rows)} entries saved")
# Update scan times
await conn.execute("""INSERT INTO highscores(world, category, last_scan)
VALUES($1, $2, $3)
ON CONFLICT (world,category)
DO UPDATE SET last_scan = EXCLUDED.last_scan""",
world, key, dt.datetime.now(dt.timezone.utc))
return len(rows)
# endregion
def cog_unload(self):
log.info(f"{self.tag} Unloading cog")
self.scan_highscores_task.cancel()
self.scan_online_chars_task.cancel()
for k, v in self.world_tasks.items():
v.cancel()
def setup(bot):
bot.add_cog(Tracking(bot))
|
apache-2.0
| 2,001,326,355,620,940,500 | 47.838201 | 120 | 0.590449 | false |
ccmbioinfo/mugqic_pipelines
|
bfx/trimmomatic.py
|
1
|
3305
|
#!/usr/bin/env python
################################################################################
# Copyright (C) 2014, 2015 GenAP, McGill University and Genome Quebec Innovation Centre
#
# This file is part of MUGQIC Pipelines.
#
# MUGQIC Pipelines is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MUGQIC Pipelines is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with MUGQIC Pipelines. If not, see <http://www.gnu.org/licenses/>.
################################################################################
# Python Standard Modules
import logging
import os
# MUGQIC Modules
from core.config import *
from core.job import *
log = logging.getLogger(__name__)
def trimmomatic(
input1,
input2,
paired_output1,
unpaired_output1,
paired_output2,
unpaired_output2,
single_output,
quality_offset,
adapter_file,
trim_log
):
if input2: # Paired end reads
inputs = [input1, input2]
outputs = [paired_output1, unpaired_output1, paired_output2, unpaired_output2]
else: # Single end reads
inputs = [input1]
outputs = [single_output]
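    # headcrop_length is optional; the HEADCROP step is added to the command below only when it is configured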
headcrop_length = config.param('trimmomatic', 'headcrop_length', required=False, type='posint')
return Job(
inputs,
outputs + [trim_log],
[
['trimmomatic', 'module_java'],
['trimmomatic', 'module_trimmomatic']
],
# CAUTION: Trimmomatic settings order is IMPORTANT!
# FIRST Illuminaclip settings, THEN headcrop length, THEN trailing min quality, THEN minimum length
command="""\
java -XX:ParallelGCThreads=1 -Xmx{ram} -jar $TRIMMOMATIC_JAR {mode} \\
-threads {threads} \\
-phred{quality_offset} \\
{inputs} \\
{outputs} \\
ILLUMINACLIP:{adapter_file}{illumina_clip_settings}{headcrop_length} \\
TRAILING:{trailing_min_quality} \\
MINLEN:{min_length}{tophred33} \\
2> {trim_log}""".format(
ram=config.param('trimmomatic', 'ram'),
mode = "PE" if input2 else "SE",
threads=config.param('trimmomatic', 'threads', type='posint'),
quality_offset=quality_offset if quality_offset == 64 else "33",
inputs=" \\\n ".join(inputs),
outputs=" \\\n ".join(outputs),
adapter_file=adapter_file,
illumina_clip_settings=config.param('trimmomatic', 'illumina_clip_settings'),
headcrop_length=" \\\n HEADCROP:" + str(headcrop_length) if headcrop_length else "",
trailing_min_quality=config.param('trimmomatic', 'trailing_min_quality', type='int'),
min_length=config.param('trimmomatic', 'min_length', type='posint'),
tophred33=" \\\n TOPHRED33" if quality_offset == 64 else "",
trim_log=trim_log
),
removable_files=[paired_output1, unpaired_output1, paired_output2, unpaired_output2, single_output]
)
|
lgpl-3.0
| 8,791,786,266,534,984,000 | 36.134831 | 107 | 0.634493 | false |
makelove/OpenCV-Python-Tutorial
|
ch38-使用特征匹配和单应性查找对象/findHomography.py
|
1
|
3198
|
# -*- coding: utf-8 -*-
# @Time    : 2017/7/13 5:13 PM
# @Author : play4fun
# @File : findHomography.py
# @Software: PyCharm
"""
findHomography.py: use feature extraction together with findHomography from the calib3d module to find a known object in a complex image
"""
import numpy as np
import cv2
from matplotlib import pyplot as plt
MIN_MATCH_COUNT = 10
img1 = cv2.imread('../data/box.png', 0) # queryImage
img2 = cv2.imread('../data/box_in_scene.png', 0) # trainImage
# Initiate SIFT detector
sift = cv2.xfeatures2d.SIFT_create()
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)
# store all the good matches as per Lowe's ratio test.
good = []
for m, n in matches:
if m.distance < 0.7 * n.distance:
good.append(m)
'''
We only search for the object when there are more than 10 good matches (MIN_MATCH_COUNT = 10); otherwise a
message is shown saying there are not enough matches.
If enough matches are found, we extract the coordinates of the matched keypoints in both images and pass them
to the function to compute the perspective transformation. Once we have the 3x3 transformation matrix, we can
use it to transform the four corner points of the query image into the target image, and then draw the result.
'''
if len(good) > MIN_MATCH_COUNT:
    # Get the coordinates of the matched keypoints
src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    # Third parameter: the method used to compute the homography matrix. The following methods are possible:
    # 0 - a regular method using all the points
    # CV_RANSAC - RANSAC-based robust method
    # CV_LMEDS - Least-Median robust method
    # The fourth parameter (valid range 1 to 10) is the reprojection error threshold used to reject a point pair:
    # if a point of the original image, after the transformation, differs from its corresponding point in the
    # target image by more than this threshold, the pair is treated as an outlier.
    # In the return values, M is the transformation matrix.
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
matchesMask = mask.ravel().tolist()
    # Get the height and width of the original image
h, w = img1.shape
    # Use the obtained transformation matrix to transform the four corners of the original image to get the corresponding coordinates in the target image
pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
dst = cv2.perspectiveTransform(pts, M)
    # The original image is grayscale
img2 = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
else:
print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
matchesMask = None
# Finally, draw the inliers (if the object was found successfully) or the matched keypoints (if it failed).
draw_params = dict(matchColor=(0, 255, 0), # draw matches in green color
singlePointColor=None,
matchesMask=matchesMask, # draw only inliers
flags=2)
img3 = cv2.drawMatches(img1, kp1, img2, kp2, good, None, **draw_params)
plt.imshow(img3, 'gray'), plt.show()
# The target object found in the complex image is outlined in white
|
mit
| 5,273,761,605,515,775,000 | 34.589041 | 135 | 0.666282 | false |
n1cfury/BlackHatPython
|
pic_carver.py
|
1
|
2116
|
import re
import zlib
import cv2
from scapy.all import *
# NOTE: these directories and the pcap file name are placeholders; adjust them for your environment
pictures_directory = "pictures"
faces_directory = "faces"
pcap_file = "capture.pcap"
def banner():
    print "[***] pCAP processing p56 [***]"
def get_http_headers(http_payload):
    try:
        # Split off the header block and parse it into a dictionary
        headers_raw = http_payload[:http_payload.index("\r\n\r\n")+2]
        headers = dict(re.findall(r"(?P<name>.*?): (?P<value>.*?)\r\n", headers_raw))
    except:
        return None
    if "Content-Type" not in headers:
        return None
    return headers
def extract_image(headers, http_payload):
    image = None
    image_type = None
    try:
        if "image" in headers['Content-Type']:
            # Grab the image type and the image body
            image_type = headers['Content-Type'].split("/")[1]
            image = http_payload[http_payload.index("\r\n\r\n")+4:]
            # If the body is compressed, decompress it before saving
            try:
                if "Content-Encoding" in headers.keys():
                    if headers['Content-Encoding'] == "gzip":
                        image = zlib.decompress(image, 16 + zlib.MAX_WBITS)
                    elif headers['Content-Encoding'] == "deflate":
                        image = zlib.decompress(image)
            except:
                pass
    except:
        return None, None
    return image, image_type
def face_detect(path, file_name):
    img = cv2.imread(path)
    cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
    rects = cascade.detectMultiScale(img, 1.3, 4, cv2.cv.CV_HAAR_SCALE_IMAGE, (20, 20))
    if len(rects) == 0:
        return False
    # Convert (x, y, w, h) rectangles into (x1, y1, x2, y2) corner coordinates
    rects[:, 2:] += rects[:, :2]
    for x1, y1, x2, y2 in rects:
        cv2.rectangle(img, (x1, y1), (x2, y2), (127, 255, 0), 2)
    cv2.imwrite("%s/%s-%s" % (faces_directory, pcap_file, file_name), img)
    return True
def http_assembler(pcap_file):
    carved_images = 0
    faces_detected = 0
    a = rdpcap(pcap_file)
    sessions = a.sessions()
    for session in sessions:
        http_payload = ""
        for packet in sessions[session]:
            try:
                # Reassemble the HTTP stream from packets to/from port 80
                if packet[TCP].dport == 80 or packet[TCP].sport == 80:
                    http_payload += str(packet[TCP].payload)
            except:
                pass
        headers = get_http_headers(http_payload)
        if headers is None:
            continue
        image, image_type = extract_image(headers, http_payload)
        if image is not None and image_type is not None:
            # Save the carved image and attempt face detection on it
            file_name = "%s-pic_carver_%d.%s" % (pcap_file, carved_images, image_type)
            fd = open("%s/%s" % (pictures_directory, file_name), "wb")
            fd.write(image)
            fd.close()
            carved_images += 1
            try:
                result = face_detect("%s/%s" % (pictures_directory, file_name), file_name)
                if result is True:
                    faces_detected += 1
            except:
                pass
    return carved_images, faces_detected
banner()
carved_images, faces_detected = http_assembler(pcap_file)
print "Extracted %d images" % carved_images
print "Detected: %d faces" % faces_detected
|
mit
| 6,836,528,785,822,264,000 | 28.802817 | 83 | 0.65879 | false |
pizzathief/scipy
|
scipy/linalg/tests/test_decomp_cossin.py
|
1
|
5753
|
import pytest
import numpy as np
from numpy.random import seed
from numpy.testing import assert_allclose
from scipy.linalg.lapack import _compute_lwork
from scipy.stats import ortho_group, unitary_group
from scipy.linalg import cossin, get_lapack_funcs
REAL_DTYPES = (np.float32, np.float64)
COMPLEX_DTYPES = (np.complex64, np.complex128)
DTYPES = REAL_DTYPES + COMPLEX_DTYPES
@pytest.mark.parametrize('dtype_', DTYPES)
@pytest.mark.parametrize('m, p, q',
[
(2, 1, 1),
(3, 2, 1),
(3, 1, 2),
(4, 2, 2),
(4, 1, 2),
(40, 12, 20),
(40, 30, 1),
(40, 1, 30),
(100, 50, 1),
(100, 50, 50),
])
@pytest.mark.parametrize('swap_sign', [True, False])
def test_cossin(dtype_, m, p, q, swap_sign):
seed(1234)
if dtype_ in COMPLEX_DTYPES:
x = np.array(unitary_group.rvs(m), dtype=dtype_)
else:
x = np.array(ortho_group.rvs(m), dtype=dtype_)
u, cs, vh = cossin(x, p, q,
swap_sign=swap_sign)
assert_allclose(x, u @ cs @ vh, rtol=0., atol=m*1e3*np.finfo(dtype_).eps)
assert u.dtype == dtype_
    # Test for float32 or float64
assert cs.dtype == np.real(u).dtype
assert vh.dtype == dtype_
u, cs, vh = cossin([x[:p, :q], x[:p, q:], x[p:, :q], x[p:, q:]],
swap_sign=swap_sign)
assert_allclose(x, u @ cs @ vh, rtol=0., atol=m*1e3*np.finfo(dtype_).eps)
assert u.dtype == dtype_
assert cs.dtype == np.real(u).dtype
assert vh.dtype == dtype_
_, cs2, vh2 = cossin(x, p, q,
compute_u=False,
swap_sign=swap_sign)
assert_allclose(cs, cs2, rtol=0., atol=10*np.finfo(dtype_).eps)
assert_allclose(vh, vh2, rtol=0., atol=10*np.finfo(dtype_).eps)
u2, cs2, _ = cossin(x, p, q,
compute_vh=False,
swap_sign=swap_sign)
assert_allclose(u, u2, rtol=0., atol=10*np.finfo(dtype_).eps)
assert_allclose(cs, cs2, rtol=0., atol=10*np.finfo(dtype_).eps)
_, cs2, _ = cossin(x, p, q,
compute_u=False,
compute_vh=False,
swap_sign=swap_sign)
assert_allclose(cs, cs2, rtol=0., atol=10*np.finfo(dtype_).eps)
def test_cossin_mixed_types():
seed(1234)
x = np.array(ortho_group.rvs(4), dtype=np.float)
u, cs, vh = cossin([x[:2, :2],
np.array(x[:2, 2:], dtype=np.complex128),
x[2:, :2],
x[2:, 2:]])
assert u.dtype == np.complex128
assert cs.dtype == np.float64
assert vh.dtype == np.complex128
assert_allclose(x, u @ cs @ vh, rtol=0.,
atol=1e4 * np.finfo(np.complex128).eps)
def test_cossin_error_incorrect_subblocks():
with pytest.raises(ValueError, match="be due to missing p, q arguments."):
cossin(([1, 2], [3, 4, 5], [6, 7], [8, 9, 10]))
def test_cossin_error_empty_subblocks():
with pytest.raises(ValueError, match="x11.*empty"):
cossin(([], [], [], []))
with pytest.raises(ValueError, match="x12.*empty"):
cossin(([1, 2], [], [6, 7], [8, 9, 10]))
with pytest.raises(ValueError, match="x21.*empty"):
cossin(([1, 2], [3, 4, 5], [], [8, 9, 10]))
with pytest.raises(ValueError, match="x22.*empty"):
cossin(([1, 2], [3, 4, 5], [2], []))
def test_cossin_error_missing_partitioning():
with pytest.raises(ValueError, match=".*exactly four arrays.* got 2"):
cossin(unitary_group.rvs(2))
with pytest.raises(ValueError, match=".*might be due to missing p, q"):
cossin(unitary_group.rvs(4))
def test_cossin_error_non_iterable():
with pytest.raises(ValueError, match="containing the subblocks of X"):
cossin(12j)
def test_cossin_error_non_square():
with pytest.raises(ValueError, match="only supports square"):
cossin(np.array([[1, 2]]), 1, 1)
def test_cossin_error_partitioning():
x = np.array(ortho_group.rvs(4), dtype=np.float)
with pytest.raises(ValueError, match="invalid p=0.*0<p<4.*"):
cossin(x, 0, 1)
with pytest.raises(ValueError, match="invalid p=4.*0<p<4.*"):
cossin(x, 4, 1)
with pytest.raises(ValueError, match="invalid q=-2.*0<q<4.*"):
cossin(x, 1, -2)
with pytest.raises(ValueError, match="invalid q=5.*0<q<4.*"):
cossin(x, 1, 5)
@pytest.mark.parametrize("dtype_", DTYPES)
def test_cossin_separate(dtype_):
m, p, q = 250, 80, 170
pfx = 'or' if dtype_ in REAL_DTYPES else 'un'
X = ortho_group.rvs(m) if pfx == 'or' else unitary_group.rvs(m)
X = np.array(X, dtype=dtype_)
drv, dlw = get_lapack_funcs((pfx + 'csd', pfx + 'csd_lwork'),[X])
lwval = _compute_lwork(dlw, m, p, q)
lwvals = {'lwork': lwval} if pfx == 'or' else dict(zip(['lwork',
'lrwork'],
lwval))
*_, theta, u1, u2, v1t, v2t, _ = \
drv(X[:p, :q], X[:p, q:], X[p:, :q], X[p:, q:], **lwvals)
(u1_2, u2_2), theta2, (v1t_2, v2t_2) = cossin(X, p, q, separate=True)
assert_allclose(u1_2, u1, rtol=0., atol=10*np.finfo(dtype_).eps)
assert_allclose(u2_2, u2, rtol=0., atol=10*np.finfo(dtype_).eps)
assert_allclose(v1t_2, v1t, rtol=0., atol=10*np.finfo(dtype_).eps)
assert_allclose(v2t_2, v2t, rtol=0., atol=10*np.finfo(dtype_).eps)
assert_allclose(theta2, theta, rtol=0., atol=10*np.finfo(dtype_).eps)
|
bsd-3-clause
| -8,984,024,263,995,868,000 | 36.357143 | 78 | 0.530506 | false |
Vagab0nd/SiCKRAGE
|
lib3/feedparser/__init__.py
|
1
|
2256
|
# Copyright 2010-2020 Kurt McKee <[email protected]>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is part of feedparser.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE."""
from __future__ import absolute_import
from __future__ import unicode_literals
from .api import parse
from .datetimes import registerDateHandler
from .exceptions import *
from .util import FeedParserDict
__author__ = 'Kurt McKee <[email protected]>'
__license__ = 'BSD 2-clause'
__version__ = '6.0.0'
# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "feedparser/%s +https://github.com/kurtmckee/feedparser/" % __version__
# If you want feedparser to automatically resolve all relative URIs, set this
# to 1.
RESOLVE_RELATIVE_URIS = 1
# If you want feedparser to automatically sanitize all potentially unsafe
# HTML content, set this to 1.
SANITIZE_HTML = 1
|
gpl-3.0
| 2,770,024,409,418,123,000 | 43.235294 | 84 | 0.77305 | false |
crosslinks/XlinkAnalyzer
|
pytests/XlaGuiTests.py
|
1
|
2262
|
import chimera
import unittest
from os import path
import xlinkanalyzer
from xlinkanalyzer import gui
RUNME = False
description = "Base classes for testing gui"
class XlaBaseTest(unittest.TestCase):
def setUp(self, mPaths, cPath):
mPath = xlinkanalyzer.__path__[0]
xlaTestPath = path.join(path.split(mPath)[0], 'pytests/test_data')
self.xlaTestMPaths = [path.join(xlaTestPath, _path) for _path in mPaths]
self.xlaTestCPath = path.join(xlaTestPath, cPath)
[chimera.openModels.open(_path) for _path in self.xlaTestMPaths]
self.models = chimera.openModels.list()
gui.show_dialog()
guiWin = xlinkanalyzer.get_gui()
guiWin.configFrame.resMngr.loadAssembly(guiWin, self.xlaTestCPath)
guiWin.configFrame.clear()
guiWin.configFrame.update()
guiWin.configFrame.mainWindow.setTitle(guiWin.configFrame.config.file)
guiWin.configFrame.config.state = "unchanged"
self.config = guiWin.configFrame.config
class TestLoadFromStructure(unittest.TestCase):
def setUp(self, mPaths):
mPath = xlinkanalyzer.__path__[0]
xlaTestPath = path.join(path.split(mPath)[0], 'pytests/test_data')
self.xlaTestMPaths = [path.join(xlaTestPath, _path) for _path in mPaths]
[chimera.openModels.open(_path) for _path in self.xlaTestMPaths]
self.models = chimera.openModels.list()
gui.show_dialog()
guiWin = xlinkanalyzer.get_gui()
guiWin.configFrame.resMngr.config.loadFromStructure(self.models[-1])
guiWin.configFrame.clear()
guiWin.configFrame.update()
guiWin.configFrame.config.state = "changed"
self.config = guiWin.configFrame.config
class XlaJustOpenXlaTest(unittest.TestCase):
def setUp(self, mPaths, cPath):
mPath = xlinkanalyzer.__path__[0]
xlaTestPath = path.join(path.split(mPath)[0], 'pytests/test_data')
self.xlaTestMPaths = [path.join(xlaTestPath, _path) for _path in mPaths]
self.xlaTestCPath = path.join(xlaTestPath, cPath)
[chimera.openModels.open(_path) for _path in self.xlaTestMPaths]
self.models = chimera.openModels.list()
gui.show_dialog()
guiWin = xlinkanalyzer.get_gui()
|
gpl-2.0
| -6,684,249,693,731,408,000 | 32.279412 | 80 | 0.679487 | false |
mozilla/firefox-flicks
|
vendor-local/lib/python/celery/tests/events/test_state.py
|
1
|
11017
|
from __future__ import absolute_import
from time import time
from itertools import count
from celery import states
from celery.events import Event
from celery.events.state import State, Worker, Task, HEARTBEAT_EXPIRE_WINDOW
from celery.utils import uuid
from celery.tests.utils import Case
class replay(object):
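    """Replays a list of recorded events into the given State instance."""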
def __init__(self, state):
self.state = state
self.rewind()
self.setup()
def setup(self):
pass
def __iter__(self):
return self
def __next__(self):
try:
self.state.event(self.events[self.position()])
except IndexError:
raise StopIteration()
next = __next__
def rewind(self):
self.position = count(0).next
return self
def play(self):
for _ in self:
pass
class ev_worker_online_offline(replay):
def setup(self):
self.events = [
Event('worker-online', hostname='utest1'),
Event('worker-offline', hostname='utest1'),
]
class ev_worker_heartbeats(replay):
def setup(self):
self.events = [
Event('worker-heartbeat', hostname='utest1',
timestamp=time() - HEARTBEAT_EXPIRE_WINDOW * 2),
Event('worker-heartbeat', hostname='utest1'),
]
class ev_task_states(replay):
def setup(self):
tid = self.tid = uuid()
self.events = [
Event('task-received', uuid=tid, name='task1',
args='(2, 2)', kwargs="{'foo': 'bar'}",
retries=0, eta=None, hostname='utest1'),
Event('task-started', uuid=tid, hostname='utest1'),
Event('task-revoked', uuid=tid, hostname='utest1'),
Event('task-retried', uuid=tid, exception="KeyError('bar')",
traceback='line 2 at main', hostname='utest1'),
Event('task-failed', uuid=tid, exception="KeyError('foo')",
traceback='line 1 at main', hostname='utest1'),
Event('task-succeeded', uuid=tid, result='4',
runtime=0.1234, hostname='utest1'),
]
class ev_snapshot(replay):
def setup(self):
self.events = [
Event('worker-online', hostname='utest1'),
Event('worker-online', hostname='utest2'),
Event('worker-online', hostname='utest3'),
]
for i in range(20):
worker = not i % 2 and 'utest2' or 'utest1'
type = not i % 2 and 'task2' or 'task1'
self.events.append(Event('task-received', name=type,
uuid=uuid(), hostname=worker))
class test_Worker(Case):
def test_survives_missing_timestamp(self):
worker = Worker(hostname='foo')
worker.on_heartbeat(timestamp=None)
self.assertEqual(worker.heartbeats, [])
def test_repr(self):
self.assertTrue(repr(Worker(hostname='foo')))
class test_Task(Case):
def test_info(self):
task = Task(uuid='abcdefg',
name='tasks.add',
args='(2, 2)',
kwargs='{}',
retries=2,
result=42,
eta=1,
runtime=0.0001,
expires=1,
exception=1,
received=time() - 10,
started=time() - 8,
exchange='celery',
routing_key='celery',
succeeded=time())
self.assertEqual(sorted(list(task._info_fields)),
sorted(task.info().keys()))
self.assertEqual(sorted(list(task._info_fields + ('received', ))),
sorted(task.info(extra=('received', ))))
self.assertEqual(sorted(['args', 'kwargs']),
sorted(task.info(['args', 'kwargs']).keys()))
def test_ready(self):
task = Task(uuid='abcdefg',
name='tasks.add')
task.on_received(timestamp=time())
self.assertFalse(task.ready)
task.on_succeeded(timestamp=time())
self.assertTrue(task.ready)
def test_sent(self):
task = Task(uuid='abcdefg',
name='tasks.add')
task.on_sent(timestamp=time())
self.assertEqual(task.state, states.PENDING)
def test_merge(self):
task = Task()
task.on_failed(timestamp=time())
task.on_started(timestamp=time())
task.on_received(timestamp=time(), name='tasks.add', args=(2, 2))
self.assertEqual(task.state, states.FAILURE)
self.assertEqual(task.name, 'tasks.add')
self.assertTupleEqual(task.args, (2, 2))
task.on_retried(timestamp=time())
self.assertEqual(task.state, states.RETRY)
def test_repr(self):
self.assertTrue(repr(Task(uuid='xxx', name='tasks.add')))
class test_State(Case):
def test_repr(self):
self.assertTrue(repr(State()))
def test_worker_online_offline(self):
r = ev_worker_online_offline(State())
r.next()
self.assertTrue(r.state.alive_workers())
self.assertTrue(r.state.workers['utest1'].alive)
r.play()
self.assertFalse(r.state.alive_workers())
self.assertFalse(r.state.workers['utest1'].alive)
def test_itertasks(self):
s = State()
s.tasks = {'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'}
self.assertEqual(len(list(s.itertasks(limit=2))), 2)
def test_worker_heartbeat_expire(self):
r = ev_worker_heartbeats(State())
r.next()
self.assertFalse(r.state.alive_workers())
self.assertFalse(r.state.workers['utest1'].alive)
r.play()
self.assertTrue(r.state.alive_workers())
self.assertTrue(r.state.workers['utest1'].alive)
def test_task_states(self):
r = ev_task_states(State())
# RECEIVED
r.next()
self.assertTrue(r.tid in r.state.tasks)
task = r.state.tasks[r.tid]
self.assertEqual(task.state, states.RECEIVED)
self.assertTrue(task.received)
self.assertEqual(task.timestamp, task.received)
self.assertEqual(task.worker.hostname, 'utest1')
# STARTED
r.next()
self.assertTrue(r.state.workers['utest1'].alive,
'any task event adds worker heartbeat')
self.assertEqual(task.state, states.STARTED)
self.assertTrue(task.started)
self.assertEqual(task.timestamp, task.started)
self.assertEqual(task.worker.hostname, 'utest1')
# REVOKED
r.next()
self.assertEqual(task.state, states.REVOKED)
self.assertTrue(task.revoked)
self.assertEqual(task.timestamp, task.revoked)
self.assertEqual(task.worker.hostname, 'utest1')
# RETRY
r.next()
self.assertEqual(task.state, states.RETRY)
self.assertTrue(task.retried)
self.assertEqual(task.timestamp, task.retried)
self.assertEqual(task.worker.hostname, 'utest1')
self.assertEqual(task.exception, "KeyError('bar')")
self.assertEqual(task.traceback, 'line 2 at main')
# FAILURE
r.next()
self.assertEqual(task.state, states.FAILURE)
self.assertTrue(task.failed)
self.assertEqual(task.timestamp, task.failed)
self.assertEqual(task.worker.hostname, 'utest1')
self.assertEqual(task.exception, "KeyError('foo')")
self.assertEqual(task.traceback, 'line 1 at main')
# SUCCESS
r.next()
self.assertEqual(task.state, states.SUCCESS)
self.assertTrue(task.succeeded)
self.assertEqual(task.timestamp, task.succeeded)
self.assertEqual(task.worker.hostname, 'utest1')
self.assertEqual(task.result, '4')
self.assertEqual(task.runtime, 0.1234)
def assertStateEmpty(self, state):
self.assertFalse(state.tasks)
self.assertFalse(state.workers)
self.assertFalse(state.event_count)
self.assertFalse(state.task_count)
def assertState(self, state):
self.assertTrue(state.tasks)
self.assertTrue(state.workers)
self.assertTrue(state.event_count)
self.assertTrue(state.task_count)
def test_freeze_while(self):
s = State()
r = ev_snapshot(s)
r.play()
def work():
pass
s.freeze_while(work, clear_after=True)
self.assertFalse(s.event_count)
s2 = State()
r = ev_snapshot(s2)
r.play()
s2.freeze_while(work, clear_after=False)
self.assertTrue(s2.event_count)
def test_clear_tasks(self):
s = State()
r = ev_snapshot(s)
r.play()
self.assertTrue(s.tasks)
s.clear_tasks(ready=False)
self.assertFalse(s.tasks)
def test_clear(self):
r = ev_snapshot(State())
r.play()
self.assertTrue(r.state.event_count)
self.assertTrue(r.state.workers)
self.assertTrue(r.state.tasks)
self.assertTrue(r.state.task_count)
r.state.clear()
self.assertFalse(r.state.event_count)
self.assertFalse(r.state.workers)
self.assertTrue(r.state.tasks)
self.assertFalse(r.state.task_count)
r.state.clear(False)
self.assertFalse(r.state.tasks)
def test_task_types(self):
r = ev_snapshot(State())
r.play()
self.assertEqual(sorted(r.state.task_types()), ['task1', 'task2'])
def test_tasks_by_timestamp(self):
r = ev_snapshot(State())
r.play()
self.assertEqual(len(r.state.tasks_by_timestamp()), 20)
def test_tasks_by_type(self):
r = ev_snapshot(State())
r.play()
self.assertEqual(len(r.state.tasks_by_type('task1')), 10)
self.assertEqual(len(r.state.tasks_by_type('task2')), 10)
def test_alive_workers(self):
r = ev_snapshot(State())
r.play()
self.assertEqual(len(r.state.alive_workers()), 3)
def test_tasks_by_worker(self):
r = ev_snapshot(State())
r.play()
self.assertEqual(len(r.state.tasks_by_worker('utest1')), 10)
self.assertEqual(len(r.state.tasks_by_worker('utest2')), 10)
def test_survives_unknown_worker_event(self):
s = State()
s.worker_event('worker-unknown-event-xxx', {'foo': 'bar'})
s.worker_event('worker-unknown-event-xxx', {'hostname': 'xxx',
'foo': 'bar'})
def test_survives_unknown_task_event(self):
s = State()
s.task_event('task-unknown-event-xxx', {'foo': 'bar',
'uuid': 'x',
'hostname': 'y'})
def test_callback(self):
scratch = {}
def callback(state, event):
scratch['recv'] = True
s = State(callback=callback)
s.event({'type': 'worker-online'})
self.assertTrue(scratch.get('recv'))
|
bsd-3-clause
| 9,053,397,113,431,262,000 | 30.74928 | 76 | 0.564764 | false |
sknepneklab/SAMoS
|
configurations/filaments_on_plane/make_filaments.py
|
1
|
2429
|
# * *************************************************************
# *
# * Soft Active Mater on Surfaces (SAMoS)
# *
# * Author: Rastko Sknepnek
# *
# * Division of Physics
# * School of Engineering, Physics and Mathematics
# * University of Dundee
# *
# * (c) 2013, 2014
# *
# * School of Science and Engineering
# * School of Life Sciences
# * University of Dundee
# *
# * (c) 2015, 2016
# *
# * Author: Silke Henkes
# *
# * Department of Physics
# * Institute for Complex Systems and Mathematical Biology
# * University of Aberdeen
# *
# * (c) 2014, 2015, 2016
# *
# * This program cannot be used, copied, or modified without
# * explicit written permission of the authors.
# *
# * ***************************************************************
# Utility code for building an input file for multiple filaments in plane
from datetime import *
from random import uniform, seed
from math import *
import argparse
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument("-N", "--N", type=int, default=20, help="number of particles in a polymer")
parser.add_argument("-M", "--M", type=int, default=50, help="number of polymers")
parser.add_argument("-o", "--output", type=str, default='filaments.input', help="output file")
parser.add_argument("-i", "--input", type=str, default='filaments.xyz', help="output file")
args = parser.parse_args()
X = []
Y = []
with open(args.input,'r') as inp:
lines = inp.readlines()
for i in range(2,len(lines)):
line = lines[i].strip()
x, y = map(float,line.split()[1:3])
X.append(x)
Y.append(y)
bonds = []
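# Bond consecutive beads within each filament; no bonds are created across filament boundaries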
for i in range(len(X)):
if i == 0 or (i+1) % args.N != 0:
bonds.append((i,i+1))
angles = []
for j in range(args.M):
for i in range(1,args.N-1):
angles.append((j*args.N+i-1,j*args.N+i,j*args.N+i+1))
out = open(args.output,'w')
out.write('keys: id molecule type x y nx ny\n')
mol = 0
for i in range(len(X)):
phi = uniform(0,2*pi)
nx, ny = cos(phi), sin(phi)
if i > 0 and i % args.N == 0: mol += 1
out.write('%d %d %d %f %f %f %f\n' % (i,mol,1,X[i],Y[i],nx,ny))
out.close()
out = open(args.output.split('.')[0]+'.bonds','w')
idx = 0
for (i,j) in bonds:
out.write('%d %d %d %d\n' % (idx,1,i,j))
idx += 1
out.close()
out = open(args.output.split('.')[0]+'.angles','w')
idx = 0
for (i,j,k) in angles:
out.write('%d %d %d %d %d\n' % (idx,1,i,j,k))
idx += 1
out.close()
|
gpl-3.0
| 5,429,665,083,915,058,000 | 25.402174 | 95 | 0.578016 | false |
lauromoraes/redes
|
MyTCPRequestHandler.py
|
1
|
1443
|
import logging
import socket
import threading
import SocketServer
import time
from recvall import *
from calc import *
logging.basicConfig( level = logging.DEBUG, format = "%(name)s: %(message)s", )
class MyTCPRequestHandler(SocketServer.BaseRequestHandler):
def __init__(self, request, client_address, server):
self.logger = logging.getLogger('MyTCPRequestHandler')
self.logger.debug('__init__')
SocketServer.BaseRequestHandler.__init__(self, request, client_address, server)
return
def setup(self):
self.logger.debug('setup')
return SocketServer.BaseRequestHandler.setup(self)
def handle(self):
self.logger.debug('handle')
data = recvall(self.request, 2)
#print(self.request.accept()[1])
#current_thread = threading.currentThread()
#resp = "%s, %s" % (current_thread.getName(), data)
#self.logger.debug('Thread: %s | recv()->"%s"', current_thread.getName(), data)
#self.logger.debug('Threads: %s' % str( [ t.getName() for t in threading.enumerate()] ) )
resp = calc(data)
sent = 0
size = 1024*5
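        # Send the response back in 5 KB chunks, pausing briefly after each chunk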
while(sent < len(resp)):
if(sent+size <= len(resp)):
sent += self.request.send(resp[sent:sent+size])
else:
sent += self.request.send(resp[sent:])
time.sleep(0.1)
#self.request.sendall("data")
self.request.shutdown(socket.SHUT_WR)
self.request.close()
#time.sleep(3)
return
def finish(self):
self.logger.debug('finish')
return SocketServer.BaseRequestHandler.finish(self)
|
gpl-2.0
| -1,314,863,412,308,185,900 | 29.0625 | 91 | 0.699931 | false |
vnbrs/project-euler
|
problem-13.py
|
1
|
5266
|
n_list = [37107287533902102798797998220837590246510135740250,
46376937677490009712648124896970078050417018260538,
74324986199524741059474233309513058123726617309629,
91942213363574161572522430563301811072406154908250,
23067588207539346171171980310421047513778063246676,
89261670696623633820136378418383684178734361726757,
28112879812849979408065481931592621691275889832738,
44274228917432520321923589422876796487670272189318,
47451445736001306439091167216856844588711603153276,
70386486105843025439939619828917593665686757934951,
62176457141856560629502157223196586755079324193331,
64906352462741904929101432445813822663347944758178,
92575867718337217661963751590579239728245598838407,
58203565325359399008402633568948830189458628227828,
80181199384826282014278194139940567587151170094390,
35398664372827112653829987240784473053190104293586,
86515506006295864861532075273371959191420517255829,
71693888707715466499115593487603532921714970056938,
54370070576826684624621495650076471787294438377604,
53282654108756828443191190634694037855217779295145,
36123272525000296071075082563815656710885258350721,
45876576172410976447339110607218265236877223636045,
17423706905851860660448207621209813287860733969412,
81142660418086830619328460811191061556940512689692,
51934325451728388641918047049293215058642563049483,
62467221648435076201727918039944693004732956340691,
15732444386908125794514089057706229429197107928209,
55037687525678773091862540744969844508330393682126,
18336384825330154686196124348767681297534375946515,
80386287592878490201521685554828717201219257766954,
78182833757993103614740356856449095527097864797581,
16726320100436897842553539920931837441497806860984,
48403098129077791799088218795327364475675590848030,
87086987551392711854517078544161852424320693150332,
59959406895756536782107074926966537676326235447210,
69793950679652694742597709739166693763042633987085,
41052684708299085211399427365734116182760315001271,
65378607361501080857009149939512557028198746004375,
35829035317434717326932123578154982629742552737307,
94953759765105305946966067683156574377167401875275,
88902802571733229619176668713819931811048770190271,
25267680276078003013678680992525463401061632866526,
36270218540497705585629946580636237993140746255962,
24074486908231174977792365466257246923322810917141,
91430288197103288597806669760892938638285025333403,
34413065578016127815921815005561868836468420090470,
23053081172816430487623791969842487255036638784583,
11487696932154902810424020138335124462181441773470,
63783299490636259666498587618221225225512486764533,
67720186971698544312419572409913959008952310058822,
95548255300263520781532296796249481641953868218774,
76085327132285723110424803456124867697064507995236,
37774242535411291684276865538926205024910326572967,
23701913275725675285653248258265463092207058596522,
29798860272258331913126375147341994889534765745501,
18495701454879288984856827726077713721403798879715,
38298203783031473527721580348144513491373226651381,
34829543829199918180278916522431027392251122869539,
40957953066405232632538044100059654939159879593635,
29746152185502371307642255121183693803580388584903,
41698116222072977186158236678424689157993532961922,
62467957194401269043877107275048102390895523597457,
23189706772547915061505504953922979530901129967519,
86188088225875314529584099251203829009407770775672,
11306739708304724483816533873502340845647058077308,
82959174767140363198008187129011875491310547126581,
97623331044818386269515456334926366572897563400500,
42846280183517070527831839425882145521227251250327,
55121603546981200581762165212827652751691296897789,
32238195734329339946437501907836945765883352399886,
75506164965184775180738168837861091527357929701337,
62177842752192623401942399639168044983993173312731,
32924185707147349566916674687634660915035914677504,
99518671430235219628894890102423325116913619626622,
73267460800591547471830798392868535206946944540724,
76841822524674417161514036427982273348055556214818,
97142617910342598647204516893989422179826088076852,
87783646182799346313767754307809363333018982642090,
10848802521674670883215120185883543223812876952786,
71329612474782464538636993009049310363619763878039,
62184073572399794223406235393808339651327408011116,
66627891981488087797941876876144230030984490851411,
60661826293682836764744779239180335110989069790714,
85786944089552990653640447425576083659976645795096,
66024396409905389607120198219976047599490197230297,
64913982680032973156037120041377903785566085089252,
16730939319872750275468906903707539413042652315011,
94809377245048795150954100921645863754710598436791,
78639167021187492431995700641917969777599028300699,
15368713711936614952811305876380278410754449733078,
40789923115535562561142322423255033685442488917353,
44889911501440648020369068063960672322193204149535,
41503128880339536053299340368006977710650566631954,
81234880673210146739058568557934581403627822703280,
82616570773948327592232845941706525094512325230608,
22918802058777319719839450180888072429661980811197,
77158542502016545090413245809786882778948721859617,
72107838435069186155435662884062257473692284509516,
20849603980134001723930671666823555245252804609722,
53503534226472524250874054075591789781264330331690]
s = 0
for i in n_list:
s += i
print(str(s)[0:10])
|
mit
| 6,598,877,877,318,739,000 | 48.679245 | 61 | 0.955564 | false |
Caesurus/CTF_Writeups
|
2019-PicoCTF/exploits/exploit_handyshellcode.py
|
1
|
1279
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# This exploit template was generated via:
# $ pwn template
from pwn import *
# Set up pwntools for the correct architecture
context.update(arch='i386')
exe = '/problems/handy-shellcode_4_b724dbfabf610aaa4841af644535be30/vuln'
# Many built-in settings can be controlled on the command-line and show up
# in "args". For example, to dump all data sent/received, and disable ASLR
# for all created processes...
# ./exploit.py DEBUG NOASLR
def start(argv=[], *a, **kw):
'''Start the exploit against the target.'''
if args.GDB:
return gdb.debug([exe] + argv, gdbscript=gdbscript, *a, **kw)
else:
return process([exe] + argv, *a, **kw)
# Specify your GDB script here for debugging
# GDB will be launched if the exploit is run via e.g.
# ./exploit.py GDB
gdbscript = '''
continue
'''.format(**locals())
#===========================================================
# EXPLOIT GOES HERE
#===========================================================
io = start()
shellcode = asm(shellcraft.sh())
io.sendline(shellcode)
# payload = fit({
# 32: 0xdeadbeef,
# 'iaaa': [1, 2, 'Hello', 3]
# }, length=128)
# io.send(payload)
# flag = io.recv(...)
# log.success(flag)
io.interactive()
|
apache-2.0
| -1,788,899,926,752,616,000 | 26.212766 | 75 | 0.593432 | false |
cloudtools/awacs
|
awacs/s3.py
|
1
|
7683
|
# Copyright (c) 2012-2021, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "Amazon S3"
prefix = "s3"
class Action(BaseAction):
def __init__(self, action: str = None) -> None:
super().__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
# account is empty for S3 buckets
if not resource.startswith(("accesspoint/", "job/", "storage-lens/")):
account = ""
super().__init__(
service=prefix, resource=resource, region=region, account=account
)
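# Illustrative usage sketch (resource and account values below are invented, not
# part of this module); it mirrors the account handling in ARN.__init__ above:
#   ARN("examplebucket/*")                             # account is dropped for plain buckets/objects
#   ARN("accesspoint/my-ap", account="123456789012")   # account is kept for accesspoint/job/storage-lens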
AbortMultipartUpload = Action("AbortMultipartUpload")
BypassGovernanceRetention = Action("BypassGovernanceRetention")
CreateAccessPoint = Action("CreateAccessPoint")
CreateAccessPointForObjectLambda = Action("CreateAccessPointForObjectLambda")
CreateBucket = Action("CreateBucket")
CreateJob = Action("CreateJob")
DeleteAccessPoint = Action("DeleteAccessPoint")
DeleteAccessPointForObjectLambda = Action("DeleteAccessPointForObjectLambda")
DeleteAccessPointPolicy = Action("DeleteAccessPointPolicy")
DeleteAccessPointPolicyForObjectLambda = Action(
"DeleteAccessPointPolicyForObjectLambda"
)
DeleteBucket = Action("DeleteBucket")
DeleteBucketOwnershipControls = Action("DeleteBucketOwnershipControls")
DeleteBucketPolicy = Action("DeleteBucketPolicy")
DeleteBucketWebsite = Action("DeleteBucketWebsite")
DeleteJobTagging = Action("DeleteJobTagging")
DeleteObject = Action("DeleteObject")
DeleteObjectTagging = Action("DeleteObjectTagging")
DeleteObjectVersion = Action("DeleteObjectVersion")
DeleteObjectVersionTagging = Action("DeleteObjectVersionTagging")
DeleteStorageLensConfiguration = Action("DeleteStorageLensConfiguration")
DeleteStorageLensConfigurationTagging = Action("DeleteStorageLensConfigurationTagging")
DescribeJob = Action("DescribeJob")
GetAccelerateConfiguration = Action("GetAccelerateConfiguration")
GetAccessPoint = Action("GetAccessPoint")
GetAccessPointConfigurationForObjectLambda = Action(
"GetAccessPointConfigurationForObjectLambda"
)
GetAccessPointForObjectLambda = Action("GetAccessPointForObjectLambda")
GetAccessPointPolicy = Action("GetAccessPointPolicy")
GetAccessPointPolicyForObjectLambda = Action("GetAccessPointPolicyForObjectLambda")
GetAccessPointPolicyStatus = Action("GetAccessPointPolicyStatus")
GetAccessPointPolicyStatusForObjectLambda = Action(
"GetAccessPointPolicyStatusForObjectLambda"
)
GetAccountPublicAccessBlock = Action("GetAccountPublicAccessBlock")
GetAnalyticsConfiguration = Action("GetAnalyticsConfiguration")
GetBucketAcl = Action("GetBucketAcl")
GetBucketCORS = Action("GetBucketCORS")
GetBucketLocation = Action("GetBucketLocation")
GetBucketLogging = Action("GetBucketLogging")
GetBucketNotification = Action("GetBucketNotification")
GetBucketObjectLockConfiguration = Action("GetBucketObjectLockConfiguration")
GetBucketOwnershipControls = Action("GetBucketOwnershipControls")
GetBucketPolicy = Action("GetBucketPolicy")
GetBucketPolicyStatus = Action("GetBucketPolicyStatus")
GetBucketPublicAccessBlock = Action("GetBucketPublicAccessBlock")
GetBucketRequestPayment = Action("GetBucketRequestPayment")
GetBucketTagging = Action("GetBucketTagging")
GetBucketVersioning = Action("GetBucketVersioning")
GetBucketWebsite = Action("GetBucketWebsite")
GetEncryptionConfiguration = Action("GetEncryptionConfiguration")
GetIntelligentTieringConfiguration = Action("GetIntelligentTieringConfiguration")
GetInventoryConfiguration = Action("GetInventoryConfiguration")
GetIpConfiguration = Action("GetIpConfiguration")
GetJobTagging = Action("GetJobTagging")
GetLifecycleConfiguration = Action("GetLifecycleConfiguration")
GetMetricsConfiguration = Action("GetMetricsConfiguration")
GetObject = Action("GetObject")
GetObjectAcl = Action("GetObjectAcl")
GetObjectLegalHold = Action("GetObjectLegalHold")
GetObjectRetention = Action("GetObjectRetention")
GetObjectTagging = Action("GetObjectTagging")
GetObjectTorrent = Action("GetObjectTorrent")
GetObjectVersion = Action("GetObjectVersion")
GetObjectVersionAcl = Action("GetObjectVersionAcl")
GetObjectVersionForReplication = Action("GetObjectVersionForReplication")
GetObjectVersionTagging = Action("GetObjectVersionTagging")
GetObjectVersionTorrent = Action("GetObjectVersionTorrent")
GetReplicationConfiguration = Action("GetReplicationConfiguration")
GetStorageLensConfiguration = Action("GetStorageLensConfiguration")
GetStorageLensConfigurationTagging = Action("GetStorageLensConfigurationTagging")
GetStorageLensDashboard = Action("GetStorageLensDashboard")
HeadBucket = Action("HeadBucket")
ListAccessPoints = Action("ListAccessPoints")
ListAccessPointsForObjectLambda = Action("ListAccessPointsForObjectLambda")
ListAllMyBuckets = Action("ListAllMyBuckets")
ListBucket = Action("ListBucket")
ListBucketByTags = Action("ListBucketByTags")
ListBucketMultipartUploads = Action("ListBucketMultipartUploads")
ListBucketVersions = Action("ListBucketVersions")
ListJobs = Action("ListJobs")
ListMultipartUploadParts = Action("ListMultipartUploadParts")
ListObjects = Action("ListObjects")
ListStorageLensConfigurations = Action("ListStorageLensConfigurations")
ObjectOwnerOverrideToBucketOwner = Action("ObjectOwnerOverrideToBucketOwner")
PutAccelerateConfiguration = Action("PutAccelerateConfiguration")
PutAccessPointConfigurationForObjectLambda = Action(
"PutAccessPointConfigurationForObjectLambda"
)
PutAccessPointPolicy = Action("PutAccessPointPolicy")
PutAccessPointPolicyForObjectLambda = Action("PutAccessPointPolicyForObjectLambda")
PutAccountPublicAccessBlock = Action("PutAccountPublicAccessBlock")
PutAnalyticsConfiguration = Action("PutAnalyticsConfiguration")
PutBucketAcl = Action("PutBucketAcl")
PutBucketCORS = Action("PutBucketCORS")
PutBucketLogging = Action("PutBucketLogging")
PutBucketNotification = Action("PutBucketNotification")
PutBucketObjectLockConfiguration = Action("PutBucketObjectLockConfiguration")
PutBucketOwnershipControls = Action("PutBucketOwnershipControls")
PutBucketPolicy = Action("PutBucketPolicy")
PutBucketPublicAccessBlock = Action("PutBucketPublicAccessBlock")
PutBucketRequestPayment = Action("PutBucketRequestPayment")
PutBucketTagging = Action("PutBucketTagging")
PutBucketVersioning = Action("PutBucketVersioning")
PutBucketWebsite = Action("PutBucketWebsite")
PutEncryptionConfiguration = Action("PutEncryptionConfiguration")
PutIntelligentTieringConfiguration = Action("PutIntelligentTieringConfiguration")
PutInventoryConfiguration = Action("PutInventoryConfiguration")
PutIpConfiguration = Action("PutIpConfiguration")
PutJobTagging = Action("PutJobTagging")
PutLifecycleConfiguration = Action("PutLifecycleConfiguration")
PutMetricsConfiguration = Action("PutMetricsConfiguration")
PutObject = Action("PutObject")
PutObjectAcl = Action("PutObjectAcl")
PutObjectLegalHold = Action("PutObjectLegalHold")
PutObjectRetention = Action("PutObjectRetention")
PutObjectTagging = Action("PutObjectTagging")
PutObjectVersionAcl = Action("PutObjectVersionAcl")
PutObjectVersionTagging = Action("PutObjectVersionTagging")
PutReplicationConfiguration = Action("PutReplicationConfiguration")
PutStorageLensConfiguration = Action("PutStorageLensConfiguration")
PutStorageLensConfigurationTagging = Action("PutStorageLensConfigurationTagging")
ReplicateDelete = Action("ReplicateDelete")
ReplicateObject = Action("ReplicateObject")
ReplicateTags = Action("ReplicateTags")
RestoreObject = Action("RestoreObject")
UpdateJobPriority = Action("UpdateJobPriority")
UpdateJobStatus = Action("UpdateJobStatus")
|
bsd-2-clause
| 387,117,598,995,897,500 | 47.936306 | 88 | 0.833398 | false |
eayunstack/neutron
|
neutron/pecan_wsgi/app.py
|
1
|
1841
|
# Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
from neutron.pecan_wsgi.controllers import root
from neutron.pecan_wsgi import hooks
from neutron.pecan_wsgi import startup
def versions_factory(global_config, **local_config):
return pecan.make_app(root.RootController())
def v2_factory(global_config, **local_config):
# Processing Order:
    #   As a request enters, lower-priority hooks are called before higher-priority ones.
    #   The response from the controller is passed from higher priority to lower.
app_hooks = [
hooks.UserFilterHook(), # priority 90
hooks.ContextHook(), # priority 95
hooks.ExceptionTranslationHook(), # priority 100
hooks.BodyValidationHook(), # priority 120
hooks.OwnershipValidationHook(), # priority 125
hooks.QuotaEnforcementHook(), # priority 130
hooks.NotifierHook(), # priority 135
hooks.QueryParametersHook(), # priority 139
hooks.PolicyHook(), # priority 140
]
app = pecan.make_app(root.V2Controller(),
debug=False,
force_canonical=False,
hooks=app_hooks,
guess_content_type_from_ext=True)
startup.initialize_all()
return app
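# Illustrative wiring note (an assumption, not taken from this file): these
# factories are meant to be referenced from a paste-deploy style config, e.g.
# something like "paste.app_factory = neutron.pecan_wsgi.app:v2_factory"; the
# exact section and pipeline names depend on the deployment's api-paste.ini.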
|
apache-2.0
| -6,988,175,976,737,177,000 | 37.354167 | 78 | 0.668658 | false |
openstack/nova-solver-scheduler
|
nova_solverscheduler/tests/scheduler/solvers/test_solvers.py
|
1
|
3383
|
# Copyright (c) 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import test
from nova_solverscheduler.scheduler import solvers
from nova_solverscheduler.scheduler.solvers import constraints
from nova_solverscheduler.scheduler.solvers import costs
from nova_solverscheduler import solver_scheduler_exception as exception
class FakeCost1(costs.BaseCost):
def get_components(self, variables, hosts, filter_properties):
pass
class FakeCost2(costs.BaseCost):
def get_components(self, variables, hosts, filter_properties):
pass
class FakeConstraint1(constraints.BaseConstraint):
def get_components(self, variables, hosts, filter_properties):
pass
class FakeConstraint2(constraints.BaseConstraint):
def get_components(self, variables, hosts, filter_properties):
pass
class TestBaseHostSolver(test.NoDBTestCase):
"""Test case for scheduler base solver."""
def setUp(self):
super(TestBaseHostSolver, self).setUp()
self.solver = solvers.BaseHostSolver()
@mock.patch.object(costs.CostHandler, 'get_all_classes')
def test_get_cost_classes_normal(self, getcls):
self.flags(scheduler_solver_costs=['FakeCost1'],
group='solver_scheduler')
getcls.return_value = [FakeCost1, FakeCost2]
cost_classes = self.solver._get_cost_classes()
self.assertIn(FakeCost1, cost_classes)
self.assertNotIn(FakeCost2, cost_classes)
@mock.patch.object(costs.CostHandler, 'get_all_classes')
def test_get_cost_classes_not_found(self, getcls):
self.flags(scheduler_solver_costs=['FakeUnknownCost'],
group='solver_scheduler')
getcls.return_value = [FakeCost1, FakeCost2]
self.assertRaises(exception.SchedulerSolverCostNotFound,
self.solver._get_cost_classes)
@mock.patch.object(constraints.ConstraintHandler, 'get_all_classes')
def test_get_constraint_classes_normal(self, getcls):
self.flags(scheduler_solver_constraints=['FakeConstraint1'],
group='solver_scheduler')
getcls.return_value = [FakeConstraint1, FakeConstraint2]
constraint_classes = self.solver._get_constraint_classes()
self.assertIn(FakeConstraint1, constraint_classes)
self.assertNotIn(FakeConstraint2, constraint_classes)
@mock.patch.object(constraints.ConstraintHandler, 'get_all_classes')
def test_get_constraint_classes_not_found(self, getcls):
self.flags(scheduler_solver_constraints=['FakeUnknownConstraint'],
group='solver_scheduler')
getcls.return_value = [FakeConstraint1, FakeConstraint2]
self.assertRaises(exception.SchedulerSolverConstraintNotFound,
self.solver._get_constraint_classes)
|
apache-2.0
| -1,885,574,219,722,357,000 | 39.27381 | 78 | 0.71209 | false |
kpbochenek/empireofcode
|
call_base.py
|
1
|
1180
|
# [email protected]
def total_cost(calls):
    # Billing rule (as implied by the asserts below): calls are billed per started
    # minute, minutes are summed per calendar day, and the first 100 minutes of a
    # day cost 1 each while every additional minute costs 2.
    minutes_per_day = {}
    for c in calls:
        date, _, length = c.split(' ')
        minutes_per_day[date] = minutes_per_day.get(date, 0) + (int(length) + 59) // 60
    result = 0
    for mins in minutes_per_day.values():
        result += min(mins, 100) + max(mins - 100, 0) * 2
    print(result)
    return result
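# Worked check against the first assert below (durations grouped by date):
# 2014-01-01: ceil(181/60) = 4 min; 2014-01-02: ceil(600/60) = 10 min;
# 2014-01-03: ceil(6009/60) + ceil(200/60) = 101 + 4 = 105 min -> 100*1 + 5*2 = 110.
# Total cost: 4 + 10 + 110 = 124.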
if __name__ == '__main__':
    # These "asserts" are used for checking and are not necessary for auto-testing
assert total_cost(("2014-01-01 01:12:13 181",
"2014-01-02 20:11:10 600",
"2014-01-03 01:12:13 6009",
"2014-01-03 12:13:55 200")) == 124, "Base example"
assert total_cost(("2014-02-05 01:00:00 1",
"2014-02-05 02:00:00 1",
"2014-02-05 03:00:00 1",
"2014-02-05 04:00:00 1")) == 4, "Short calls but money"
assert total_cost(("2014-02-05 01:00:00 60",
"2014-02-05 02:00:00 60",
"2014-02-05 03:00:00 60",
"2014-02-05 04:00:00 6000")) == 106, "Precise calls"
print("All set? Click 'Check' to review your code and earn rewards!")
|
apache-2.0
| 791,751,094,155,661,700 | 34.757576 | 78 | 0.482203 | false |
cisco-oss-eng/Cloud99
|
cloud99/loaders/__init__.py
|
1
|
2556
|
# Copyright 2016 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import pykka
from cloud99.logging_setup import LOGGER
class TaskStatus(object):
INIT = "init"
STOPPED = "stopped"
ABORTED = "aborted"
FINISHED = "finished"
class BaseLoader(pykka.ThreadingActor):
def __init__(self, observer, openrc, inventory, **params):
# args kept to match signature
super(BaseLoader, self).__init__()
self.observer = observer
self.task_status = TaskStatus.INIT
self.runner_thread = None
self.checker_thread = None
self.times = 0
def on_receive(self, message):
msg = message.get('msg')
params = message.get("params")
if msg == 'validate_config':
self.validate_config()
if msg == 'start':
self.execute(params)
if msg == "stop_task":
self.abort()
if msg == 'stop':
self.stop()
def abort(self):
self.task_status = TaskStatus.ABORTED
self.wait_for_threads()
self.observer.tell({'msg': 'loader_finished', "times": self.times})
def stop(self):
self.task_status = TaskStatus.STOPPED
self.wait_for_threads()
super(BaseLoader, self).stop()
def wait_for_threads(self):
if self.runner_thread:
self.runner_thread.join()
if self.checker_thread:
self.checker_thread.join()
self.reset()
def reset(self):
self.runner_thread = None
self.checker_thread = None
self.task_status = TaskStatus.INIT
def on_failure(self, exception_type, exception_value, traceback):
        LOGGER.error("%s %s %s", exception_type, exception_value, traceback)
@abc.abstractmethod
def validate_config(self):
""""""
@abc.abstractmethod
def execute(self, params=None):
""" """
@abc.abstractmethod
def load(self, **params):
""""""
@abc.abstractmethod
def check(self, **params):
""" """
|
apache-2.0
| 4,718,103,721,506,559,000 | 28.045455 | 75 | 0.625978 | false |
hivam/doctor_psychology
|
models/doctor_hc_report_psicologia_inherit.py
|
1
|
1378
|
# -*- encoding: utf-8 -*-
# #############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
import logging
_logger = logging.getLogger(__name__)
import base64
import sys, os
class doctor_hc_report_inherit(osv.osv):
_inherit = "doctor.list_report"
_columns = {
'attentions_psychology_ids': fields.one2many('doctor.psicologia', 'list_report_print_spicologia_id', 'Attentions'),
}
doctor_hc_report_inherit()
|
agpl-3.0
| -6,713,229,931,846,211,000 | 36.243243 | 117 | 0.640058 | false |
thp44/delphin_6_automation
|
data_process/2d_1d/archieve/moisture_content_comparison.py
|
1
|
18274
|
__author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
import pandas as pd
import matplotlib.pyplot as plt
# RiBuild Modules
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
out_folder = r'C:\Users\ocni\PycharmProjects\delphin_6_automation\data_process\2d_1d\processed_data'
graphic_folder = r'U:\RIBuild\2D_1D\Processed Results\4A'
hdf_file = out_folder + '/relative_moisture_content.h5'
# Open HDF
# Uninsulated
dresdenzp_highratio_uninsulated_4a = pd.read_hdf(hdf_file, 'dresden_zp_high_ratio_uninsulated_4a')
dresdenzd_highratio_uninsulated_4a = pd.read_hdf(hdf_file, 'dresden_zd_high_ratio_uninsulated_4a')
postdam_highratio_uninsulated_4a = pd.read_hdf(hdf_file, 'potsdam_high_ratio_uninsulated_4a')
dresdenzp_lowratio_uninsulated_4a = pd.read_hdf(hdf_file, 'dresden_zp_low_ratio_uninsulated_4a')
dresdenzd_lowratio_uninsulated_4a = pd.read_hdf(hdf_file, 'dresden_zd_low_ratio_uninsulated_4a')
postdam_lowratio_uninsulated_4a = pd.read_hdf(hdf_file, 'potsdam_low_ratio_uninsulated_4a')
total_uninsulated_4a = pd.concat([dresdenzp_highratio_uninsulated_4a, dresdenzd_highratio_uninsulated_4a,
postdam_highratio_uninsulated_4a, dresdenzp_lowratio_uninsulated_4a,
dresdenzd_lowratio_uninsulated_4a, postdam_lowratio_uninsulated_4a])
# Insulated
dresdenzp_highratio_insulated_4a = pd.read_hdf(hdf_file, 'dresden_zp_high_ratio_insulated_4a')
dresdenzd_highratio_insulated_4a = pd.read_hdf(hdf_file, 'dresden_zd_high_ratio_insulated_4a')
postdam_highratio_insulated_4a = pd.read_hdf(hdf_file, 'potsdam_high_ratio_insulated_4a')
dresdenzp_lowratio_insulated_4a = pd.read_hdf(hdf_file, 'dresden_zp_low_ratio_insulated_4a')
dresdenzd_lowratio_insulated_4a = pd.read_hdf(hdf_file, 'dresden_zd_low_ratio_insulated_4a')
postdam_lowratio_insulated_4a = pd.read_hdf(hdf_file, 'potsdam_low_ratio_insulated_4a')
total_insulated_4a = pd.concat([dresdenzp_highratio_insulated_4a, dresdenzd_highratio_insulated_4a,
postdam_highratio_insulated_4a, dresdenzp_lowratio_insulated_4a,
dresdenzd_lowratio_insulated_4a, postdam_lowratio_insulated_4a])
def plots(plot, save=False):
"""
Creates box plots from all the wall scenarios
"""
if plot == 'uninsulated' or plot == 'all':
plt.figure('dresdenzp_highratio_uninsulated_4a_moisture', figsize=(16, 8), tight_layout=True)
dresdenzp_highratio_uninsulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 1100)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: Dresden ZP - Mortar: High Cement Ratio - Insulation: None')
if save:
plt.savefig(f"{graphic_folder}/dresdenzp_highratio_uninsulated_4a_moisture")
plt.figure('dresdenzd_highratio_uninsulated_4a_moisture', figsize=(16, 8), tight_layout=True)
dresdenzd_highratio_uninsulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 1100)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: Dresden ZD - Mortar: High Cement Ratio - Insulation: None')
if save:
plt.savefig(f"{graphic_folder}/dresdenzd_highratio_uninsulated_4a_moisture")
plt.figure('postdam_highratio_uninsulated_4a_moisture', figsize=(16, 8), tight_layout=True)
postdam_highratio_uninsulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 1100)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: Potsdam - Mortar: High Cement Ratio - Insulation: None')
if save:
plt.savefig(f"{graphic_folder}/postdam_highratio_uninsulated_4a_moisture")
plt.figure('dresdenzp_lowratio_uninsulated_4a_moisture', figsize=(16, 8), tight_layout=True)
dresdenzp_lowratio_uninsulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 1100)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: Dresden ZP - Mortar: Low Cement Ratio - Insulation: None')
if save:
plt.savefig(f"{graphic_folder}/dresdenzp_lowratio_uninsulated_4a_moisture")
plt.figure('dresdenzd_lowratio_uninsulated_4a_moisture', figsize=(16, 8), tight_layout=True)
dresdenzd_lowratio_uninsulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 1100)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: Dresden ZD - Mortar: Low Cement Ratio - Insulation: None')
if save:
plt.savefig(f"{graphic_folder}/dresdenzd_lowratio_uninsulated_4a_moisture")
plt.figure('postdam_lowratio_uninsulated_4a_moisture', figsize=(16, 8), tight_layout=True)
postdam_lowratio_uninsulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 1100)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: Potsdam - Mortar: Low Cement Ratio - Insulation: None')
if save:
plt.savefig(f"{graphic_folder}/postdam_lowratio_uninsulated_4a_moisture")
plt.figure('total_uninsulated_4a_moisture', figsize=(16, 8), tight_layout=True)
total_uninsulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 1100)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: All - Mortar: All - Insulation: None')
if save:
plt.savefig(f"{graphic_folder}/total_uninsulated_4a_moisture")
if plot == 'insulated' or plot == 'all':
plt.figure('dresdenzp_highratio_insulated_4a_moisture', figsize=(16, 8), tight_layout=True)
dresdenzp_highratio_insulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 2000)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: Dresden ZP - Mortar: High Cement Ratio - Insulation: Calcium Silicate')
if save:
plt.savefig(f"{graphic_folder}/dresdenzp_highratio_insulated_4a_moisture")
plt.figure('dresdenzd_highratio_insulated_4a_moisture', figsize=(16, 8), tight_layout=True)
dresdenzd_highratio_insulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 2000)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: Dresden ZD - Mortar: High Cement Ratio - Insulation: Calcium Silicate')
if save:
plt.savefig(f"{graphic_folder}/dresdenzd_highratio_insulated_4a_moisture")
plt.figure('postdam_highratio_insulated_4a_moisture', figsize=(16, 8), tight_layout=True)
postdam_highratio_insulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 2000)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: Potsdam - Mortar: High Cement Ratio - Insulation: Calcium Silicate')
if save:
plt.savefig(f"{graphic_folder}/postdam_highratio_insulated_4a_moisture")
plt.figure('dresdenzp_lowratio_insulated_4a_moisture', figsize=(16, 8), tight_layout=True)
dresdenzp_lowratio_insulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 2000)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: Dresden ZP - Mortar: Low Cement Ratio - Insulation: Calcium Silicate')
if save:
plt.savefig(f"{graphic_folder}/dresdenzp_lowratio_insulated_4a_moisture")
plt.figure('dresdenzd_lowratio_insulated_4a_moisture', figsize=(16, 8), tight_layout=True)
dresdenzd_lowratio_insulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 2000)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: Dresden ZD - Mortar: Low Cement Ratio - Insulation: Calcium Silicate')
if save:
plt.savefig(f"{graphic_folder}/dresdenzd_lowratio_insulated_4a_moisture")
plt.figure('postdam_lowratio_insulated_4a_moisture', figsize=(16, 8), tight_layout=True)
postdam_lowratio_insulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 2000)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: Potsdam - Mortar: Low Cement Ratio - Insulation: Calcium Silicate')
if save:
plt.savefig(f"{graphic_folder}/postdam_lowratio_insulated_4a_moisture")
plt.figure('total_insulated_4a_moisture', figsize=(16, 8), tight_layout=True)
total_insulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 2000)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Moisture Content\n'
'Brick: All - Mortar: All - Insulation: Calcium Silicate')
if save:
plt.savefig(f"{graphic_folder}/total_insulated_4a_moisture")
plt.show()
plots('all', False)
def std3_ratio(print_=False, excel=False):
"""Computes ratio of outliers in the data sets. Outliers is here defined as data points deviating with more
the 3 standard deviations from the mean."""
std3_uninsulated_ratio_ = uninsulated()
std3_insulated_ratio_ = insulated()
if print_:
print('Uninsulated')
print(std3_uninsulated_ratio_)
print('')
print('Insulated')
print(std3_insulated_ratio_)
if excel:
writer = pd.ExcelWriter(f'{out_folder}/moisture_std_ratios.xlsx')
std3_uninsulated_ratio_.to_excel(writer, 'Uninsulated')
std3_insulated_ratio_.to_excel(writer, 'Insulated')
writer.save()
def uninsulated():
"""Computes the outliers for the uninsulated cases"""
outliers_total_uninsulated = (total_uninsulated_4a.shape[0] -
total_uninsulated_4a.sub(total_uninsulated_4a.mean())
.div(total_uninsulated_4a.std()).abs().lt(3).sum()) / total_uninsulated_4a.shape[0]
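    # i.e. per column: (row count) minus (rows with |value - mean| < 3 * std),
    # divided by the row count -> the fraction of 3-sigma outliers.
    # The same pattern repeats for each wall configuration below.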
outliers_zd_high_uninsulated = (dresdenzd_highratio_uninsulated_4a.shape[0] -
dresdenzd_highratio_uninsulated_4a.sub(dresdenzd_highratio_uninsulated_4a.mean())
.div(dresdenzd_highratio_uninsulated_4a.std()).abs().lt(3).sum()) \
/ dresdenzd_highratio_uninsulated_4a.shape[0]
outliers_zp_high_uninsulated = (dresdenzp_highratio_uninsulated_4a.shape[0] -
dresdenzp_highratio_uninsulated_4a.sub(dresdenzp_highratio_uninsulated_4a.mean())
.div(dresdenzp_highratio_uninsulated_4a.std()).abs().lt(3).sum()) \
/ dresdenzp_highratio_uninsulated_4a.shape[0]
outliers_pd_high_uninsulated = (postdam_highratio_uninsulated_4a.shape[0] -
postdam_highratio_uninsulated_4a.sub(postdam_highratio_uninsulated_4a.mean())
.div(postdam_highratio_uninsulated_4a.std()).abs().lt(3).sum()) \
/ postdam_highratio_uninsulated_4a.shape[0]
outliers_zd_low_uninsulated = (dresdenzd_lowratio_uninsulated_4a.shape[0] -
dresdenzd_lowratio_uninsulated_4a.sub(dresdenzd_lowratio_uninsulated_4a.mean())
.div(dresdenzd_lowratio_uninsulated_4a.std()).abs().lt(3).sum()) \
/ dresdenzd_lowratio_uninsulated_4a.shape[0]
outliers_zp_low_uninsulated = (dresdenzp_lowratio_uninsulated_4a.shape[0] -
dresdenzp_lowratio_uninsulated_4a.sub(dresdenzp_lowratio_uninsulated_4a.mean())
.div(dresdenzp_lowratio_uninsulated_4a.std()).abs().lt(3).sum()) \
/ dresdenzp_lowratio_uninsulated_4a.shape[0]
outliers_pd_low_uninsulated = (postdam_lowratio_uninsulated_4a.shape[0] -
postdam_lowratio_uninsulated_4a.sub(postdam_lowratio_uninsulated_4a.mean())
.div(postdam_lowratio_uninsulated_4a.std()).abs().lt(3).sum()) \
/ postdam_lowratio_uninsulated_4a.shape[0]
outliers_uninsulated_ratio_ = pd.concat([outliers_total_uninsulated, outliers_zd_high_uninsulated,
outliers_zp_high_uninsulated, outliers_pd_high_uninsulated,
outliers_zd_low_uninsulated, outliers_zp_low_uninsulated,
outliers_pd_low_uninsulated], axis=1)
outliers_uninsulated_ratio_.columns = ["Brick: All - Mortar: All - Insulation: None",
"Brick: Dresden ZD - Mortar: High Cement Ratio - Insulation: None",
"Brick: Dresden ZP - Mortar: High Cement Ratio - Insulation: None",
"Brick: Potsdam - Mortar: High Cement Ratio - Insulation: None",
"Brick: Dresden ZD - Mortar: Low Cement Ratio - Insulation: None",
"Brick: Dresden ZP - Mortar: Low Cement Ratio - Insulation: None",
"Brick: Potsdam - Mortar: Low Cement Ratio - Insulation: None"]
return outliers_uninsulated_ratio_
def insulated():
"""Computes the outliers for the insulated cases"""
outliers_total_insulated = (total_insulated_4a.shape[0] - total_insulated_4a.sub(total_insulated_4a.mean())
.div(total_insulated_4a.std()).abs().lt(3).sum()) / total_insulated_4a.shape[0]
outliers_zd_high_insulated = (dresdenzd_highratio_insulated_4a.shape[0] -
dresdenzd_highratio_insulated_4a.sub(dresdenzd_highratio_insulated_4a.mean())
.div(dresdenzd_highratio_insulated_4a.std()).abs().lt(3).sum()) \
/ dresdenzd_highratio_insulated_4a.shape[0]
outliers_zp_high_insulated = (dresdenzp_highratio_insulated_4a.shape[0] -
dresdenzp_highratio_insulated_4a.sub(dresdenzp_highratio_insulated_4a.mean())
.div(dresdenzp_highratio_insulated_4a.std()).abs().lt(3).sum()) \
/ dresdenzp_highratio_insulated_4a.shape[0]
outliers_pd_high_insulated = (postdam_highratio_insulated_4a.shape[0] -
postdam_highratio_insulated_4a.sub(postdam_highratio_insulated_4a.mean())
.div(postdam_highratio_insulated_4a.std()).abs().lt(3).sum()) \
/ postdam_highratio_insulated_4a.shape[0]
outliers_zd_low_insulated = (dresdenzd_lowratio_insulated_4a.shape[0] -
dresdenzd_lowratio_insulated_4a.sub(dresdenzd_lowratio_insulated_4a.mean())
.div(dresdenzd_lowratio_insulated_4a.std()).abs().lt(3).sum()) \
/ dresdenzd_lowratio_insulated_4a.shape[0]
outliers_zp_low_insulated = (dresdenzp_lowratio_insulated_4a.shape[0] -
dresdenzp_lowratio_insulated_4a.sub(dresdenzp_lowratio_insulated_4a.mean())
.div(dresdenzp_lowratio_insulated_4a.std()).abs().lt(3).sum()) \
/ dresdenzp_lowratio_insulated_4a.shape[0]
outliers_pd_low_insulated = (postdam_lowratio_insulated_4a.shape[0] -
postdam_lowratio_insulated_4a.sub(postdam_lowratio_insulated_4a.mean())
.div(postdam_lowratio_insulated_4a.std()).abs().lt(3).sum()) \
/ postdam_lowratio_insulated_4a.shape[0]
std2_insulated_ratio_ = pd.concat([outliers_total_insulated, outliers_zd_high_insulated,
outliers_zp_high_insulated, outliers_pd_high_insulated,
outliers_zd_low_insulated, outliers_zp_low_insulated,
outliers_pd_low_insulated], axis=1)
    std2_insulated_ratio_.columns = ["Brick: All - Mortar: All - Insulation: Calcium Silicate",
"Brick: Dresden ZD - Mortar: High Cement Ratio - Insulation: Calcium Silicate",
"Brick: Dresden ZP - Mortar: High Cement Ratio - Insulation: Calcium Silicate",
"Brick: Potsdam - Mortar: High Cement Ratio - Insulation: Calcium Silicate",
"Brick: Dresden ZD - Mortar: Low Cement Ratio - Insulation: Calcium Silicate",
"Brick: Dresden ZP - Mortar: Low Cement Ratio - Insulation: Calcium Silicate",
"Brick: Potsdam - Mortar: Low Cement Ratio - Insulation: Calcium Silicate"]
return std2_insulated_ratio_
#std3_ratio(False, True)
|
mit
| -2,501,209,804,834,219,500 | 55.575851 | 120 | 0.600088 | false |
intel-hadoop/Big-Data-Benchmark-for-Big-Bench
|
engines/hive/queries/q08/q08_filter_sales_with_reviews_viewed_before.py
|
1
|
3144
|
#"INTEL CONFIDENTIAL"
#Copyright 2016 Intel Corporation All Rights Reserved.
#
#The source code contained or described herein and all documents related to the source code ("Material") are owned by Intel Corporation or its suppliers or licensors. Title to the Material remains with Intel Corporation or its suppliers and licensors. The Material contains trade secrets and proprietary and confidential information of Intel or its suppliers and licensors. The Material is protected by worldwide copyright and trade secret laws and treaty provisions. No part of the Material may be used, copied, reproduced, modified, published, uploaded, posted, transmitted, distributed, or disclosed in any way without Intel's prior express written permission.
#
#No license under any patent, copyright, trade secret or other intellectual property right is granted to or conferred upon you by disclosure or delivery of the Materials, either expressly, by implication, inducement, estoppel or otherwise. Any license under such intellectual property rights must be express and approved by Intel in writing.
import sys
import logging
import traceback
import os
import time
from time import strftime
web_page_type_filter=sys.argv[1]
seconds_before_sale_filter = long(sys.argv[2])
if __name__ == "__main__":
line = ''
try:
current_key = ''
last_review_date=-1
#sales_sk should be distinct
last_sales_sk = ''
#expects input to be partitioned by uid and sorted by date_sk (and timestamp) ascending
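        # A hypothetical input row (tab separated; values invented for illustration):
        #   "1234\t1451606400\t987654\treview"
        #    wcs_user_sk  tstamp_inSec  wcs_sales_sk  wp_type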
for line in sys.stdin:
            # clustered by wcs_user_sk and sorted by wcs_user_sk, tstamp_inSec_str, wcs_sales_sk, wp_type ascending in this order => ensured by hive
wcs_user_sk, tstamp_inSec_str, wcs_sales_sk, wp_type = line.strip().split("\t")
#reset on partition change
if current_key != wcs_user_sk :
current_key = wcs_user_sk
last_review_date = -1
last_sales_sk = ''
tstamp_inSec = long(tstamp_inSec_str)
#found review before purchase, save last review date
if wp_type == web_page_type_filter:
last_review_date = tstamp_inSec
continue
            #if we encounter a sold item ( wcs_sales_sk.isdigit() => valid non null value) and a user looked at a review within 'seconds_before_sale_filter' => print found sales_sk back to hive
#if last_review_date > 0 and (tstamp_inSec - last_review_date) <= seconds_before_sale_filter and wcs_sales_sk.isdigit() : #version with duplicate sales_sk's
if last_review_date > 0 and (tstamp_inSec - last_review_date) <= seconds_before_sale_filter and wcs_sales_sk.isdigit() and last_sales_sk != wcs_sales_sk : #version reduced duplicate sales_sk's
last_sales_sk = wcs_sales_sk
print wcs_sales_sk
except:
        ## should only happen if input format is not correct, e.g. 3 instead of 4 tab separated values
logging.basicConfig(level=logging.DEBUG, filename=strftime("/tmp/bigbench_q8_reducer_%Y%m%d-%H%M%S.log"))
logging.info('web_page_type_filter: ' + web_page_type_filter )
logging.info('seconds_before_sale_filter: ' + seconds_before_sale_filter )
logging.info("line from hive: \"" + line + "\"")
logging.exception("Oops:")
raise
sys.exit(1)
|
apache-2.0
| -7,692,260,263,573,723,000 | 51.4 | 663 | 0.735687 | false |
picklepete/pyicloud
|
pyicloud/services/photos.py
|
1
|
21937
|
"""Photo service."""
import json
import base64
from six import PY2
# fmt: off
from six.moves.urllib.parse import urlencode # pylint: disable=bad-option-value,relative-import
# fmt: on
from datetime import datetime
from pyicloud.exceptions import PyiCloudServiceNotActivatedException
from pytz import UTC
class PhotosService(object):
"""The 'Photos' iCloud service."""
SMART_FOLDERS = {
"All Photos": {
"obj_type": "CPLAssetByAddedDate",
"list_type": "CPLAssetAndMasterByAddedDate",
"direction": "ASCENDING",
"query_filter": None,
},
"Time-lapse": {
"obj_type": "CPLAssetInSmartAlbumByAssetDate:Timelapse",
"list_type": "CPLAssetAndMasterInSmartAlbumByAssetDate",
"direction": "ASCENDING",
"query_filter": [
{
"fieldName": "smartAlbum",
"comparator": "EQUALS",
"fieldValue": {"type": "STRING", "value": "TIMELAPSE"},
}
],
},
"Videos": {
"obj_type": "CPLAssetInSmartAlbumByAssetDate:Video",
"list_type": "CPLAssetAndMasterInSmartAlbumByAssetDate",
"direction": "ASCENDING",
"query_filter": [
{
"fieldName": "smartAlbum",
"comparator": "EQUALS",
"fieldValue": {"type": "STRING", "value": "VIDEO"},
}
],
},
"Slo-mo": {
"obj_type": "CPLAssetInSmartAlbumByAssetDate:Slomo",
"list_type": "CPLAssetAndMasterInSmartAlbumByAssetDate",
"direction": "ASCENDING",
"query_filter": [
{
"fieldName": "smartAlbum",
"comparator": "EQUALS",
"fieldValue": {"type": "STRING", "value": "SLOMO"},
}
],
},
"Bursts": {
"obj_type": "CPLAssetBurstStackAssetByAssetDate",
"list_type": "CPLBurstStackAssetAndMasterByAssetDate",
"direction": "ASCENDING",
"query_filter": None,
},
"Favorites": {
"obj_type": "CPLAssetInSmartAlbumByAssetDate:Favorite",
"list_type": "CPLAssetAndMasterInSmartAlbumByAssetDate",
"direction": "ASCENDING",
"query_filter": [
{
"fieldName": "smartAlbum",
"comparator": "EQUALS",
"fieldValue": {"type": "STRING", "value": "FAVORITE"},
}
],
},
"Panoramas": {
"obj_type": "CPLAssetInSmartAlbumByAssetDate:Panorama",
"list_type": "CPLAssetAndMasterInSmartAlbumByAssetDate",
"direction": "ASCENDING",
"query_filter": [
{
"fieldName": "smartAlbum",
"comparator": "EQUALS",
"fieldValue": {"type": "STRING", "value": "PANORAMA"},
}
],
},
"Screenshots": {
"obj_type": "CPLAssetInSmartAlbumByAssetDate:Screenshot",
"list_type": "CPLAssetAndMasterInSmartAlbumByAssetDate",
"direction": "ASCENDING",
"query_filter": [
{
"fieldName": "smartAlbum",
"comparator": "EQUALS",
"fieldValue": {"type": "STRING", "value": "SCREENSHOT"},
}
],
},
"Live": {
"obj_type": "CPLAssetInSmartAlbumByAssetDate:Live",
"list_type": "CPLAssetAndMasterInSmartAlbumByAssetDate",
"direction": "ASCENDING",
"query_filter": [
{
"fieldName": "smartAlbum",
"comparator": "EQUALS",
"fieldValue": {"type": "STRING", "value": "LIVE"},
}
],
},
"Recently Deleted": {
"obj_type": "CPLAssetDeletedByExpungedDate",
"list_type": "CPLAssetAndMasterDeletedByExpungedDate",
"direction": "ASCENDING",
"query_filter": None,
},
"Hidden": {
"obj_type": "CPLAssetHiddenByAssetDate",
"list_type": "CPLAssetAndMasterHiddenByAssetDate",
"direction": "ASCENDING",
"query_filter": None,
},
}
def __init__(self, service_root, session, params):
self.session = session
self.params = dict(params)
self._service_root = service_root
self.service_endpoint = (
"%s/database/1/com.apple.photos.cloud/production/private"
% self._service_root
)
self._albums = None
self.params.update({"remapEnums": True, "getCurrentSyncToken": True})
url = "%s/records/query?%s" % (self.service_endpoint, urlencode(self.params))
json_data = (
'{"query":{"recordType":"CheckIndexingState"},'
'"zoneID":{"zoneName":"PrimarySync"}}'
)
request = self.session.post(
url, data=json_data, headers={"Content-type": "text/plain"}
)
response = request.json()
indexing_state = response["records"][0]["fields"]["state"]["value"]
if indexing_state != "FINISHED":
raise PyiCloudServiceNotActivatedException(
"iCloud Photo Library not finished indexing. "
"Please try again in a few minutes."
)
# TODO: Does syncToken ever change? # pylint: disable=fixme
# self.params.update({
# 'syncToken': response['syncToken'],
# 'clientInstanceId': self.params.pop('clientId')
# })
self._photo_assets = {}
@property
def albums(self):
"""Returns photo albums."""
if not self._albums:
self._albums = {
name: PhotoAlbum(self, name, **props)
for (name, props) in self.SMART_FOLDERS.items()
}
for folder in self._fetch_folders():
                # Skipping albums having a null name, which can happen sometimes
if "albumNameEnc" not in folder["fields"]:
continue
# TODO: Handle subfolders # pylint: disable=fixme
if folder["recordName"] == "----Root-Folder----" or (
folder["fields"].get("isDeleted")
and folder["fields"]["isDeleted"]["value"]
):
continue
folder_id = folder["recordName"]
folder_obj_type = (
"CPLContainerRelationNotDeletedByAssetDate:%s" % folder_id
)
folder_name = base64.b64decode(
folder["fields"]["albumNameEnc"]["value"]
).decode("utf-8")
query_filter = [
{
"fieldName": "parentId",
"comparator": "EQUALS",
"fieldValue": {"type": "STRING", "value": folder_id},
}
]
album = PhotoAlbum(
self,
folder_name,
"CPLContainerRelationLiveByAssetDate",
folder_obj_type,
"ASCENDING",
query_filter,
)
self._albums[folder_name] = album
return self._albums
def _fetch_folders(self):
url = "%s/records/query?%s" % (self.service_endpoint, urlencode(self.params))
json_data = (
'{"query":{"recordType":"CPLAlbumByPositionLive"},'
'"zoneID":{"zoneName":"PrimarySync"}}'
)
request = self.session.post(
url, data=json_data, headers={"Content-type": "text/plain"}
)
response = request.json()
return response["records"]
@property
def all(self):
"""Returns all photos."""
return self.albums["All Photos"]
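# Illustrative usage sketch (assumes an authenticated PyiCloudService instance
# named `api`; the album name and file handling are examples only):
#   album = api.photos.albums["Screenshots"]
#   for photo in album:
#       response = photo.download()            # streamed requests.Response (see PhotoAsset.download)
#       with open(photo.filename, "wb") as f:
#           f.write(response.raw.read())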
class PhotoAlbum(object):
"""A photo album."""
def __init__(
self,
service,
name,
list_type,
obj_type,
direction,
query_filter=None,
page_size=100,
):
self.name = name
self.service = service
self.list_type = list_type
self.obj_type = obj_type
self.direction = direction
self.query_filter = query_filter
self.page_size = page_size
self._len = None
@property
def title(self):
"""Gets the album name."""
return self.name
def __iter__(self):
return self.photos
def __len__(self):
if self._len is None:
url = "%s/internal/records/query/batch?%s" % (
self.service.service_endpoint,
urlencode(self.service.params),
)
request = self.service.session.post(
url,
data=json.dumps(
{
u"batch": [
{
u"resultsLimit": 1,
u"query": {
u"filterBy": {
u"fieldName": u"indexCountID",
u"fieldValue": {
u"type": u"STRING_LIST",
u"value": [self.obj_type],
},
u"comparator": u"IN",
},
u"recordType": u"HyperionIndexCountLookup",
},
u"zoneWide": True,
u"zoneID": {u"zoneName": u"PrimarySync"},
}
]
}
),
headers={"Content-type": "text/plain"},
)
response = request.json()
self._len = response["batch"][0]["records"][0]["fields"]["itemCount"][
"value"
]
return self._len
@property
def photos(self):
"""Returns the album photos."""
if self.direction == "DESCENDING":
offset = len(self) - 1
else:
offset = 0
while True:
url = ("%s/records/query?" % self.service.service_endpoint) + urlencode(
self.service.params
)
request = self.service.session.post(
url,
data=json.dumps(
self._list_query_gen(
offset, self.list_type, self.direction, self.query_filter
)
),
headers={"Content-type": "text/plain"},
)
response = request.json()
asset_records = {}
master_records = []
for rec in response["records"]:
if rec["recordType"] == "CPLAsset":
master_id = rec["fields"]["masterRef"]["value"]["recordName"]
asset_records[master_id] = rec
elif rec["recordType"] == "CPLMaster":
master_records.append(rec)
master_records_len = len(master_records)
if master_records_len:
if self.direction == "DESCENDING":
offset = offset - master_records_len
else:
offset = offset + master_records_len
for master_record in master_records:
record_name = master_record["recordName"]
yield PhotoAsset(
self.service, master_record, asset_records[record_name]
)
else:
break
def _list_query_gen(self, offset, list_type, direction, query_filter=None):
query = {
u"query": {
u"filterBy": [
{
u"fieldName": u"startRank",
u"fieldValue": {u"type": u"INT64", u"value": offset},
u"comparator": u"EQUALS",
},
{
u"fieldName": u"direction",
u"fieldValue": {u"type": u"STRING", u"value": direction},
u"comparator": u"EQUALS",
},
],
u"recordType": list_type,
},
u"resultsLimit": self.page_size * 2,
u"desiredKeys": [
u"resJPEGFullWidth",
u"resJPEGFullHeight",
u"resJPEGFullFileType",
u"resJPEGFullFingerprint",
u"resJPEGFullRes",
u"resJPEGLargeWidth",
u"resJPEGLargeHeight",
u"resJPEGLargeFileType",
u"resJPEGLargeFingerprint",
u"resJPEGLargeRes",
u"resJPEGMedWidth",
u"resJPEGMedHeight",
u"resJPEGMedFileType",
u"resJPEGMedFingerprint",
u"resJPEGMedRes",
u"resJPEGThumbWidth",
u"resJPEGThumbHeight",
u"resJPEGThumbFileType",
u"resJPEGThumbFingerprint",
u"resJPEGThumbRes",
u"resVidFullWidth",
u"resVidFullHeight",
u"resVidFullFileType",
u"resVidFullFingerprint",
u"resVidFullRes",
u"resVidMedWidth",
u"resVidMedHeight",
u"resVidMedFileType",
u"resVidMedFingerprint",
u"resVidMedRes",
u"resVidSmallWidth",
u"resVidSmallHeight",
u"resVidSmallFileType",
u"resVidSmallFingerprint",
u"resVidSmallRes",
u"resSidecarWidth",
u"resSidecarHeight",
u"resSidecarFileType",
u"resSidecarFingerprint",
u"resSidecarRes",
u"itemType",
u"dataClassType",
u"filenameEnc",
u"originalOrientation",
u"resOriginalWidth",
u"resOriginalHeight",
u"resOriginalFileType",
u"resOriginalFingerprint",
u"resOriginalRes",
u"resOriginalAltWidth",
u"resOriginalAltHeight",
u"resOriginalAltFileType",
u"resOriginalAltFingerprint",
u"resOriginalAltRes",
u"resOriginalVidComplWidth",
u"resOriginalVidComplHeight",
u"resOriginalVidComplFileType",
u"resOriginalVidComplFingerprint",
u"resOriginalVidComplRes",
u"isDeleted",
u"isExpunged",
u"dateExpunged",
u"remappedRef",
u"recordName",
u"recordType",
u"recordChangeTag",
u"masterRef",
u"adjustmentRenderType",
u"assetDate",
u"addedDate",
u"isFavorite",
u"isHidden",
u"orientation",
u"duration",
u"assetSubtype",
u"assetSubtypeV2",
u"assetHDRType",
u"burstFlags",
u"burstFlagsExt",
u"burstId",
u"captionEnc",
u"locationEnc",
u"locationV2Enc",
u"locationLatitude",
u"locationLongitude",
u"adjustmentType",
u"timeZoneOffset",
u"vidComplDurValue",
u"vidComplDurScale",
u"vidComplDispValue",
u"vidComplDispScale",
u"vidComplVisibilityState",
u"customRenderedValue",
u"containerId",
u"itemId",
u"position",
u"isKeyAsset",
],
u"zoneID": {u"zoneName": u"PrimarySync"},
}
if query_filter:
query["query"]["filterBy"].extend(query_filter)
return query
def __unicode__(self):
return self.title
def __str__(self):
as_unicode = self.__unicode__()
if PY2:
return as_unicode.encode("utf-8", "ignore")
return as_unicode
def __repr__(self):
return "<%s: '%s'>" % (type(self).__name__, self)
class PhotoAsset(object):
"""A photo."""
def __init__(self, service, master_record, asset_record):
self._service = service
self._master_record = master_record
self._asset_record = asset_record
self._versions = None
PHOTO_VERSION_LOOKUP = {
u"original": u"resOriginal",
u"medium": u"resJPEGMed",
u"thumb": u"resJPEGThumb",
}
VIDEO_VERSION_LOOKUP = {
u"original": u"resOriginal",
u"medium": u"resVidMed",
u"thumb": u"resVidSmall",
}
@property
def id(self):
"""Gets the photo id."""
return self._master_record["recordName"]
@property
def filename(self):
"""Gets the photo file name."""
return base64.b64decode(
self._master_record["fields"]["filenameEnc"]["value"]
).decode("utf-8")
@property
def size(self):
"""Gets the photo size."""
return self._master_record["fields"]["resOriginalRes"]["value"]["size"]
@property
def created(self):
"""Gets the photo created date."""
return self.asset_date
@property
def asset_date(self):
"""Gets the photo asset date."""
try:
return datetime.fromtimestamp(
self._asset_record["fields"]["assetDate"]["value"] / 1000.0, tz=UTC
)
except KeyError:
return datetime.fromtimestamp(0)
@property
def added_date(self):
"""Gets the photo added date."""
return datetime.fromtimestamp(
self._asset_record["fields"]["addedDate"]["value"] / 1000.0, tz=UTC
)
@property
def dimensions(self):
"""Gets the photo dimensions."""
return (
self._master_record["fields"]["resOriginalWidth"]["value"],
self._master_record["fields"]["resOriginalHeight"]["value"],
)
@property
def versions(self):
"""Gets the photo versions."""
if not self._versions:
self._versions = {}
if "resVidSmallRes" in self._master_record["fields"]:
typed_version_lookup = self.VIDEO_VERSION_LOOKUP
else:
typed_version_lookup = self.PHOTO_VERSION_LOOKUP
for key, prefix in typed_version_lookup.items():
if "%sRes" % prefix in self._master_record["fields"]:
fields = self._master_record["fields"]
version = {"filename": self.filename}
width_entry = fields.get("%sWidth" % prefix)
if width_entry:
version["width"] = width_entry["value"]
else:
version["width"] = None
height_entry = fields.get("%sHeight" % prefix)
if height_entry:
version["height"] = height_entry["value"]
else:
version["height"] = None
size_entry = fields.get("%sRes" % prefix)
if size_entry:
version["size"] = size_entry["value"]["size"]
version["url"] = size_entry["value"]["downloadURL"]
else:
version["size"] = None
version["url"] = None
type_entry = fields.get("%sFileType" % prefix)
if type_entry:
version["type"] = type_entry["value"]
else:
version["type"] = None
self._versions[key] = version
return self._versions
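    # Each entry of `versions` built above has this shape (values purely illustrative):
    #   {"filename": "IMG_0001.JPG", "width": 4032, "height": 3024,
    #    "size": 2345678, "url": "https://...", "type": "public.jpeg"}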
def download(self, version="original", **kwargs):
"""Returns the photo file."""
if version not in self.versions:
return None
return self._service.session.get(
self.versions[version]["url"], stream=True, **kwargs
)
def delete(self):
"""Deletes the photo."""
json_data = (
'{"query":{"recordType":"CheckIndexingState"},'
'"zoneID":{"zoneName":"PrimarySync"}}'
)
json_data = (
'{"operations":[{'
'"operationType":"update",'
'"record":{'
'"recordName":"%s",'
'"recordType":"%s",'
'"recordChangeTag":"%s",'
'"fields":{"isDeleted":{"value":1}'
"}}}],"
'"zoneID":{'
'"zoneName":"PrimarySync"'
'},"atomic":true}'
% (
self._asset_record["recordName"],
self._asset_record["recordType"],
self._master_record["recordChangeTag"],
)
)
endpoint = self._service.service_endpoint
params = urlencode(self._service.params)
url = "%s/records/modify?%s" % (endpoint, params)
return self._service.session.post(
url, data=json_data, headers={"Content-type": "text/plain"}
)
def __repr__(self):
return "<%s: id=%s>" % (type(self).__name__, self.id)
|
mit
| 4,992,312,043,741,052,000 | 32.801233 | 96 | 0.462506 | false |
dayatz/taiga-back
|
tests/integration/test_totals_projects.py
|
1
|
5868
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2017 Jesús Espino <[email protected]>
# Copyright (C) 2014-2017 David Barragán <[email protected]>
# Copyright (C) 2014-2017 Anler Hernández <[email protected]>
# Copyright (C) 2014-2017 Alejandro Alonso <[email protected]>
# Copyright (C) 2014-2017 Taiga Agile LLC <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
import datetime
from .. import factories as f
from taiga.projects.history.choices import HistoryType
from taiga.projects.models import Project
from django.core.urlresolvers import reverse
from django.utils import timezone
pytestmark = pytest.mark.django_db
def test_project_totals_updated_on_activity(client):
project = f.create_project()
totals_updated_datetime = project.totals_updated_datetime
now = timezone.now()
assert project.total_activity == 0
totals_updated_datetime = project.totals_updated_datetime
us = f.UserStoryFactory.create(project=project, owner=project.owner)
f.HistoryEntryFactory.create(
project=project,
user={"pk": project.owner.id},
comment="",
type=HistoryType.change,
key="userstories.userstory:{}".format(us.id),
is_hidden=False,
diff=[],
created_at=now - datetime.timedelta(days=3)
)
project = Project.objects.get(id=project.id)
assert project.total_activity == 1
assert project.total_activity_last_week == 1
assert project.total_activity_last_month == 1
assert project.total_activity_last_year == 1
assert project.totals_updated_datetime > totals_updated_datetime
totals_updated_datetime = project.totals_updated_datetime
f.HistoryEntryFactory.create(
project=project,
user={"pk": project.owner.id},
comment="",
type=HistoryType.change,
key="userstories.userstory:{}".format(us.id),
is_hidden=False,
diff=[],
created_at=now - datetime.timedelta(days=13)
)
project = Project.objects.get(id=project.id)
assert project.total_activity == 2
assert project.total_activity_last_week == 1
assert project.total_activity_last_month == 2
assert project.total_activity_last_year == 2
assert project.totals_updated_datetime > totals_updated_datetime
totals_updated_datetime = project.totals_updated_datetime
f.HistoryEntryFactory.create(
project=project,
user={"pk": project.owner.id},
comment="",
type=HistoryType.change,
key="userstories.userstory:{}".format(us.id),
is_hidden=False,
diff=[],
created_at=now - datetime.timedelta(days=33)
)
project = Project.objects.get(id=project.id)
assert project.total_activity == 3
assert project.total_activity_last_week == 1
assert project.total_activity_last_month == 2
assert project.total_activity_last_year == 3
assert project.totals_updated_datetime > totals_updated_datetime
totals_updated_datetime = project.totals_updated_datetime
f.HistoryEntryFactory.create(
project=project,
user={"pk": project.owner.id},
comment="",
type=HistoryType.change,
key="userstories.userstory:{}".format(us.id),
is_hidden=False,
diff=[],
created_at=now - datetime.timedelta(days=380)
)
project = Project.objects.get(id=project.id)
assert project.total_activity == 4
assert project.total_activity_last_week == 1
assert project.total_activity_last_month == 2
assert project.total_activity_last_year == 3
assert project.totals_updated_datetime > totals_updated_datetime
def test_project_totals_updated_on_like(client):
project = f.create_project()
f.MembershipFactory.create(project=project, user=project.owner, is_admin=True)
totals_updated_datetime = project.totals_updated_datetime
now = timezone.now()
assert project.total_activity == 0
now = timezone.now()
totals_updated_datetime = project.totals_updated_datetime
us = f.UserStoryFactory.create(project=project, owner=project.owner)
l = f.LikeFactory.create(content_object=project)
l.created_date=now-datetime.timedelta(days=13)
l.save()
l = f.LikeFactory.create(content_object=project)
l.created_date=now-datetime.timedelta(days=33)
l.save()
l = f.LikeFactory.create(content_object=project)
l.created_date=now-datetime.timedelta(days=633)
l.save()
project.refresh_totals()
project = Project.objects.get(id=project.id)
assert project.total_fans == 3
assert project.total_fans_last_week == 0
assert project.total_fans_last_month == 1
assert project.total_fans_last_year == 2
assert project.totals_updated_datetime > totals_updated_datetime
client.login(project.owner)
url_like = reverse("projects-like", args=(project.id,))
response = client.post(url_like)
project = Project.objects.get(id=project.id)
assert project.total_fans == 4
assert project.total_fans_last_week == 1
assert project.total_fans_last_month == 2
assert project.total_fans_last_year == 3
assert project.totals_updated_datetime > totals_updated_datetime
|
agpl-3.0
| -8,717,044,814,042,567,000 | 35.203704 | 82 | 0.701449 | false |
edx-solutions/discussion-edx-platform-extensions
|
social_engagement/engagement.py
|
1
|
14753
|
"""
Business logic tier regarding social engagement scores
"""
import logging
import sys
from collections import defaultdict
from datetime import datetime
import pytz
from django.conf import settings
from django.db.models.signals import post_save, pre_save
from django.dispatch import receiver
from django.http import HttpRequest
import openedx.core.djangoapps.django_comment_common.comment_client as cc
from edx_notifications.data import NotificationMessage
from edx_notifications.lib.publisher import (get_notification_type,
publish_notification_to_user)
from edx_solutions_api_integration.utils import get_aggregate_exclusion_user_ids
from lms.djangoapps.discussion.rest_api.exceptions import (CommentNotFoundError,
ThreadNotFoundError)
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from openedx.core.djangoapps.django_comment_common.comment_client.user import get_course_social_stats
from openedx.core.djangoapps.django_comment_common.comment_client.utils import CommentClientRequestError
from requests.exceptions import ConnectionError
from xmodule.modulestore.django import modulestore
from .models import StudentSocialEngagementScore
log = logging.getLogger(__name__)
def update_course_engagement(course_id, compute_if_closed_course=False, course_descriptor=None):
"""
Compute and save engagement scores and stats for whole course.
"""
if not settings.FEATURES.get('ENABLE_SOCIAL_ENGAGEMENT', False):
return
course_key = course_id if isinstance(course_id, CourseKey) else CourseKey.from_string(course_id)
# cs_comment_service works is slash separated course_id strings
slash_course_id = str(course_key)
if not course_descriptor:
        # if the course descriptor was not passed in (as an optimization)
course_descriptor = modulestore().get_course(course_key)
if not course_descriptor:
# couldn't find course?!?
return
if not compute_if_closed_course and course_descriptor.end:
# if course is closed then don't bother. Note we can override this if we want to force update
now_utc = datetime.now(pytz.UTC)
if now_utc > course_descriptor.end:
log.info('update_user_engagement_score() is skipping because the course is closed...')
return
score_update_count = 0
try:
for user_id, social_stats in _get_course_social_stats(slash_course_id):
log.info('Updating social engagement score for user_id {} in course_key {}'.format(user_id, course_key))
current_score = _compute_social_engagement_score(social_stats)
StudentSocialEngagementScore.save_user_engagement_score(
course_key, user_id, current_score, social_stats
)
score_update_count += 1
except (CommentClientRequestError, ConnectionError) as error:
log.exception(error)
return score_update_count
def _get_course_social_stats(course_id):
""""
Yield user and user's stats for whole course from Forum API.
"""
stats = get_course_social_stats(course_id)
yield from stats.items()
def get_social_metric_points():
"""
Get custom or default social metric points.
"""
return getattr(
settings,
'SOCIAL_METRIC_POINTS',
{
'num_threads': 10,
'num_comments': 15,
'num_replies': 15,
'num_upvotes': 25,
'num_thread_followers': 5,
'num_comments_generated': 15,
}
)
def _compute_social_engagement_score(social_metrics):
"""
For a list of social_stats, compute the social score
"""
social_metric_points = get_social_metric_points()
social_total = 0
for key, val in social_metric_points.items():
social_total += social_metrics.get(key, 0) * val
return social_total
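# Illustrative sketch (not part of the original module): with the default
# SOCIAL_METRIC_POINTS above, the score is a plain weighted sum, e.g.
#
#     _compute_social_engagement_score(
#         {'num_threads': 2, 'num_comments': 3, 'num_upvotes': 1}
#     )
#     # => 2 * 10 + 3 * 15 + 1 * 25 == 90
#
# Metrics missing from the dict contribute nothing because of the
# `social_metrics.get(key, 0)` lookup.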
#
# Support for Notifications, these two receivers should actually be migrated into a new Leaderboard django app.
# For now, put the business logic here, but it is pretty decoupled through event signaling
# so we should be able to move these files easily when we are able to do so
#
@receiver(pre_save, sender=StudentSocialEngagementScore)
def handle_progress_pre_save_signal(sender, instance, **kwargs):
"""
Handle the pre-save ORM event on StudentSocialEngagementScore
"""
if settings.FEATURES['ENABLE_NOTIFICATIONS']:
# If notifications feature is enabled, then we need to get the user's
# rank before the save is made, so that we can compare it to
# after the save and see if the position changes
instance.presave_leaderboard_rank = StudentSocialEngagementScore.get_user_leaderboard_position(
instance.course_id,
user_id=instance.user.id,
exclude_users=get_aggregate_exclusion_user_ids(instance.course_id)
)['position']
@receiver(post_save, sender=StudentSocialEngagementScore)
def handle_progress_post_save_signal(sender, instance, **kwargs):
"""
    Handle the post-save ORM event on StudentSocialEngagementScore
"""
if settings.FEATURES['ENABLE_NOTIFICATIONS']:
# If notifications feature is enabled, then we need to get the user's
# rank before the save is made, so that we can compare it to
# after the save and see if the position changes
leaderboard_rank = StudentSocialEngagementScore.get_user_leaderboard_position(
instance.course_id,
user_id=instance.user.id,
exclude_users=get_aggregate_exclusion_user_ids(instance.course_id)
)['position']
if leaderboard_rank == 0:
# quick escape when user is not in the leaderboard
# which means rank = 0. Trouble is 0 < 3, so unfortunately
# the semantics around 0 don't match the logic below
return
# logic for Notification trigger is when a user enters into the Leaderboard
leaderboard_size = getattr(settings, 'LEADERBOARD_SIZE', 3)
presave_leaderboard_rank = instance.presave_leaderboard_rank if instance.presave_leaderboard_rank else sys.maxsize
if leaderboard_rank <= leaderboard_size and presave_leaderboard_rank > leaderboard_size:
try:
notification_msg = NotificationMessage(
msg_type=get_notification_type('open-edx.lms.leaderboard.engagement.rank-changed'),
namespace=str(instance.course_id),
payload={
'_schema_version': '1',
'rank': leaderboard_rank,
'leaderboard_name': 'Engagement',
}
)
#
# add in all the context parameters we'll need to
# generate a URL back to the website that will
# present the new course announcement
#
# IMPORTANT: This can be changed to msg.add_click_link() if we
# have a particular URL that we wish to use. In the initial use case,
# we need to make the link point to a different front end website
# so we need to resolve these links at dispatch time
#
notification_msg.add_click_link_params({
'course_id': str(instance.course_id),
})
publish_notification_to_user(int(instance.user.id), notification_msg)
except Exception as ex:
# Notifications are never critical, so we don't want to disrupt any
# other logic processing. So log and continue.
log.exception(ex)
def get_involved_users_in_thread(request, thread):
"""
Compute all the users involved in the children of a specific thread.
"""
params = {"thread_id": thread.id, "page_size": 100}
is_question = getattr(thread, "thread_type", None) == "question"
author_id = getattr(thread, 'user_id', None)
results = _detail_results_factory()
if is_question:
# get users of the non-endorsed comments in thread
params.update({"endorsed": False})
_get_details_for_deletion(_get_request(request, params), results=results, is_thread=True)
# get users of the endorsed comments in thread
if getattr(thread, 'has_endorsed', False):
params.update({"endorsed": True})
_get_details_for_deletion(_get_request(request, params), results=results, is_thread=True)
else:
_get_details_for_deletion(_get_request(request, params), results=results, is_thread=True)
users = results['users']
if author_id:
users[author_id]['num_upvotes'] += thread.votes.get('count', 0)
users[author_id]['num_threads'] += 1
users[author_id]['num_comments_generated'] += results['all_comments']
users[author_id]['num_thread_followers'] += thread.get_num_followers()
if thread.abuse_flaggers:
users[author_id]['num_flagged'] += 1
return users
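# Hedged sketch of the structure returned above (values invented for
# illustration): `users` is a defaultdict keyed by user id, each value a
# defaultdict(int) of counters, e.g.
#
#     {'42': {'num_upvotes': 3, 'num_threads': 1,
#             'num_comments_generated': 7, 'num_thread_followers': 2}}
#
# so callers can read any counter without checking for missing keys.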
def get_involved_users_in_comment(request, comment):
"""
Method used to extract the involved users in the comment.
This method also returns the creator of the post.
"""
params = {"page_size": 100}
comment_author_id = getattr(comment, 'user_id', None)
thread_author_id = None
if hasattr(comment, 'thread_id'):
thread_author_id = _get_author_of_thread(comment.thread_id)
results = _get_details_for_deletion(_get_request(request, params), comment.id, nested=True)
users = results['users']
if comment_author_id:
users[comment_author_id]['num_upvotes'] += comment.votes.get('count', 0)
if getattr(comment, 'parent_id', None):
# It's a reply.
users[comment_author_id]['num_replies'] += 1
else:
# It's a comment.
users[comment_author_id]['num_comments'] += 1
if comment.abuse_flaggers:
users[comment_author_id]['num_flagged'] += 1
if thread_author_id:
users[thread_author_id]['num_comments_generated'] += results['replies'] + 1
return users
def _detail_results_factory():
"""
Helper method to maintain organized result structure while getting involved users.
"""
return {
'replies': 0,
'all_comments': 0,
'users': defaultdict(lambda: defaultdict(int)),
}
def _get_users_in_thread(request):
from lms.djangoapps.discussion.rest_api.views import CommentViewSet
users = set()
response_page = 1
has_results = True
while has_results:
try:
params = {"page": response_page}
response = CommentViewSet().list(
_get_request(request, params)
)
for comment in response.data["results"]:
users.add(comment["author"])
if comment["child_count"] > 0:
users.update(_get_users_in_comment(request, comment["id"]))
has_results = response.data["pagination"]["next"]
response_page += 1
except (ThreadNotFoundError, InvalidKeyError):
return users
return users
def _get_users_in_comment(request, comment_id):
from lms.djangoapps.discussion.rest_api.views import CommentViewSet
users = set()
response_page = 1
has_results = True
while has_results:
try:
response = CommentViewSet().retrieve(_get_request(request, {"page": response_page}), comment_id)
for comment in response.data["results"]:
users.add(comment["author"])
if comment["child_count"] > 0:
users.update(_get_users_in_comment(request, comment["id"]))
has_results = response.data["pagination"]["next"]
response_page += 1
except (ThreadNotFoundError, InvalidKeyError):
return users
return users
def _get_request(incoming_request, params):
request = HttpRequest()
request.method = 'GET'
request.user = incoming_request.user
request.META = incoming_request.META.copy()
request.GET = incoming_request.GET.copy()
request.GET.update(params)
return request
def _get_author_of_comment(parent_id):
comment = cc.Comment.find(parent_id)
if comment and hasattr(comment, 'user_id'):
return comment.user_id
def _get_author_of_thread(thread_id):
thread = cc.Thread.find(thread_id)
if thread and hasattr(thread, 'user_id'):
return thread.user_id
def _get_details_for_deletion(request, comment_id=None, results=None, nested=False, is_thread=False):
"""
Get details of comment or thread and related users that are required for deletion purposes.
"""
if not results:
results = _detail_results_factory()
for page, response in enumerate(_get_paginated_results(request, comment_id, is_thread)):
if page == 0:
results['all_comments'] += response.data['pagination']['count']
if results['replies'] == 0:
results['replies'] = response.data['pagination']['count']
for comment in response.data['results']:
_extract_stats_from_comment(request, comment, results, nested)
return results
def _get_paginated_results(request, comment_id, is_thread):
"""
Yield paginated comments of comment or thread.
"""
from lms.djangoapps.discussion.rest_api.views import CommentViewSet
response_page = 1
has_next = True
while has_next:
try:
if is_thread:
response = CommentViewSet().list(_get_request(request, {"page": response_page}))
else:
response = CommentViewSet().retrieve(_get_request(request, {"page": response_page}), comment_id)
except (ThreadNotFoundError, CommentNotFoundError, InvalidKeyError):
            return
has_next = response.data["pagination"]["next"]
response_page += 1
yield response
def _extract_stats_from_comment(request, comment, results, nested):
"""
Extract results from comment and its nested comments.
"""
user_id = comment.serializer.instance['user_id']
if not nested:
results['users'][user_id]['num_comments'] += 1
else:
results['users'][user_id]['num_replies'] += 1
results['users'][user_id]['num_upvotes'] += comment['vote_count']
if comment.serializer.instance['abuse_flaggers']:
results['users'][user_id]['num_flagged'] += 1
if comment['child_count'] > 0:
_get_details_for_deletion(request, comment['id'], results, nested=True)
|
agpl-3.0
| -3,449,035,946,806,721,000 | 35.790524 | 122 | 0.638582 | false |
eayunstack/python-neutronclient
|
neutronclient/shell.py
|
1
|
41584
|
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Command-line interface to the Neutron APIs
"""
from __future__ import print_function
import argparse
import inspect
import itertools
import logging
import os
import sys
from keystoneauth1 import session
import os_client_config
from oslo_utils import encodeutils
from cliff import app
from cliff import command
from cliff import commandmanager
from neutronclient._i18n import _
from neutronclient.common import clientmanager
from neutronclient.common import exceptions as exc
from neutronclient.common import extension as client_extension
from neutronclient.common import utils
from neutronclient.neutron.v2_0 import address_scope
from neutronclient.neutron.v2_0 import agent
from neutronclient.neutron.v2_0 import agentscheduler
from neutronclient.neutron.v2_0 import auto_allocated_topology
from neutronclient.neutron.v2_0 import availability_zone
from neutronclient.neutron.v2_0.bgp import dragentscheduler as bgp_drsched
from neutronclient.neutron.v2_0.bgp import peer as bgp_peer
from neutronclient.neutron.v2_0.bgp import speaker as bgp_speaker
from neutronclient.neutron.v2_0 import extension
from neutronclient.neutron.v2_0.flavor import flavor
from neutronclient.neutron.v2_0.flavor import flavor_profile
from neutronclient.neutron.v2_0 import floatingip
from neutronclient.neutron.v2_0.fw import firewall
from neutronclient.neutron.v2_0.fw import firewallpolicy
from neutronclient.neutron.v2_0.fw import firewallrule
from neutronclient.neutron.v2_0.lb import healthmonitor as lb_healthmonitor
from neutronclient.neutron.v2_0.lb import member as lb_member
from neutronclient.neutron.v2_0.lb import pool as lb_pool
from neutronclient.neutron.v2_0.lb.v2 import healthmonitor as lbaas_healthmon
from neutronclient.neutron.v2_0.lb.v2 import l7policy as lbaas_l7policy
from neutronclient.neutron.v2_0.lb.v2 import l7rule as lbaas_l7rule
from neutronclient.neutron.v2_0.lb.v2 import listener as lbaas_listener
from neutronclient.neutron.v2_0.lb.v2 import loadbalancer as lbaas_loadbalancer
from neutronclient.neutron.v2_0.lb.v2 import member as lbaas_member
from neutronclient.neutron.v2_0.lb.v2 import pool as lbaas_pool
from neutronclient.neutron.v2_0.lb import vip as lb_vip
from neutronclient.neutron.v2_0 import metering
from neutronclient.neutron.v2_0 import network
from neutronclient.neutron.v2_0 import network_ip_availability
from neutronclient.neutron.v2_0 import port
from neutronclient.neutron.v2_0 import purge
from neutronclient.neutron.v2_0.qos import bandwidth_limit_rule
from neutronclient.neutron.v2_0.qos import dscp_marking_rule
from neutronclient.neutron.v2_0.qos import policy as qos_policy
from neutronclient.neutron.v2_0.qos import rule as qos_rule
from neutronclient.neutron.v2_0 import quota
from neutronclient.neutron.v2_0 import rbac
from neutronclient.neutron.v2_0 import router
from neutronclient.neutron.v2_0 import securitygroup
from neutronclient.neutron.v2_0 import servicetype
from neutronclient.neutron.v2_0 import subnet
from neutronclient.neutron.v2_0 import subnetpool
from neutronclient.neutron.v2_0 import tag
from neutronclient.neutron.v2_0.vpn import endpoint_group
from neutronclient.neutron.v2_0.vpn import ikepolicy
from neutronclient.neutron.v2_0.vpn import ipsec_site_connection
from neutronclient.neutron.v2_0.vpn import ipsecpolicy
from neutronclient.neutron.v2_0.vpn import vpnservice
from neutronclient.version import __version__
VERSION = '2.0'
NEUTRON_API_VERSION = '2.0'
def run_command(cmd, cmd_parser, sub_argv):
_argv = sub_argv
index = -1
values_specs = []
if '--' in sub_argv:
index = sub_argv.index('--')
_argv = sub_argv[:index]
values_specs = sub_argv[index:]
known_args, _values_specs = cmd_parser.parse_known_args(_argv)
    if isinstance(cmd, subnet.CreateSubnet) and not known_args.cidr:
cidr = get_first_valid_cidr(_values_specs)
if cidr:
known_args.cidr = cidr
_values_specs.remove(cidr)
cmd.values_specs = (index == -1 and _values_specs or values_specs)
return cmd.run(known_args)
def get_first_valid_cidr(value_specs):
# Bug 1442771, argparse does not allow optional positional parameter
# to be separated from previous positional parameter.
# When cidr was separated from network, the value will not be able
# to be parsed into known_args, but saved to _values_specs instead.
for value in value_specs:
if utils.is_valid_cidr(value):
return value
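# Illustrative example (not in the original source): given the leftover
# positional specs from argparse, the first syntactically valid CIDR wins,
# e.g. get_first_valid_cidr(['--name=test', '10.0.0.0/24']) == '10.0.0.0/24';
# run_command() then moves it back into known_args.cidr (see above).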
def env(*_vars, **kwargs):
"""Search for the first defined of possibly many env vars.
Returns the first environment variable defined in vars, or
returns the default defined in kwargs.
"""
for v in _vars:
value = os.environ.get(v, None)
if value:
return value
return kwargs.get('default', '')
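# Hedged usage sketch (values are placeholders, not defaults of this module):
# env('OS_AUTH_URL', 'OS_URL') returns the first of the two environment
# variables that is set, while env('OS_REGION_NAME', default='RegionOne')
# falls back to 'RegionOne' when the variable is not defined.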
def check_non_negative_int(value):
try:
value = int(value)
except ValueError:
raise argparse.ArgumentTypeError(_("invalid int value: %r") % value)
if value < 0:
raise argparse.ArgumentTypeError(_("input value %d is negative") %
value)
return value
class BashCompletionCommand(command.Command):
"""Prints all of the commands and options for bash-completion."""
def take_action(self, parsed_args):
pass
COMMAND_V2 = {
'bash-completion': BashCompletionCommand,
'net-list': network.ListNetwork,
'net-external-list': network.ListExternalNetwork,
'net-show': network.ShowNetwork,
'net-create': network.CreateNetwork,
'net-delete': network.DeleteNetwork,
'net-update': network.UpdateNetwork,
'subnet-list': subnet.ListSubnet,
'subnet-show': subnet.ShowSubnet,
'subnet-create': subnet.CreateSubnet,
'subnet-delete': subnet.DeleteSubnet,
'subnet-update': subnet.UpdateSubnet,
'subnetpool-list': subnetpool.ListSubnetPool,
'subnetpool-show': subnetpool.ShowSubnetPool,
'subnetpool-create': subnetpool.CreateSubnetPool,
'subnetpool-delete': subnetpool.DeleteSubnetPool,
'subnetpool-update': subnetpool.UpdateSubnetPool,
'port-list': port.ListPort,
'port-show': port.ShowPort,
'port-create': port.CreatePort,
'port-delete': port.DeletePort,
'port-update': port.UpdatePort,
'purge': purge.Purge,
'quota-list': quota.ListQuota,
'quota-show': quota.ShowQuota,
'quota-default-show': quota.ShowQuotaDefault,
'quota-delete': quota.DeleteQuota,
'quota-update': quota.UpdateQuota,
'ext-list': extension.ListExt,
'ext-show': extension.ShowExt,
'router-list': router.ListRouter,
'router-port-list': port.ListRouterPort,
'router-show': router.ShowRouter,
'router-create': router.CreateRouter,
'router-delete': router.DeleteRouter,
'router-update': router.UpdateRouter,
'router-interface-add': router.AddInterfaceRouter,
'router-interface-delete': router.RemoveInterfaceRouter,
'router-gateway-set': router.SetGatewayRouter,
'router-gateway-clear': router.RemoveGatewayRouter,
'floatingip-list': floatingip.ListFloatingIP,
'floatingip-show': floatingip.ShowFloatingIP,
'floatingip-create': floatingip.CreateFloatingIP,
'floatingip-delete': floatingip.DeleteFloatingIP,
'floatingip-associate': floatingip.AssociateFloatingIP,
'floatingip-disassociate': floatingip.DisassociateFloatingIP,
'security-group-list': securitygroup.ListSecurityGroup,
'security-group-show': securitygroup.ShowSecurityGroup,
'security-group-create': securitygroup.CreateSecurityGroup,
'security-group-delete': securitygroup.DeleteSecurityGroup,
'security-group-update': securitygroup.UpdateSecurityGroup,
'security-group-rule-list': securitygroup.ListSecurityGroupRule,
'security-group-rule-show': securitygroup.ShowSecurityGroupRule,
'security-group-rule-create': securitygroup.CreateSecurityGroupRule,
'security-group-rule-delete': securitygroup.DeleteSecurityGroupRule,
'lbaas-loadbalancer-list': lbaas_loadbalancer.ListLoadBalancer,
'lbaas-loadbalancer-show': lbaas_loadbalancer.ShowLoadBalancer,
'lbaas-loadbalancer-create': lbaas_loadbalancer.CreateLoadBalancer,
'lbaas-loadbalancer-update': lbaas_loadbalancer.UpdateLoadBalancer,
'lbaas-loadbalancer-delete': lbaas_loadbalancer.DeleteLoadBalancer,
'lbaas-loadbalancer-stats': lbaas_loadbalancer.RetrieveLoadBalancerStats,
'lbaas-loadbalancer-status': lbaas_loadbalancer.RetrieveLoadBalancerStatus,
'lbaas-listener-list': lbaas_listener.ListListener,
'lbaas-listener-show': lbaas_listener.ShowListener,
'lbaas-listener-create': lbaas_listener.CreateListener,
'lbaas-listener-update': lbaas_listener.UpdateListener,
'lbaas-listener-delete': lbaas_listener.DeleteListener,
'lbaas-l7policy-list': lbaas_l7policy.ListL7Policy,
'lbaas-l7policy-show': lbaas_l7policy.ShowL7Policy,
'lbaas-l7policy-create': lbaas_l7policy.CreateL7Policy,
'lbaas-l7policy-update': lbaas_l7policy.UpdateL7Policy,
'lbaas-l7policy-delete': lbaas_l7policy.DeleteL7Policy,
'lbaas-l7rule-list': lbaas_l7rule.ListL7Rule,
'lbaas-l7rule-show': lbaas_l7rule.ShowL7Rule,
'lbaas-l7rule-create': lbaas_l7rule.CreateL7Rule,
'lbaas-l7rule-update': lbaas_l7rule.UpdateL7Rule,
'lbaas-l7rule-delete': lbaas_l7rule.DeleteL7Rule,
'lbaas-pool-list': lbaas_pool.ListPool,
'lbaas-pool-show': lbaas_pool.ShowPool,
'lbaas-pool-create': lbaas_pool.CreatePool,
'lbaas-pool-update': lbaas_pool.UpdatePool,
'lbaas-pool-delete': lbaas_pool.DeletePool,
'lbaas-healthmonitor-list': lbaas_healthmon.ListHealthMonitor,
'lbaas-healthmonitor-show': lbaas_healthmon.ShowHealthMonitor,
'lbaas-healthmonitor-create': lbaas_healthmon.CreateHealthMonitor,
'lbaas-healthmonitor-update': lbaas_healthmon.UpdateHealthMonitor,
'lbaas-healthmonitor-delete': lbaas_healthmon.DeleteHealthMonitor,
'lbaas-member-list': lbaas_member.ListMember,
'lbaas-member-show': lbaas_member.ShowMember,
'lbaas-member-create': lbaas_member.CreateMember,
'lbaas-member-update': lbaas_member.UpdateMember,
'lbaas-member-delete': lbaas_member.DeleteMember,
'lb-vip-list': lb_vip.ListVip,
'lb-vip-show': lb_vip.ShowVip,
'lb-vip-create': lb_vip.CreateVip,
'lb-vip-update': lb_vip.UpdateVip,
'lb-vip-delete': lb_vip.DeleteVip,
'lb-pool-list': lb_pool.ListPool,
'lb-pool-show': lb_pool.ShowPool,
'lb-pool-create': lb_pool.CreatePool,
'lb-pool-update': lb_pool.UpdatePool,
'lb-pool-delete': lb_pool.DeletePool,
'lb-pool-stats': lb_pool.RetrievePoolStats,
'lb-member-list': lb_member.ListMember,
'lb-member-show': lb_member.ShowMember,
'lb-member-create': lb_member.CreateMember,
'lb-member-update': lb_member.UpdateMember,
'lb-member-delete': lb_member.DeleteMember,
'lb-healthmonitor-list': lb_healthmonitor.ListHealthMonitor,
'lb-healthmonitor-show': lb_healthmonitor.ShowHealthMonitor,
'lb-healthmonitor-create': lb_healthmonitor.CreateHealthMonitor,
'lb-healthmonitor-update': lb_healthmonitor.UpdateHealthMonitor,
'lb-healthmonitor-delete': lb_healthmonitor.DeleteHealthMonitor,
'lb-healthmonitor-associate': lb_healthmonitor.AssociateHealthMonitor,
'lb-healthmonitor-disassociate': (
lb_healthmonitor.DisassociateHealthMonitor
),
'agent-list': agent.ListAgent,
'agent-show': agent.ShowAgent,
'agent-delete': agent.DeleteAgent,
'agent-update': agent.UpdateAgent,
'dhcp-agent-network-add': agentscheduler.AddNetworkToDhcpAgent,
'dhcp-agent-network-remove': agentscheduler.RemoveNetworkFromDhcpAgent,
'net-list-on-dhcp-agent': agentscheduler.ListNetworksOnDhcpAgent,
'dhcp-agent-list-hosting-net': agentscheduler.ListDhcpAgentsHostingNetwork,
'l3-agent-router-add': agentscheduler.AddRouterToL3Agent,
'l3-agent-router-remove': agentscheduler.RemoveRouterFromL3Agent,
'router-list-on-l3-agent': agentscheduler.ListRoutersOnL3Agent,
'l3-agent-list-hosting-router': agentscheduler.ListL3AgentsHostingRouter,
'lb-pool-list-on-agent': agentscheduler.ListPoolsOnLbaasAgent,
'lb-agent-hosting-pool': agentscheduler.GetLbaasAgentHostingPool,
'lbaas-loadbalancer-list-on-agent':
agentscheduler.ListLoadBalancersOnLbaasAgent,
'lbaas-agent-hosting-loadbalancer':
agentscheduler.GetLbaasAgentHostingLoadBalancer,
'service-provider-list': servicetype.ListServiceProvider,
'firewall-rule-list': firewallrule.ListFirewallRule,
'firewall-rule-show': firewallrule.ShowFirewallRule,
'firewall-rule-create': firewallrule.CreateFirewallRule,
'firewall-rule-update': firewallrule.UpdateFirewallRule,
'firewall-rule-delete': firewallrule.DeleteFirewallRule,
'firewall-policy-list': firewallpolicy.ListFirewallPolicy,
'firewall-policy-show': firewallpolicy.ShowFirewallPolicy,
'firewall-policy-create': firewallpolicy.CreateFirewallPolicy,
'firewall-policy-update': firewallpolicy.UpdateFirewallPolicy,
'firewall-policy-delete': firewallpolicy.DeleteFirewallPolicy,
'firewall-policy-insert-rule': firewallpolicy.FirewallPolicyInsertRule,
'firewall-policy-remove-rule': firewallpolicy.FirewallPolicyRemoveRule,
'firewall-list': firewall.ListFirewall,
'firewall-show': firewall.ShowFirewall,
'firewall-create': firewall.CreateFirewall,
'firewall-update': firewall.UpdateFirewall,
'firewall-delete': firewall.DeleteFirewall,
'ipsec-site-connection-list': (
ipsec_site_connection.ListIPsecSiteConnection
),
'ipsec-site-connection-show': (
ipsec_site_connection.ShowIPsecSiteConnection
),
'ipsec-site-connection-create': (
ipsec_site_connection.CreateIPsecSiteConnection
),
'ipsec-site-connection-update': (
ipsec_site_connection.UpdateIPsecSiteConnection
),
'ipsec-site-connection-delete': (
ipsec_site_connection.DeleteIPsecSiteConnection
),
'vpn-endpoint-group-list': endpoint_group.ListEndpointGroup,
'vpn-endpoint-group-show': endpoint_group.ShowEndpointGroup,
'vpn-endpoint-group-create': endpoint_group.CreateEndpointGroup,
'vpn-endpoint-group-update': endpoint_group.UpdateEndpointGroup,
'vpn-endpoint-group-delete': endpoint_group.DeleteEndpointGroup,
'vpn-service-list': vpnservice.ListVPNService,
'vpn-service-show': vpnservice.ShowVPNService,
'vpn-service-create': vpnservice.CreateVPNService,
'vpn-service-update': vpnservice.UpdateVPNService,
'vpn-service-delete': vpnservice.DeleteVPNService,
'vpn-ipsecpolicy-list': ipsecpolicy.ListIPsecPolicy,
'vpn-ipsecpolicy-show': ipsecpolicy.ShowIPsecPolicy,
'vpn-ipsecpolicy-create': ipsecpolicy.CreateIPsecPolicy,
'vpn-ipsecpolicy-update': ipsecpolicy.UpdateIPsecPolicy,
'vpn-ipsecpolicy-delete': ipsecpolicy.DeleteIPsecPolicy,
'vpn-ikepolicy-list': ikepolicy.ListIKEPolicy,
'vpn-ikepolicy-show': ikepolicy.ShowIKEPolicy,
'vpn-ikepolicy-create': ikepolicy.CreateIKEPolicy,
'vpn-ikepolicy-update': ikepolicy.UpdateIKEPolicy,
'vpn-ikepolicy-delete': ikepolicy.DeleteIKEPolicy,
'meter-label-create': metering.CreateMeteringLabel,
'meter-label-list': metering.ListMeteringLabel,
'meter-label-show': metering.ShowMeteringLabel,
'meter-label-delete': metering.DeleteMeteringLabel,
'meter-label-rule-create': metering.CreateMeteringLabelRule,
'meter-label-rule-list': metering.ListMeteringLabelRule,
'meter-label-rule-show': metering.ShowMeteringLabelRule,
'meter-label-rule-delete': metering.DeleteMeteringLabelRule,
'rbac-create': rbac.CreateRBACPolicy,
'rbac-update': rbac.UpdateRBACPolicy,
'rbac-list': rbac.ListRBACPolicy,
'rbac-show': rbac.ShowRBACPolicy,
'rbac-delete': rbac.DeleteRBACPolicy,
'address-scope-list': address_scope.ListAddressScope,
'address-scope-show': address_scope.ShowAddressScope,
'address-scope-create': address_scope.CreateAddressScope,
'address-scope-delete': address_scope.DeleteAddressScope,
'address-scope-update': address_scope.UpdateAddressScope,
'qos-policy-list': qos_policy.ListQoSPolicy,
'qos-policy-show': qos_policy.ShowQoSPolicy,
'qos-policy-create': qos_policy.CreateQoSPolicy,
'qos-policy-update': qos_policy.UpdateQoSPolicy,
'qos-policy-delete': qos_policy.DeleteQoSPolicy,
'qos-bandwidth-limit-rule-create': (
bandwidth_limit_rule.CreateQoSBandwidthLimitRule
),
'qos-bandwidth-limit-rule-show': (
bandwidth_limit_rule.ShowQoSBandwidthLimitRule
),
'qos-bandwidth-limit-rule-list': (
bandwidth_limit_rule.ListQoSBandwidthLimitRules
),
'qos-bandwidth-limit-rule-update': (
bandwidth_limit_rule.UpdateQoSBandwidthLimitRule
),
'qos-bandwidth-limit-rule-delete': (
bandwidth_limit_rule.DeleteQoSBandwidthLimitRule
),
'qos-dscp-marking-rule-create': (
dscp_marking_rule.CreateQoSDscpMarkingRule
),
'qos-dscp-marking-rule-show': (
dscp_marking_rule.ShowQoSDscpMarkingRule
),
'qos-dscp-marking-rule-list': (
dscp_marking_rule.ListQoSDscpMarkingRules
),
'qos-dscp-marking-rule-update': (
dscp_marking_rule.UpdateQoSDscpMarkingRule
),
'qos-dscp-marking-rule-delete': (
dscp_marking_rule.DeleteQoSDscpMarkingRule
),
'qos-available-rule-types': qos_rule.ListQoSRuleTypes,
'flavor-list': flavor.ListFlavor,
'flavor-show': flavor.ShowFlavor,
'flavor-create': flavor.CreateFlavor,
'flavor-delete': flavor.DeleteFlavor,
'flavor-update': flavor.UpdateFlavor,
'flavor-associate': flavor.AssociateFlavor,
'flavor-disassociate': flavor.DisassociateFlavor,
'flavor-profile-list': flavor_profile.ListFlavorProfile,
'flavor-profile-show': flavor_profile.ShowFlavorProfile,
'flavor-profile-create': flavor_profile.CreateFlavorProfile,
'flavor-profile-delete': flavor_profile.DeleteFlavorProfile,
'flavor-profile-update': flavor_profile.UpdateFlavorProfile,
'availability-zone-list': availability_zone.ListAvailabilityZone,
'auto-allocated-topology-show': (
auto_allocated_topology.ShowAutoAllocatedTopology),
'bgp-dragent-speaker-add': (
bgp_drsched.AddBGPSpeakerToDRAgent
),
'bgp-dragent-speaker-remove': (
bgp_drsched.RemoveBGPSpeakerFromDRAgent
),
'bgp-speaker-list-on-dragent': (
bgp_drsched.ListBGPSpeakersOnDRAgent
),
'bgp-dragent-list-hosting-speaker': (
bgp_drsched.ListDRAgentsHostingBGPSpeaker
),
'bgp-speaker-list': bgp_speaker.ListSpeakers,
'bgp-speaker-advertiseroute-list': (
bgp_speaker.ListRoutesAdvertisedBySpeaker
),
'bgp-speaker-show': bgp_speaker.ShowSpeaker,
'bgp-speaker-create': bgp_speaker.CreateSpeaker,
'bgp-speaker-update': bgp_speaker.UpdateSpeaker,
'bgp-speaker-delete': bgp_speaker.DeleteSpeaker,
'bgp-speaker-peer-add': bgp_speaker.AddPeerToSpeaker,
'bgp-speaker-peer-remove': bgp_speaker.RemovePeerFromSpeaker,
'bgp-speaker-network-add': bgp_speaker.AddNetworkToSpeaker,
'bgp-speaker-network-remove': bgp_speaker.RemoveNetworkFromSpeaker,
'bgp-peer-list': bgp_peer.ListPeers,
'bgp-peer-show': bgp_peer.ShowPeer,
'bgp-peer-create': bgp_peer.CreatePeer,
'bgp-peer-update': bgp_peer.UpdatePeer,
'bgp-peer-delete': bgp_peer.DeletePeer,
'net-ip-availability-list': network_ip_availability.ListIpAvailability,
'net-ip-availability-show': network_ip_availability.ShowIpAvailability,
'tag-add': tag.AddTag,
'tag-replace': tag.ReplaceTag,
'tag-remove': tag.RemoveTag,
}
COMMANDS = {'2.0': COMMAND_V2}
class HelpAction(argparse.Action):
"""Print help message including sub-commands
Provide a custom action so the -h and --help options
to the main app will print a list of the commands.
The commands are determined by checking the CommandManager
instance, passed in as the "default" value for the action.
"""
def __call__(self, parser, namespace, values, option_string=None):
outputs = []
max_len = 0
app = self.default
parser.print_help(app.stdout)
app.stdout.write(_('\nCommands for API v%s:\n') % app.api_version)
command_manager = app.command_manager
for name, ep in sorted(command_manager):
factory = ep.load()
cmd = factory(self, None)
one_liner = cmd.get_description().split('\n')[0]
outputs.append((name, one_liner))
max_len = max(len(name), max_len)
for (name, one_liner) in outputs:
app.stdout.write(' %s %s\n' % (name.ljust(max_len), one_liner))
sys.exit(0)
class NeutronShell(app.App):
# verbose logging levels
WARNING_LEVEL = 0
INFO_LEVEL = 1
DEBUG_LEVEL = 2
CONSOLE_MESSAGE_FORMAT = '%(message)s'
DEBUG_MESSAGE_FORMAT = '%(levelname)s: %(name)s %(message)s'
log = logging.getLogger(__name__)
def __init__(self, apiversion):
super(NeutronShell, self).__init__(
description=__doc__.strip(),
version=VERSION,
command_manager=commandmanager.CommandManager('neutron.cli'), )
self.commands = COMMANDS
for k, v in self.commands[apiversion].items():
self.command_manager.add_command(k, v)
self._register_extensions(VERSION)
# Pop the 'complete' to correct the outputs of 'neutron help'.
self.command_manager.commands.pop('complete')
# This is instantiated in initialize_app() only when using
# password flow auth
self.auth_client = None
self.api_version = apiversion
def build_option_parser(self, description, version):
"""Return an argparse option parser for this application.
Subclasses may override this method to extend
the parser with more global options.
:param description: full description of the application
:paramtype description: str
:param version: version number for the application
:paramtype version: str
"""
parser = argparse.ArgumentParser(
description=description,
add_help=False, )
parser.add_argument(
'--version',
action='version',
version=__version__, )
parser.add_argument(
'-v', '--verbose', '--debug',
action='count',
dest='verbose_level',
default=self.DEFAULT_VERBOSE_LEVEL,
help=_('Increase verbosity of output and show tracebacks on'
' errors. You can repeat this option.'))
parser.add_argument(
'-q', '--quiet',
action='store_const',
dest='verbose_level',
const=0,
help=_('Suppress output except warnings and errors.'))
parser.add_argument(
'-h', '--help',
action=HelpAction,
nargs=0,
default=self, # tricky
help=_("Show this help message and exit."))
parser.add_argument(
'-r', '--retries',
metavar="NUM",
type=check_non_negative_int,
default=0,
help=_("How many times the request to the Neutron server should "
"be retried if it fails."))
# FIXME(bklei): this method should come from keystoneauth1
self._append_global_identity_args(parser)
return parser
def _append_global_identity_args(self, parser):
# FIXME(bklei): these are global identity (Keystone) arguments which
# should be consistent and shared by all service clients. Therefore,
# they should be provided by keystoneauth1. We will need to
# refactor this code once this functionality is available in
# keystoneauth1.
#
# Note: At that time we'll need to decide if we can just abandon
# the deprecated args (--service-type and --endpoint-type).
parser.add_argument(
'--os-service-type', metavar='<os-service-type>',
default=env('OS_NETWORK_SERVICE_TYPE', default='network'),
help=_('Defaults to env[OS_NETWORK_SERVICE_TYPE] or network.'))
parser.add_argument(
'--os-endpoint-type', metavar='<os-endpoint-type>',
default=env('OS_ENDPOINT_TYPE', default='public'),
help=_('Defaults to env[OS_ENDPOINT_TYPE] or public.'))
# FIXME(bklei): --service-type is deprecated but kept in for
# backward compatibility.
parser.add_argument(
'--service-type', metavar='<service-type>',
default=env('OS_NETWORK_SERVICE_TYPE', default='network'),
help=_('DEPRECATED! Use --os-service-type.'))
# FIXME(bklei): --endpoint-type is deprecated but kept in for
# backward compatibility.
parser.add_argument(
'--endpoint-type', metavar='<endpoint-type>',
default=env('OS_ENDPOINT_TYPE', default='public'),
help=_('DEPRECATED! Use --os-endpoint-type.'))
parser.add_argument(
'--os-auth-strategy', metavar='<auth-strategy>',
default=env('OS_AUTH_STRATEGY', default='keystone'),
help=_('DEPRECATED! Only keystone is supported.'))
parser.add_argument(
'--os_auth_strategy',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-cloud', metavar='<cloud>',
default=env('OS_CLOUD', default=None),
help=_('Defaults to env[OS_CLOUD].'))
parser.add_argument(
'--os-auth-url', metavar='<auth-url>',
default=env('OS_AUTH_URL'),
help=_('Authentication URL, defaults to env[OS_AUTH_URL].'))
parser.add_argument(
'--os_auth_url',
help=argparse.SUPPRESS)
project_name_group = parser.add_mutually_exclusive_group()
project_name_group.add_argument(
'--os-tenant-name', metavar='<auth-tenant-name>',
default=env('OS_TENANT_NAME'),
help=_('Authentication tenant name, defaults to '
'env[OS_TENANT_NAME].'))
project_name_group.add_argument(
'--os-project-name',
metavar='<auth-project-name>',
default=utils.env('OS_PROJECT_NAME'),
help=_('Another way to specify tenant name. '
'This option is mutually exclusive with '
' --os-tenant-name. '
'Defaults to env[OS_PROJECT_NAME].'))
parser.add_argument(
'--os_tenant_name',
help=argparse.SUPPRESS)
project_id_group = parser.add_mutually_exclusive_group()
project_id_group.add_argument(
'--os-tenant-id', metavar='<auth-tenant-id>',
default=env('OS_TENANT_ID'),
help=_('Authentication tenant ID, defaults to '
'env[OS_TENANT_ID].'))
project_id_group.add_argument(
'--os-project-id',
metavar='<auth-project-id>',
default=utils.env('OS_PROJECT_ID'),
help=_('Another way to specify tenant ID. '
'This option is mutually exclusive with '
' --os-tenant-id. '
'Defaults to env[OS_PROJECT_ID].'))
parser.add_argument(
'--os-username', metavar='<auth-username>',
default=utils.env('OS_USERNAME'),
help=_('Authentication username, defaults to env[OS_USERNAME].'))
parser.add_argument(
'--os_username',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-user-id', metavar='<auth-user-id>',
default=env('OS_USER_ID'),
help=_('Authentication user ID (Env: OS_USER_ID)'))
parser.add_argument(
'--os_user_id',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-user-domain-id',
metavar='<auth-user-domain-id>',
default=utils.env('OS_USER_DOMAIN_ID'),
help=_('OpenStack user domain ID. '
'Defaults to env[OS_USER_DOMAIN_ID].'))
parser.add_argument(
'--os_user_domain_id',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-user-domain-name',
metavar='<auth-user-domain-name>',
default=utils.env('OS_USER_DOMAIN_NAME'),
help=_('OpenStack user domain name. '
'Defaults to env[OS_USER_DOMAIN_NAME].'))
parser.add_argument(
'--os_user_domain_name',
help=argparse.SUPPRESS)
parser.add_argument(
'--os_project_id',
help=argparse.SUPPRESS)
parser.add_argument(
'--os_project_name',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-project-domain-id',
metavar='<auth-project-domain-id>',
default=utils.env('OS_PROJECT_DOMAIN_ID'),
help=_('Defaults to env[OS_PROJECT_DOMAIN_ID].'))
parser.add_argument(
'--os-project-domain-name',
metavar='<auth-project-domain-name>',
default=utils.env('OS_PROJECT_DOMAIN_NAME'),
help=_('Defaults to env[OS_PROJECT_DOMAIN_NAME].'))
parser.add_argument(
'--os-cert',
metavar='<certificate>',
default=utils.env('OS_CERT'),
help=_("Path of certificate file to use in SSL "
"connection. This file can optionally be "
"prepended with the private key. Defaults "
"to env[OS_CERT]."))
parser.add_argument(
'--os-cacert',
metavar='<ca-certificate>',
default=env('OS_CACERT', default=None),
help=_("Specify a CA bundle file to use in "
"verifying a TLS (https) server certificate. "
"Defaults to env[OS_CACERT]."))
parser.add_argument(
'--os-key',
metavar='<key>',
default=utils.env('OS_KEY'),
help=_("Path of client key to use in SSL "
"connection. This option is not necessary "
"if your key is prepended to your certificate "
"file. Defaults to env[OS_KEY]."))
parser.add_argument(
'--os-password', metavar='<auth-password>',
default=utils.env('OS_PASSWORD'),
help=_('Authentication password, defaults to env[OS_PASSWORD].'))
parser.add_argument(
'--os_password',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-region-name', metavar='<auth-region-name>',
default=env('OS_REGION_NAME'),
help=_('Authentication region name, defaults to '
'env[OS_REGION_NAME].'))
parser.add_argument(
'--os_region_name',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-token', metavar='<token>',
default=env('OS_TOKEN'),
help=_('Authentication token, defaults to env[OS_TOKEN].'))
parser.add_argument(
'--os_token',
help=argparse.SUPPRESS)
parser.add_argument(
'--http-timeout', metavar='<seconds>',
default=env('OS_NETWORK_TIMEOUT', default=None), type=float,
help=_('Timeout in seconds to wait for an HTTP response. Defaults '
'to env[OS_NETWORK_TIMEOUT] or None if not specified.'))
parser.add_argument(
'--os-url', metavar='<url>',
default=env('OS_URL'),
help=_('Defaults to env[OS_URL].'))
parser.add_argument(
'--os_url',
help=argparse.SUPPRESS)
parser.add_argument(
'--insecure',
action='store_true',
default=env('NEUTRONCLIENT_INSECURE', default=False),
help=_("Explicitly allow neutronclient to perform \"insecure\" "
"SSL (https) requests. The server's certificate will "
"not be verified against any certificate authorities. "
"This option should be used with caution."))
def _bash_completion(self):
"""Prints all of the commands and options for bash-completion."""
commands = set()
options = set()
for option, _action in self.parser._option_string_actions.items():
options.add(option)
for _name, _command in self.command_manager:
commands.add(_name)
cmd_factory = _command.load()
cmd = cmd_factory(self, None)
cmd_parser = cmd.get_parser('')
for option, _action in cmd_parser._option_string_actions.items():
options.add(option)
print(' '.join(commands | options))
def _register_extensions(self, version):
for name, module in itertools.chain(
client_extension._discover_via_entry_points()):
self._extend_shell_commands(name, module, version)
def _extend_shell_commands(self, name, module, version):
classes = inspect.getmembers(module, inspect.isclass)
for cls_name, cls in classes:
if (issubclass(cls, client_extension.NeutronClientExtension) and
hasattr(cls, 'shell_command')):
cmd = cls.shell_command
if hasattr(cls, 'versions'):
if version not in cls.versions:
continue
try:
name_prefix = "[%s]" % name
cls.__doc__ = ("%s %s" % (name_prefix, cls.__doc__) if
cls.__doc__ else name_prefix)
self.command_manager.add_command(cmd, cls)
self.commands[version][cmd] = cls
except TypeError:
pass
def run(self, argv):
"""Equivalent to the main program for the application.
:param argv: input arguments and options
:paramtype argv: list of str
"""
try:
index = 0
command_pos = -1
help_pos = -1
help_command_pos = -1
for arg in argv:
if arg == 'bash-completion' and help_command_pos == -1:
self._bash_completion()
return 0
if arg in self.commands[self.api_version]:
if command_pos == -1:
command_pos = index
elif arg in ('-h', '--help'):
if help_pos == -1:
help_pos = index
elif arg == 'help':
if help_command_pos == -1:
help_command_pos = index
index = index + 1
if command_pos > -1 and help_pos > command_pos:
argv = ['help', argv[command_pos]]
if help_command_pos > -1 and command_pos == -1:
argv[help_command_pos] = '--help'
self.options, remainder = self.parser.parse_known_args(argv)
self.configure_logging()
self.interactive_mode = not remainder
self.initialize_app(remainder)
except Exception as err:
if self.options.verbose_level >= self.DEBUG_LEVEL:
self.log.exception(err)
raise
else:
self.log.error(err)
return 1
if self.interactive_mode:
_argv = [sys.argv[0]]
sys.argv = _argv
return self.interact()
return self.run_subcommand(remainder)
def run_subcommand(self, argv):
subcommand = self.command_manager.find_command(argv)
cmd_factory, cmd_name, sub_argv = subcommand
cmd = cmd_factory(self, self.options)
try:
self.prepare_to_run_command(cmd)
full_name = (cmd_name
if self.interactive_mode
else ' '.join([self.NAME, cmd_name])
)
cmd_parser = cmd.get_parser(full_name)
return run_command(cmd, cmd_parser, sub_argv)
except SystemExit:
print(_("Try 'neutron help %s' for more information.") %
cmd_name, file=sys.stderr)
raise
except Exception as e:
if self.options.verbose_level >= self.DEBUG_LEVEL:
self.log.exception("%s", e)
raise
self.log.error("%s", e)
return 1
def authenticate_user(self):
"""Confirm user authentication
Make sure the user has provided all of the authentication
info we need.
"""
cloud_config = os_client_config.OpenStackConfig().get_one_cloud(
cloud=self.options.os_cloud, argparse=self.options,
network_api_version=self.api_version,
verify=not self.options.insecure)
verify, cert = cloud_config.get_requests_verify_args()
        # TODO(singhj): Remove dependency on HTTPClient
# for the case of token-endpoint authentication
# When using token-endpoint authentication legacy
# HTTPClient will be used, otherwise SessionClient
# will be used.
if self.options.os_token and self.options.os_url:
auth = None
auth_session = None
else:
auth = cloud_config.get_auth()
auth_session = session.Session(
auth=auth, verify=verify, cert=cert,
timeout=self.options.http_timeout)
interface = self.options.os_endpoint_type or self.endpoint_type
if interface.endswith('URL'):
interface = interface[:-3]
self.client_manager = clientmanager.ClientManager(
retries=self.options.retries,
raise_errors=False,
session=auth_session,
url=self.options.os_url,
token=self.options.os_token,
region_name=cloud_config.get_region_name(),
api_version=cloud_config.get_api_version('network'),
service_type=cloud_config.get_service_type('network'),
service_name=cloud_config.get_service_name('network'),
endpoint_type=interface,
auth=auth,
insecure=not verify,
log_credentials=True)
return
def initialize_app(self, argv):
"""Global app init bits:
* set up API versions
* validate authentication info
"""
super(NeutronShell, self).initialize_app(argv)
self.api_version = {'network': self.api_version}
# If the user is not asking for help, make sure they
# have given us auth.
cmd_name = None
if argv:
cmd_info = self.command_manager.find_command(argv)
cmd_factory, cmd_name, sub_argv = cmd_info
if self.interactive_mode or cmd_name != 'help':
self.authenticate_user()
def configure_logging(self):
"""Create logging handlers for any log output."""
root_logger = logging.getLogger('')
# Set up logging to a file
root_logger.setLevel(logging.DEBUG)
# Send higher-level messages to the console via stderr
console = logging.StreamHandler(self.stderr)
console_level = {self.WARNING_LEVEL: logging.WARNING,
self.INFO_LEVEL: logging.INFO,
self.DEBUG_LEVEL: logging.DEBUG,
}.get(self.options.verbose_level, logging.DEBUG)
# The default log level is INFO, in this situation, set the
# log level of the console to WARNING, to avoid displaying
# useless messages. This equals using "--quiet"
if console_level == logging.INFO:
console.setLevel(logging.WARNING)
else:
console.setLevel(console_level)
if logging.DEBUG == console_level:
formatter = logging.Formatter(self.DEBUG_MESSAGE_FORMAT)
else:
formatter = logging.Formatter(self.CONSOLE_MESSAGE_FORMAT)
logging.getLogger('iso8601.iso8601').setLevel(logging.WARNING)
logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)
console.setFormatter(formatter)
root_logger.addHandler(console)
return
def main(argv=sys.argv[1:]):
try:
return NeutronShell(NEUTRON_API_VERSION).run(
list(map(encodeutils.safe_decode, argv)))
except KeyboardInterrupt:
print(_("... terminating neutron client"), file=sys.stderr)
return 130
except exc.NeutronClientException:
return 1
except Exception as e:
print(e)
return 1
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
|
apache-2.0
| 2,772,146,299,183,681,000 | 40.459621 | 79 | 0.645897 | false |
edx/django-pyfs
|
djpyfs/djpyfs.py
|
1
|
5932
|
"""
This is a thin veneer around a `pyfilesystem`. It adds a few bits of
functionality:
1) Django configuration. This can go to Amazon S3 or a static
filesystem.
2) The ability to get URLs for objects stored on the filesystem.
3) The ability to create objects with a limited lifetime. A
task can garbage-collect those objects.
"""
import os
import os.path
import types
from boto.s3.connection import S3Connection
from django.conf import settings
from fs.osfs import OSFS
from fs_s3fs import S3FS
from .models import FSExpirations
if hasattr(settings, 'DJFS'):
DJFS_SETTINGS = settings.DJFS # pragma: no cover
else:
DJFS_SETTINGS = {'type': 'osfs',
'directory_root': 'django-pyfs/static/django-pyfs',
'url_root': '/static/django-pyfs'}
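# Hedged configuration sketch (values are placeholders, not defaults shipped
# with this module): an S3-backed store would instead set settings.DJFS to
# something like
#
#     DJFS = {'type': 's3fs',
#             'bucket': 'my-bucket',
#             'prefix': 'django-pyfs',
#             'aws_access_key_id': '...',
#             'aws_secret_access_key': '...'}
#
# matching the keys read by get_s3fs() below.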
# Global to hold the active S3 connection. Prevents needing to reconnect
# several times in a request. Connections are set up below in `get_s3_url`.
S3CONN = None
def get_filesystem(namespace):
"""
Returns a patched pyfilesystem for static module storage based on
`DJFS_SETTINGS`. See `patch_fs` documentation for additional details.
The file system will have two additional properties:
1) get_url: A way to get a URL for a static file download
2) expire: A way to expire files (so they are automatically destroyed)
"""
if DJFS_SETTINGS['type'] == 'osfs':
return get_osfs(namespace)
elif DJFS_SETTINGS['type'] == 's3fs':
return get_s3fs(namespace)
else:
raise AttributeError("Bad filesystem: " + str(DJFS_SETTINGS['type']))
def expire_objects():
"""
Remove all obsolete objects from the file systems.
"""
objects = sorted(FSExpirations.expired(), key=lambda x: x.module)
fs = None
module = None
for o in objects:
if module != o.module:
module = o.module
fs = get_filesystem(module)
if fs.exists(o.filename):
fs.remove(o.filename)
o.delete()
def patch_fs(fs, namespace, url_method):
"""
Patch a filesystem instance to add the `get_url` and `expire` methods.
Arguments:
fs (obj): The pyfilesystem subclass instance to be patched.
namespace (str): Namespace of the filesystem, used in `expire`
        url_method (func): Function to patch into the filesystem instance as
            `get_url`. Allows a filesystem-independent implementation.
Returns:
obj: Patched filesystem instance
"""
def expire(self, filename, seconds, days=0, expires=True): # pylint: disable=unused-argument
"""
Set the lifespan of a file on the filesystem.
Arguments:
filename (str): Name of file
            expires (bool): False means the file will never be removed;
                seconds and days give time to expiration.
seconds (int): (optional) how many seconds to keep the file around
days (int): (optional) how many days to keep the file around for.
If both days and seconds are given they will be added
together. So `seconds=86400, days=1` would expire the file
in 2 days.
Returns:
None
"""
FSExpirations.create_expiration(namespace, filename, seconds, days=days, expires=expires)
fs.expire = types.MethodType(expire, fs)
fs.get_url = types.MethodType(url_method, fs)
return fs
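# Illustrative use of the patched API (names as defined in this module):
#
#     fs = get_filesystem('my_namespace')
#     with fs.open('report.txt', 'wb') as f:
#         f.write(b'hello')
#     url = fs.get_url('report.txt', timeout=60)   # download URL
#     fs.expire('report.txt', seconds=0, days=7)   # garbage-collected later
#
# The exact open() signature follows pyfilesystem; treat this as a sketch.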
def get_osfs(namespace):
"""
Helper method to get_filesystem for a file system on disk
"""
full_path = os.path.join(DJFS_SETTINGS['directory_root'], namespace)
if not os.path.exists(full_path):
os.makedirs(full_path)
osfs = OSFS(full_path)
osfs = patch_fs(
osfs,
namespace,
# This is the OSFS implementation of `get_url`, note that it ignores
# the timeout param so all OSFS file urls have no time limits.
lambda self, filename, timeout=0: os.path.join(DJFS_SETTINGS['url_root'], namespace, filename)
)
return osfs
def get_s3fs(namespace):
"""
Helper method to get_filesystem for a file system on S3
"""
key_id = DJFS_SETTINGS.get('aws_access_key_id', None)
key_secret = DJFS_SETTINGS.get('aws_secret_access_key', None)
fullpath = namespace
if 'prefix' in DJFS_SETTINGS:
fullpath = os.path.join(DJFS_SETTINGS['prefix'], fullpath)
s3fs = S3FS(DJFS_SETTINGS['bucket'], fullpath, aws_secret_access_key=key_id, aws_access_key_id=key_secret)
def get_s3_url(self, filename, timeout=60): # pylint: disable=unused-argument
"""
        Patch method that returns a signed S3 URL for the given filename.
        Note that this will return a URL whether or not the requested file
        exists.
Arguments:
self (obj): S3FS instance that this function has been patched onto
filename (str): The name of the file we are retrieving a url for
timeout (int): How long the url should be valid for; S3 enforces
this limit
Returns:
str: A signed url to the requested file in S3
"""
global S3CONN
try:
if not S3CONN:
S3CONN = S3Connection(aws_access_key_id=key_id, aws_secret_access_key=key_secret)
return S3CONN.generate_url(
timeout, 'GET', bucket=DJFS_SETTINGS['bucket'], key=os.path.join(fullpath, filename)
)
except Exception: # pylint: disable=broad-except
# Retry on error; typically, if the connection has timed out, but
# the broad except covers all errors.
S3CONN = S3Connection(aws_access_key_id=key_id, aws_secret_access_key=key_secret)
return S3CONN.generate_url(
timeout, 'GET', bucket=DJFS_SETTINGS['bucket'], key=os.path.join(fullpath, filename)
)
s3fs = patch_fs(s3fs, namespace, get_s3_url)
return s3fs
|
apache-2.0
| -5,370,170,221,802,633,000 | 34.100592 | 110 | 0.639751 | false |
bitmovin/bitmovin-python
|
bitmovin/resources/models/encodings/muxings/information/progressive_ts_information.py
|
1
|
4987
|
from bitmovin.resources import Resource
from bitmovin.resources.models.encodings.muxings.information import ByteRange
from bitmovin.errors import InvalidTypeError
from bitmovin.utils.serialization import Serializable
from .muxing_information_video_track import MuxingInformationVideoTrack
from .muxing_information_audio_track import MuxingInformationAudioTrack
class ProgressiveTSInformation(Resource, Serializable):
def __init__(self, mime_type=None, file_size=None, container_format=None, container_bitrate=None, duration=None,
video_tracks=None, audio_tracks=None, byte_ranges=None):
super().__init__()
self.mime_type = mime_type
self.file_size = file_size
self.container_format = container_format
self.container_bitrate = container_bitrate
self.duration = duration
self._video_tracks = None
self._audio_tracks = None
self._byte_ranges = None
self.video_tracks = video_tracks
self.audio_tracks = audio_tracks
self.byte_ranges = byte_ranges
@classmethod
def parse_from_json_object(cls, json_object):
mime_type = json_object.get('mimeType')
file_size = json_object.get('fileSize')
container_format = json_object.get('containerFormat')
container_bitrate = json_object.get('containerBitrate')
duration = json_object.get('duration')
video_tracks = json_object.get('videoTracks')
audio_tracks = json_object.get('audioTracks')
byte_ranges = json_object.get('byteRanges')
progressive_ts_muxing_information = ProgressiveTSInformation(mime_type=mime_type,
file_size=file_size,
container_format=container_format,
container_bitrate=container_bitrate,
duration=duration,
video_tracks=video_tracks,
audio_tracks=audio_tracks,
byte_ranges=byte_ranges)
return progressive_ts_muxing_information
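    # Hedged sketch (not part of the original class): the parser above expects
    # the camelCase keys used elsewhere in this module, e.g.
    #
    #     info = ProgressiveTSInformation.parse_from_json_object(
    #         {'mimeType': 'video/mp2t', 'fileSize': 1024,
    #          'containerFormat': 'mpeg-ts', 'duration': 60.0,
    #          'videoTracks': [], 'audioTracks': [], 'byteRanges': []})
    #
    # Values here are invented for illustration.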
@property
def audio_tracks(self):
return self._audio_tracks
@audio_tracks.setter
def audio_tracks(self, new_audio_tracks):
if new_audio_tracks is None:
return
if not isinstance(new_audio_tracks, list):
raise InvalidTypeError('new_audio_tracks has to be a list of MuxingInformationAudioTrack objects')
if all(isinstance(audio_track, MuxingInformationAudioTrack) for audio_track in new_audio_tracks):
self._audio_tracks = new_audio_tracks
else:
audio_tracks = []
for json_object in new_audio_tracks:
audio_track = MuxingInformationAudioTrack.parse_from_json_object(json_object)
audio_tracks.append(audio_track)
self._audio_tracks = audio_tracks
@property
def video_tracks(self):
return self._video_tracks
@video_tracks.setter
def video_tracks(self, new_video_tracks):
if new_video_tracks is None:
return
if not isinstance(new_video_tracks, list):
raise InvalidTypeError('new_video_tracks has to be a list of MuxingInformationVideoTrack objects')
if all(isinstance(video_track, MuxingInformationVideoTrack) for video_track in new_video_tracks):
self._video_tracks = new_video_tracks
else:
video_tracks = []
for json_object in new_video_tracks:
video_track = MuxingInformationVideoTrack.parse_from_json_object(json_object)
video_tracks.append(video_track)
self._video_tracks = video_tracks
@property
def byte_ranges(self):
return self._byte_ranges
@byte_ranges.setter
def byte_ranges(self, new_value):
if new_value is None:
return
if not isinstance(new_value, list):
raise InvalidTypeError('byte_ranges has to be a list of ByteRange instances')
if all(isinstance(output, ByteRange) for output in new_value):
byte_ranges = []
for item in new_value:
byte_ranges.append(item)
self._byte_ranges = byte_ranges
else:
byte_ranges = []
for item in new_value:
byte_ranges.append(ByteRange.parse_from_json_object(item))
self._byte_ranges = byte_ranges
def serialize(self):
serialized = super().serialize()
serialized['videoTracks'] = self.video_tracks
serialized['audioTracks'] = self.audio_tracks
serialized['byteRanges'] = self.byte_ranges
return serialized
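# Illustrative sketch (not part of the original file): building the resource from an
# API-style JSON payload via parse_from_json_object; all field values below are hypothetical.
if __name__ == '__main__':
    sample_json = {
        'mimeType': 'video/mp2t',
        'fileSize': 1048576,
        'containerFormat': 'mpegts',
        'containerBitrate': 4500000,
        'duration': 60.0
    }
    info = ProgressiveTSInformation.parse_from_json_object(sample_json)
    print(info.mime_type, info.container_format, info.duration)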
|
unlicense
| -5,852,554,402,090,742,000 | 39.544715 | 116 | 0.594947 | false |
jolyonb/edx-platform
|
common/lib/xmodule/xmodule/contentstore/mongo.py
|
1
|
24773
|
"""
MongoDB/GridFS-level code for the contentstore.
"""
from __future__ import absolute_import
import json
import os
import gridfs
import pymongo
import six
from bson.son import SON
from fs.osfs import OSFS
from gridfs.errors import NoFile
from mongodb_proxy import autoretry_read
from opaque_keys.edx.keys import AssetKey
from xmodule.contentstore.content import XASSET_LOCATION_TAG
from xmodule.exceptions import NotFoundError
from xmodule.modulestore.django import ASSET_IGNORE_REGEX
from xmodule.mongo_utils import connect_to_mongodb, create_collection_index
from xmodule.util.misc import escape_invalid_characters
from .content import ContentStore, StaticContent, StaticContentStream
class MongoContentStore(ContentStore):
"""
MongoDB-backed ContentStore.
"""
# pylint: disable=unused-argument, bad-continuation
def __init__(
self, host, db,
port=27017, tz_aware=True, user=None, password=None, bucket='fs', collection=None, **kwargs
):
"""
Establish the connection with the mongo backend and connect to the collections
        :param collection: ignored but provided for consistency w/ other doc_store_config patterns
"""
# GridFS will throw an exception if the Database is wrapped in a MongoProxy. So don't wrap it.
# The appropriate methods below are marked as autoretry_read - those methods will handle
# the AutoReconnect errors.
proxy = False
mongo_db = connect_to_mongodb(
db, host,
port=port, tz_aware=tz_aware, user=user, password=password, proxy=proxy, **kwargs
)
self.fs = gridfs.GridFS(mongo_db, bucket) # pylint: disable=invalid-name
self.fs_files = mongo_db[bucket + ".files"] # the underlying collection GridFS uses
self.chunks = mongo_db[bucket + ".chunks"]
def close_connections(self):
"""
Closes any open connections to the underlying databases
"""
self.fs_files.database.connection.close()
def _drop_database(self, database=True, collections=True, connections=True):
"""
A destructive operation to drop the underlying database and close all connections.
Intended to be used by test code for cleanup.
If database is True, then this should drop the entire database.
Otherwise, if collections is True, then this should drop all of the collections used
by this modulestore.
Otherwise, the modulestore should remove all data from the collections.
If connections is True, then close the connection to the database as well.
"""
connection = self.fs_files.database.connection
if database:
connection.drop_database(self.fs_files.database)
elif collections:
self.fs_files.drop()
self.chunks.drop()
else:
self.fs_files.remove({})
self.chunks.remove({})
if connections:
self.close_connections()
def save(self, content):
content_id, content_son = self.asset_db_key(content.location)
# The way to version files in gridFS is to not use the file id as the _id but just as the filename.
# Then you can upload as many versions as you like and access by date or version. Because we use
# the location as the _id, we must delete before adding (there's no replace method in gridFS)
self.delete(content_id) # delete is a noop if the entry doesn't exist; so, don't waste time checking
thumbnail_location = content.thumbnail_location.to_deprecated_list_repr() if content.thumbnail_location else None
with self.fs.new_file(_id=content_id, filename=six.text_type(content.location), content_type=content.content_type,
displayname=content.name, content_son=content_son,
thumbnail_location=thumbnail_location,
import_path=content.import_path,
# getattr b/c caching may mean some pickled instances don't have attr
locked=getattr(content, 'locked', False)) as fp:
if hasattr(content.data, '__iter__'):
for chunk in content.data:
fp.write(chunk)
else:
fp.write(content.data)
return content
def delete(self, location_or_id):
"""
Delete an asset.
"""
if isinstance(location_or_id, AssetKey):
location_or_id, _ = self.asset_db_key(location_or_id)
# Deletes of non-existent files are considered successful
self.fs.delete(location_or_id)
@autoretry_read()
def find(self, location, throw_on_not_found=True, as_stream=False):
content_id, __ = self.asset_db_key(location)
try:
if as_stream:
fp = self.fs.get(content_id)
thumbnail_location = getattr(fp, 'thumbnail_location', None)
if thumbnail_location:
thumbnail_location = location.course_key.make_asset_key(
'thumbnail',
thumbnail_location[4]
)
return StaticContentStream(
location, fp.displayname, fp.content_type, fp, last_modified_at=fp.uploadDate,
thumbnail_location=thumbnail_location,
import_path=getattr(fp, 'import_path', None),
length=fp.length, locked=getattr(fp, 'locked', False),
content_digest=getattr(fp, 'md5', None),
)
else:
with self.fs.get(content_id) as fp:
thumbnail_location = getattr(fp, 'thumbnail_location', None)
if thumbnail_location:
thumbnail_location = location.course_key.make_asset_key(
'thumbnail',
thumbnail_location[4]
)
return StaticContent(
location, fp.displayname, fp.content_type, fp.read(), last_modified_at=fp.uploadDate,
thumbnail_location=thumbnail_location,
import_path=getattr(fp, 'import_path', None),
length=fp.length, locked=getattr(fp, 'locked', False),
content_digest=getattr(fp, 'md5', None),
)
except NoFile:
if throw_on_not_found:
raise NotFoundError(content_id)
else:
return None
def export(self, location, output_directory):
content = self.find(location)
filename = content.name
if content.import_path is not None:
output_directory = output_directory + '/' + os.path.dirname(content.import_path)
if not os.path.exists(output_directory):
os.makedirs(output_directory)
# Escape invalid char from filename.
export_name = escape_invalid_characters(name=filename, invalid_char_list=['/', '\\'])
disk_fs = OSFS(output_directory)
with disk_fs.open(export_name, 'wb') as asset_file:
asset_file.write(content.data)
def export_all_for_course(self, course_key, output_directory, assets_policy_file):
"""
Export all of this course's assets to the output_directory. Export all of the assets'
attributes to the policy file.
Args:
course_key (CourseKey): the :class:`CourseKey` identifying the course
output_directory: the directory under which to put all the asset files
assets_policy_file: the filename for the policy file which should be in the same
directory as the other policy files.
"""
policy = {}
assets, __ = self.get_all_content_for_course(course_key)
for asset in assets:
# TODO: On 6/19/14, I had to put a try/except around this
# to export a course. The course failed on JSON files in
# the /static/ directory placed in it with an import.
#
# If this hasn't been looked at in a while, remove this comment.
#
# When debugging course exports, this might be a good place
# to look. -- pmitros
self.export(asset['asset_key'], output_directory)
for attr, value in six.iteritems(asset):
if attr not in ['_id', 'md5', 'uploadDate', 'length', 'chunkSize', 'asset_key']:
policy.setdefault(asset['asset_key'].block_id, {})[attr] = value
with open(assets_policy_file, 'w') as f:
json.dump(policy, f, sort_keys=True, indent=4)
def get_all_content_thumbnails_for_course(self, course_key):
return self._get_all_content_for_course(course_key, get_thumbnails=True)[0]
def get_all_content_for_course(self, course_key, start=0, maxresults=-1, sort=None, filter_params=None):
return self._get_all_content_for_course(
course_key, start=start, maxresults=maxresults, get_thumbnails=False, sort=sort, filter_params=filter_params
)
def remove_redundant_content_for_courses(self):
"""
Finds and removes all redundant files (Mac OS metadata files with filename ".DS_Store"
or filename starts with "._") for all courses
"""
assets_to_delete = 0
for prefix in ['_id', 'content_son']:
query = SON([
('{}.tag'.format(prefix), XASSET_LOCATION_TAG),
('{}.category'.format(prefix), 'asset'),
('{}.name'.format(prefix), {'$regex': ASSET_IGNORE_REGEX}),
])
items = self.fs_files.find(query)
assets_to_delete = assets_to_delete + items.count()
for asset in items:
self.fs.delete(asset[prefix])
self.fs_files.remove(query)
return assets_to_delete
@autoretry_read()
def _get_all_content_for_course(self,
course_key,
get_thumbnails=False,
start=0,
maxresults=-1,
sort=None,
filter_params=None):
'''
Returns a list of all static assets for a course. The return format is a list of asset data dictionary elements.
The asset data dictionaries have the following keys:
asset_key (:class:`opaque_keys.edx.AssetKey`): The key of the asset
displayname: The human-readable name of the asset
            uploadDate (datetime.datetime): The date and time that the file was uploaded
contentType: The mimetype string of the asset
md5: An md5 hash of the asset content
'''
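        # A single returned element looks roughly like this (all values hypothetical):
        #   {'asset_key': <AssetKey for logo.png>, 'displayname': 'logo.png',
        #    'contentType': 'image/png', 'uploadDate': <datetime>, 'length': 1234,
        #    'md5': '<hex digest>', ...}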
# TODO: Using an aggregate() instead of a find() here is a hack to get around the fact that Mongo 3.2 does not
# support sorting case-insensitively.
# If a sort on displayname is requested, the aggregation pipeline creates a new field:
# `insensitive_displayname`, a lowercase version of `displayname` that is sorted on instead.
        # Mongo 3.4 does not require this hack. When upgraded, change this aggregation back to a find and specify
# a collation based on user's language locale instead.
# See: https://openedx.atlassian.net/browse/EDUCATOR-2221
pipeline_stages = []
query = query_for_course(course_key, 'asset' if not get_thumbnails else 'thumbnail')
if filter_params:
query.update(filter_params)
pipeline_stages.append({'$match': query})
if sort:
sort = dict(sort)
if 'displayname' in sort:
pipeline_stages.append({
'$project': {
'contentType': 1,
'locked': 1,
'chunkSize': 1,
'content_son': 1,
'displayname': 1,
'filename': 1,
'length': 1,
'import_path': 1,
'uploadDate': 1,
'thumbnail_location': 1,
'md5': 1,
'insensitive_displayname': {
'$toLower': '$displayname'
}
}
})
sort = {'insensitive_displayname': sort['displayname']}
pipeline_stages.append({'$sort': sort})
# This is another hack to get the total query result count, but only the Nth page of actual documents
# See: https://stackoverflow.com/a/39784851/6620612
pipeline_stages.append({'$group': {'_id': None, 'count': {'$sum': 1}, 'results': {'$push': '$$ROOT'}}})
if maxresults > 0:
pipeline_stages.append({
'$project': {
'count': 1,
'results': {
'$slice': ['$results', start, maxresults]
}
}
})
items = self.fs_files.aggregate(pipeline_stages)
if items['result']:
result = items['result'][0]
count = result['count']
assets = list(result['results'])
else:
# no results
count = 0
assets = []
# We're constructing the asset key immediately after retrieval from the database so that
# callers are insulated from knowing how our identifiers are stored.
for asset in assets:
asset_id = asset.get('content_son', asset['_id'])
asset['asset_key'] = course_key.make_asset_key(asset_id['category'], asset_id['name'])
return assets, count
def set_attr(self, asset_key, attr, value=True):
"""
        Add/set the given attr on the asset at the given location. Does not allow overwriting gridFS built-in
        attrs such as _id, md5, uploadDate, length. Value can be any type which pymongo accepts.
        Returns nothing
        Raises NotFoundError if no such item exists
        Raises AttributeError if attr is one of the built-in attrs.
:param asset_key: an AssetKey
:param attr: which attribute to set
:param value: the value to set it to (any type pymongo accepts such as datetime, number, string)
"""
self.set_attrs(asset_key, {attr: value})
def get_attr(self, location, attr, default=None):
"""
Get the value of attr set on location. If attr is unset, it returns default. Unlike set, this accessor
does allow getting the value of reserved keywords.
:param location: a c4x asset location
"""
return self.get_attrs(location).get(attr, default)
def set_attrs(self, location, attr_dict):
"""
Like set_attr but sets multiple key value pairs.
Returns nothing.
Raises NotFoundError if no such item exists
        Raises AttributeError if attr_dict has any attrs which are one of the built-in attrs.
:param location: a c4x asset location
"""
for attr in six.iterkeys(attr_dict):
if attr in ['_id', 'md5', 'uploadDate', 'length']:
raise AttributeError("{} is a protected attribute.".format(attr))
asset_db_key, __ = self.asset_db_key(location)
# catch upsert error and raise NotFoundError if asset doesn't exist
result = self.fs_files.update({'_id': asset_db_key}, {"$set": attr_dict}, upsert=False)
if not result.get('updatedExisting', True):
raise NotFoundError(asset_db_key)
@autoretry_read()
def get_attrs(self, location):
"""
        Gets all of the attributes associated with the given asset. Note, returns even built-in attrs
such as md5 which you cannot resubmit in an update; so, don't call set_attrs with the result of this
but only with the set of attrs you want to explicitly update.
The attrs will be a superset of _id, contentType, chunkSize, filename, uploadDate, & md5
:param location: a c4x asset location
"""
asset_db_key, __ = self.asset_db_key(location)
item = self.fs_files.find_one({'_id': asset_db_key})
if item is None:
raise NotFoundError(asset_db_key)
return item
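    # Illustrative usage sketch (not part of the original class); `store` and
    # `asset_key` are hypothetical, with asset_key pointing at an existing asset:
    #
    #   store.set_attr(asset_key, 'locked', True)
    #   assert store.get_attr(asset_key, 'locked', False) is True
    #
    # set_attrs/get_attrs work the same way but take/return whole attribute dicts.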
def copy_all_course_assets(self, source_course_key, dest_course_key):
"""
See :meth:`.ContentStore.copy_all_course_assets`
This implementation fairly expensively copies all of the data
"""
source_query = query_for_course(source_course_key)
# it'd be great to figure out how to do all of this on the db server and not pull the bits over
for asset in self.fs_files.find(source_query):
asset_key = self.make_id_son(asset)
# don't convert from string until fs access
source_content = self.fs.get(asset_key)
if isinstance(asset_key, six.string_types):
asset_key = AssetKey.from_string(asset_key)
__, asset_key = self.asset_db_key(asset_key)
asset_key['org'] = dest_course_key.org
asset_key['course'] = dest_course_key.course
if getattr(dest_course_key, 'deprecated', False): # remove the run if exists
if 'run' in asset_key:
del asset_key['run']
asset_id = asset_key
else: # add the run, since it's the last field, we're golden
asset_key['run'] = dest_course_key.run
asset_id = six.text_type(
dest_course_key.make_asset_key(asset_key['category'], asset_key['name']).for_branch(None)
)
self.fs.put(
source_content.read(),
_id=asset_id, filename=asset['filename'], content_type=asset['contentType'],
displayname=asset['displayname'], content_son=asset_key,
# thumbnail is not technically correct but will be functionally correct as the code
# only looks at the name which is not course relative.
thumbnail_location=asset['thumbnail_location'],
import_path=asset['import_path'],
# getattr b/c caching may mean some pickled instances don't have attr
locked=asset.get('locked', False)
)
def delete_all_course_assets(self, course_key):
"""
Delete all assets identified via this course_key. Dangerous operation which may remove assets
referenced by other runs or other courses.
:param course_key:
"""
course_query = query_for_course(course_key)
matching_assets = self.fs_files.find(course_query)
for asset in matching_assets:
asset_key = self.make_id_son(asset)
self.fs.delete(asset_key)
# codifying the original order which pymongo used for the dicts coming out of location_to_dict
# stability of order is more important than sanity of order as any changes to order make things
# unfindable
ordered_key_fields = ['category', 'name', 'course', 'tag', 'org', 'revision']
property_names = {
'category': 'block_type',
'name': 'block_id',
'course': 'course',
'tag': 'DEPRECATED_TAG',
'org': 'org',
'revision': 'branch',
}
@classmethod
def asset_db_key(cls, location):
"""
Returns the database _id and son structured lookup to find the given asset location.
"""
dbkey = SON((field_name,
getattr(location, cls.property_names[field_name])) for field_name in cls.ordered_key_fields)
if getattr(location, 'deprecated', False):
content_id = dbkey
else:
# NOTE, there's no need to state that run doesn't exist in the negative case b/c access via
# SON requires equivalence (same keys and values in exact same order)
dbkey['run'] = location.run
content_id = six.text_type(location.for_branch(None))
return content_id, dbkey
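    # Illustrative note (hypothetical key): for a non-deprecated asset key such as
    # 'asset-v1:edX+DemoX+2024+type@asset+block@logo.png', content_id is that string
    # (with the branch stripped) while dbkey is a SON ordered exactly as
    # (category, name, course, tag, org, revision, run).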
def make_id_son(self, fs_entry):
"""
Change the _id field in fs_entry into the properly ordered SON or string
Args:
fs_entry: the element returned by self.fs_files.find
"""
_id_field = fs_entry.get('_id', fs_entry)
if isinstance(_id_field, six.string_types):
return _id_field
dbkey = SON((field_name, _id_field.get(field_name)) for field_name in self.ordered_key_fields)
if 'run' in _id_field:
# NOTE, there's no need to state that run doesn't exist in the negative case b/c access via
# SON requires equivalence (same keys and values in exact same order)
dbkey['run'] = _id_field['run']
fs_entry['_id'] = dbkey
return dbkey
def ensure_indexes(self):
        # Index needed through 'category' by `_get_all_content_for_course` and others. That query also takes a sort
        # which can be `uploadDate` or `displayname`.
# TODO: uncomment this line once this index in prod is cleaned up. See OPS-2863 for tracking clean up.
# create_collection_index(
# self.fs_files,
# [
# ('_id.tag', pymongo.ASCENDING),
# ('_id.org', pymongo.ASCENDING),
# ('_id.course', pymongo.ASCENDING),
# ('_id.category', pymongo.ASCENDING)
# ],
# sparse=True,
# background=True
# )
create_collection_index(
self.fs_files,
[
('content_son.org', pymongo.ASCENDING),
('content_son.course', pymongo.ASCENDING),
('uploadDate', pymongo.DESCENDING)
],
sparse=True,
background=True
)
create_collection_index(
self.fs_files,
[
('_id.org', pymongo.ASCENDING),
('_id.course', pymongo.ASCENDING),
('_id.name', pymongo.ASCENDING)
],
sparse=True,
background=True
)
create_collection_index(
self.fs_files,
[
('content_son.org', pymongo.ASCENDING),
('content_son.course', pymongo.ASCENDING),
('content_son.name', pymongo.ASCENDING)
],
sparse=True,
background=True
)
create_collection_index(
self.fs_files,
[
('_id.org', pymongo.ASCENDING),
('_id.course', pymongo.ASCENDING),
('uploadDate', pymongo.ASCENDING)
],
sparse=True,
background=True
)
create_collection_index(
self.fs_files,
[
('_id.org', pymongo.ASCENDING),
('_id.course', pymongo.ASCENDING),
('displayname', pymongo.ASCENDING)
],
sparse=True,
background=True
)
create_collection_index(
self.fs_files,
[
('content_son.org', pymongo.ASCENDING),
('content_son.course', pymongo.ASCENDING),
('uploadDate', pymongo.ASCENDING)
],
sparse=True,
background=True
)
create_collection_index(
self.fs_files,
[
('content_son.org', pymongo.ASCENDING),
('content_son.course', pymongo.ASCENDING),
('displayname', pymongo.ASCENDING)
],
sparse=True,
background=True
)
def query_for_course(course_key, category=None):
"""
Construct a SON object that will query for all assets possibly limited to the given type
(thumbnail v assets) in the course using the index in mongo_indexes.md
"""
if getattr(course_key, 'deprecated', False):
prefix = '_id'
else:
prefix = 'content_son'
dbkey = SON([
('{}.tag'.format(prefix), XASSET_LOCATION_TAG),
('{}.org'.format(prefix), course_key.org),
('{}.course'.format(prefix), course_key.course),
])
if category:
dbkey['{}.category'.format(prefix)] = category
if getattr(course_key, 'deprecated', False):
dbkey['{}.run'.format(prefix)] = {'$exists': False}
else:
dbkey['{}.run'.format(prefix)] = course_key.run
return dbkey
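# Illustrative sketch (not part of the original module): what query_for_course builds
# for a hypothetical, non-deprecated course key. The namedtuple stand-in below only
# provides the attributes the function actually reads (org, course, run).
if __name__ == '__main__':
    from collections import namedtuple
    _DemoCourseKey = namedtuple('_DemoCourseKey', ['org', 'course', 'run'])
    demo_key = _DemoCourseKey(org='edX', course='DemoX', run='2024')
    # Produces a SON keyed on content_son.* fields (tag, org, course, category, run),
    # matching the compound indexes created in ensure_indexes above.
    print(query_for_course(demo_key, category='asset'))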
|
agpl-3.0
| 748,606,057,670,744,000 | 41.202726 | 122 | 0.570419 | false |
gustavoatt/consultas
|
consultas_proyecto/consultas_proyecto/settings/base.py
|
1
|
7398
|
"""Common settings and globals."""
from os.path import abspath, basename, dirname, join, normpath
from sys import path
########## PATH CONFIGURATION
# Absolute filesystem path to the Django project directory:
DJANGO_ROOT = dirname(dirname(abspath(__file__)))
# Absolute filesystem path to the top-level project folder:
SITE_ROOT = dirname(DJANGO_ROOT)
# Site name:
SITE_NAME = basename(DJANGO_ROOT)
# Add our project to our pythonpath, this way we don't need to type our project
# name in our dotted import paths:
path.append(DJANGO_ROOT)
########## END PATH CONFIGURATION
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG CONFIGURATION
########## MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
('Your Name', '[email protected]'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
########## END MANAGER CONFIGURATION
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
########## END DATABASE CONFIGURATION
########## GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'America/Los_Angeles'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'es-VE'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
########## END GENERAL CONFIGURATION
########## MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = normpath(join(SITE_ROOT, 'media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
########## END MEDIA CONFIGURATION
########## STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = normpath(join(SITE_ROOT, 'assets'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
normpath(join(SITE_ROOT, 'static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
########## END STATIC FILE CONFIGURATION
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key should only be used for development and testing.
SECRET_KEY = r"fh2#ni-%+2-lo@24x5=#9e%i1w^dh%6s1jv0$p$e207iswh3hg"
########## END SECRET CONFIGURATION
########## SITE CONFIGURATION
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
########## END SITE CONFIGURATION
########## FIXTURE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
normpath(join(SITE_ROOT, 'fixtures')),
)
########## END FIXTURE CONFIGURATION
########## TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
normpath(join(SITE_ROOT, 'templates')),
)
########## END TEMPLATE CONFIGURATION
########## MIDDLEWARE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#middleware-classes
MIDDLEWARE_CLASSES = (
# Default Django middleware.
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
########## END MIDDLEWARE CONFIGURATION
########## URL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = '%s.urls' % SITE_NAME
########## END URL CONFIGURATION
########## APP CONFIGURATION
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin panel and documentation:
'django.contrib.admin',
# 'django.contrib.admindocs',
)
THIRD_PARTY_APPS = (
# Database migration helpers:
'south',
# Form helpers
'floppyforms',
'crispy_forms',
# REST API
'rest_framework',
# Server
'gunicorn',
)
# Apps specific for this project go here.
LOCAL_APPS = (
'pacientes_app',
'historias_app',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
########## END APP CONFIGURATION
########## CRISPY FORMS CONFIGURATION
CRISPY_TEMPLATE_PACK = 'bootstrap3'
########## END CRISPY FORMS CONFIGURATION
########## LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
########## END LOGGING CONFIGURATION
########## WSGI CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = '%s.wsgi.application' % SITE_NAME
########## END WSGI CONFIGURATION
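########## ENVIRONMENT OVERRIDES (ILLUSTRATIVE)
# Illustrative sketch, not part of this settings module: an environment-specific file
# such as settings/local.py (name assumed) would typically start from these defaults:
#
# from .base import *
# DEBUG = True
# TEMPLATE_DEBUG = DEBUG
# DATABASES['default'].update({
#     'ENGINE': 'django.db.backends.postgresql_psycopg2',
#     'NAME': 'consultas_dev',
# })
########## END ENVIRONMENT OVERRIDES (ILLUSTRATIVE)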
|
mit
| -4,859,612,239,694,173,000 | 27.785992 | 98 | 0.694107 | false |
PaddlePaddle/Paddle
|
python/paddle/fluid/incubate/fleet/parameter_server/ir/public.py
|
1
|
50054
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from functools import reduce
import collections
import math
import os
import warnings
import logging
import six
import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid.core import CommContext
import paddle.fluid.framework as framework
from paddle.fluid.incubate.fleet.parameter_server.mode import DistributedMode
from paddle.fluid.incubate.fleet.parameter_server.ir import vars_metatools
from paddle.fluid.incubate.fleet.parameter_server.ir.ps_dispatcher import RoundRobin, PSDispatcher
from paddle.fluid.transpiler.details.program_utils import delete_ops
OP_NAME_SCOPE = "op_namescope"
CLIP_OP_NAME_SCOPE = "gradient_clip"
STEP_COUNTER = "@PS_STEP_COUNTER@"
LEARNING_RATE_DECAY_COUNTER = "@LR_DECAY_COUNTER@"
OP_ROLE_VAR_ATTR_NAME = core.op_proto_and_checker_maker.kOpRoleVarAttrName()
RPC_OP_ROLE_ATTR_NAME = core.op_proto_and_checker_maker.kOpRoleAttrName()
RPC_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.RPC
op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
LR_SCHED_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.LRSched
OPT_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.Optimize
SPARSE_OP_LIST = ["lookup_table", "lookup_table_v2"]
SPARSE_OP_TYPE_DICT = {"lookup_table": "W", "lookup_table_v2": "W"}
def _get_lr_ops(program):
lr_ops = []
for index, op in enumerate(program.global_block().ops):
role_id = int(op.attr(RPC_OP_ROLE_ATTR_NAME))
if role_id == int(LR_SCHED_OP_ROLE_ATTR_VALUE) or \
role_id == int(LR_SCHED_OP_ROLE_ATTR_VALUE) | \
int(OPT_OP_ROLE_ATTR_VALUE):
lr_ops.append(op)
return lr_ops
def _has_global_step(lr_ops):
if len(lr_ops) > 0:
for idx, op in enumerate(lr_ops):
if op.type != 'increment':
continue
counter = op.input("X")[0]
if counter == LEARNING_RATE_DECAY_COUNTER:
return True
return False
def is_sparse_op(op):
if op.type in SPARSE_OP_LIST and op.attr('is_sparse') is True and op.attr(
'is_distributed') is False:
return True
if op.type == "distributed_lookup_table" and op.attr(
'is_distributed') is False:
return True
return False
def is_distributed_sparse_op(op):
if op.type in SPARSE_OP_LIST and op.attr('is_distributed') is True:
return True
if op.type == "distributed_lookup_table" and op.attr(
'is_distributed') is True:
return True
return False
def get_sparse_tablename(op):
return op.input("W")[0]
def get_sparse_tablenames(program, is_distributed):
tablenames = set()
if is_distributed:
for op in program.global_block().ops:
if is_distributed_sparse_op(op):
tablenames.add(get_sparse_tablename(op))
else:
for op in program.global_block().ops:
if is_sparse_op(op):
tablenames.add(get_sparse_tablename(op))
return list(tablenames)
class MergedVariable:
def __init__(self, merged, ordered, offsets):
self.merged_var = merged
self.ordered_vars = ordered
self.offsets = offsets
def Singleton(cls):
_instance = {}
def _singleton(*args, **kargs):
if cls not in _instance:
_instance[cls] = cls(*args, **kargs)
return _instance[cls]
return _singleton
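# Note (illustrative): a class decorated with @Singleton is cached per class, so every
# call after the first returns the same instance and later constructor arguments are
# ignored; CompileTimeStrategy below relies on this behaviour.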
@Singleton
class CompileTimeStrategy(object):
def __init__(self, main_program, startup_program, strategy, role_maker):
self.min_block_size = 81920
self.origin_main_program = main_program
self.origin_startup_program = startup_program
self.origin_ps_main_program = main_program
self.origin_ps_startup_program = startup_program
self.strategy = strategy
self.role_maker = role_maker
self.use_ps_gpu = False
try:
self.is_heter_ps_mode = role_maker._is_heter_parameter_server_mode
except:
warnings.warn(
"Using paddle.distributed.fleet instead of paddle.fluid.incubate.fleet"
)
self.is_heter_ps_mode = False
self.origin_sparse_pairs = []
self.origin_dense_pairs = []
self.merged_variables_pairs = []
self.merged_dense_pairs = []
self.merged_sparse_pairs = []
self.merged_variable_map = {}
self.param_name_to_grad_name = {}
self.grad_name_to_param_name = {}
self.param_grad_ep_mapping = collections.OrderedDict()
self.grad_param_mapping = collections.OrderedDict()
self._build_var_distributed()
self.tensor_table_dict = {}
# for heter-ps save variables
self.origin_merged_variables_pairs = list(self.merged_variables_pairs)
self.origin_merged_dense_pairs = list(self.merged_dense_pairs)
self.origin_merged_sparse_pairs = list(self.merged_sparse_pairs)
def get_distributed_mode(self):
trainer = self.strategy.get_trainer_runtime_config()
return trainer.mode
def is_sync_mode(self):
trainer = self.strategy.get_trainer_runtime_config()
return trainer.mode == DistributedMode.SYNC
def is_geo_mode(self):
trainer = self.strategy.get_trainer_runtime_config()
return trainer.mode == DistributedMode.GEO
def is_async_mode(self):
trainer = self.strategy.get_trainer_runtime_config()
return trainer.mode == DistributedMode.ASYNC
def get_role_id(self):
try:
return self.role_maker._role_id()
except Exception:
return self.role_maker.role_id()
def get_trainers(self):
try:
return self.role_maker._worker_num()
except Exception:
return self.role_maker.worker_num()
def get_ps_endpoint(self):
try:
return self.role_maker._get_pserver_endpoints()[self.get_role_id()]
except Exception:
return self.role_maker.get_pserver_endpoints()[self.get_role_id()]
def get_ps_endpoints(self):
try:
return self.role_maker._get_pserver_endpoints()
except Exception:
return self.role_maker.get_pserver_endpoints()
def get_heter_worker_endpoints(self):
try:
return self.role_maker._get_heter_worker_endpoints()
except Exception:
return self.role_maker.get_heter_worker_endpoints()
def get_heter_worker_endpoint(self):
try:
return self.role_maker._get_heter_worker_endpoint()
except Exception:
return self.role_maker.get_heter_worker_endpoint()
def get_origin_programs(self):
return self.origin_main_program, self.origin_startup_program
def get_origin_main_program(self):
return self.origin_main_program
def get_origin_startup_program(self):
return self.origin_startup_program
def set_origin_ps_main_program(self, program):
self.origin_ps_main_program = program
def set_origin_ps_startup_program(self, program):
self.origin_ps_startup_program = program
def get_origin_ps_main_program(self):
return self.origin_ps_main_program
def get_origin_ps_startup_program(self):
return self.origin_ps_startup_program
def add_tensor_table(self,
feed_var_name,
fetch_var_name="",
startup_program=None,
main_program=None,
tensor_table_class=""):
self.tensor_table_dict[feed_var_name] = {}
self.tensor_table_dict[feed_var_name]["feed_var_name"] = feed_var_name
self.tensor_table_dict[feed_var_name]["fetch_var_name"] = fetch_var_name
self.tensor_table_dict[feed_var_name][
"startup_program"] = startup_program
self.tensor_table_dict[feed_var_name]["main_program"] = main_program
self.tensor_table_dict[feed_var_name][
"tensor_table_class"] = tensor_table_class
def get_tensor_table_dict(self):
return self.tensor_table_dict
def get_sparse_varname_on_ps(self, is_distributed, endpoint=None):
if not endpoint:
endpoint = self.get_ps_endpoint()
varnames = get_sparse_tablenames(self.get_origin_main_program(),
is_distributed)
ps_sparse_varnames = []
for varname in varnames:
tables = self.get_var_distributed(varname, True)
for i in range(len(tables)):
table, ep, _ = tables[i]
if ep == endpoint:
ps_sparse_varnames.append(table)
return ps_sparse_varnames
def get_optimize_varname_on_ps(self, param_name):
origin_param_name, _, _ = _get_varname_parts(param_name)
optimize_var_names = []
for op in self.get_origin_main_program().global_block().ops:
# check all optimizer op
if int(op.all_attrs()["op_role"]) == 2:
# check param name
if op.input("Param")[0] != origin_param_name:
continue
# check all input
for key in op.input_names:
if key in [
"Param", "Grad", "LearningRate", "Beta1Tensor",
"Beta2Tensor"
]:
continue
                    # check variable shape related param, e.g.: Moment1
optimize_var_names += self._get_optimizer_param_related_var_name(
op, op.type, key)
return optimize_var_names
def _get_optimizer_param_related_var_name(self, op, op_type, varkey):
"""
        Returns the names for optimizer inputs that need to be loaded
"""
related_var_names = []
if op_type == "adam":
if varkey in ["Moment1", "Moment2"]:
related_var_names.append(op.input(varkey)[0])
elif op_type == "adagrad":
if varkey == "Moment":
related_var_names.append(op.input(varkey)[0])
elif op_type in ["momentum", "lars_momentum"]:
if varkey == "Velocity":
related_var_names.append(op.input(varkey)[0])
elif op_type == "rmsprop":
if varkey in ["Moment", "MeanSquare"]:
related_var_names.append(op.input(varkey)[0])
elif op_type == "ftrl":
if varkey in ["SquaredAccumulator", "LinearAccumulator"]:
related_var_names.append(op.input(varkey)[0])
elif op_type == "sgd":
pass
else:
raise ValueError(
"Not supported optimizer for distributed training: %s" %
op_type)
return related_var_names
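    # Illustrative note (hypothetical names): for an `adam` op whose Moment1/Moment2
    # inputs are `fc_0.w_0_moment1_0` and `fc_0.w_0_moment2_0`, the helper above returns
    # exactly those two accumulator names so get_optimize_varname_on_ps can collect them.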
def build_ctx(self,
vars,
mapping,
is_grad,
is_sparse,
is_send,
is_distributed=False):
def get_grad_var_ep(slices):
names = []
eps = []
sections = []
for slice in slices:
if self.is_geo_mode():
if is_send:
names.append("{}.delta".format(slice.name))
else:
names.append(slice.name)
elif is_grad and self.is_sync_mode() and self.get_trainers(
) > 1:
names.append("{}.trainer_{}".format(slice.name,
self.get_role_id()))
else:
names.append(slice.name)
sections.append(slice.shape[0])
for ep, pairs in self.param_grad_ep_mapping.items():
params, grads = pairs["params"], pairs["grads"]
for var in params + grads:
if slice.name == var.name:
eps.append(ep)
break
return names, eps, sections
if isinstance(vars, MergedVariable):
name = vars.merged_var.name
slices = mapping[name]
names, eps, sections = get_grad_var_ep(slices)
origin_varnames = [var.name for var in vars.ordered_vars]
else:
name = vars.name
slices = mapping[name]
names, eps, sections = get_grad_var_ep(slices)
origin_varnames = [vars.name]
trainer_id = self.get_role_id()
aggregate = True
ctx = CommContext(name, names, eps, sections, origin_varnames,
trainer_id, aggregate, is_sparse, is_distributed)
return ctx
def get_trainer_send_context(self):
send_ctx = {}
distibuted_varnames = get_sparse_tablenames(self.origin_main_program,
True)
if not self.is_geo_mode():
for merged in self.merged_dense_pairs:
grad = merged[1]
ctx = self.build_ctx(grad, self.grad_var_mapping, True, False,
True)
send_ctx[ctx.var_name()] = ctx
for merged in self.merged_sparse_pairs:
param = merged[0]
grad = merged[1]
param_name = param.merged_var.name
is_distributed = True if param_name in distibuted_varnames else False
ctx = self.build_ctx(grad, self.grad_var_mapping, True, True,
True, is_distributed)
send_ctx[ctx.var_name()] = ctx
if self.is_async_mode():
name, ctx = self._step_ctx()
send_ctx[name] = ctx
else:
for pairs in self.origin_sparse_pairs:
param, grad = pairs
param_name = param.name
is_distributed = True if param_name in distibuted_varnames else False
param_ctx = self.build_ctx(param, self.param_var_mapping, False,
True, True, is_distributed)
grad_ctx = self.build_ctx(grad, self.grad_var_mapping, True,
True, True, is_distributed)
ctx = CommContext(param_ctx.var_name(),
param_ctx.split_varnames(),
param_ctx.split_endpoints(),
param_ctx.sections(),
grad_ctx.origin_varnames(),
param_ctx.trainer_id(),
param_ctx.aggregate(),
param_ctx.is_sparse(),
param_ctx.is_distributed())
send_ctx[ctx.var_name()] = ctx
name, ctx = self._step_ctx()
send_ctx[name] = ctx
return send_ctx
def get_communicator_send_context(self):
send_ctx = {}
distibuted_varnames = get_sparse_tablenames(self.origin_main_program,
True)
if self.is_geo_mode():
for pairs in self.merged_dense_pairs:
param = pairs[0]
ctx = self.build_ctx(param, self.param_var_mapping, False,
False, True)
send_ctx[ctx.var_name()] = ctx
for pairs in self.merged_sparse_pairs:
param = pairs[0]
param_name = param.merged_var.name
is_distributed = True if param_name in distibuted_varnames else False
ctx = self.build_ctx(param, self.param_var_mapping, False, True,
True, is_distributed)
send_ctx[ctx.var_name()] = ctx
name, ctx = self._step_ctx()
send_ctx[name] = ctx
else:
for merged in self.merged_dense_pairs:
grad = merged[1]
ctx = self.build_ctx(grad, self.grad_var_mapping, True, False,
True)
send_ctx[ctx.var_name()] = ctx
for merged in self.merged_sparse_pairs:
param, grad = merged
param_name = param.merged_var.name
is_distributed = True if param_name in distibuted_varnames else False
ctx = self.build_ctx(grad, self.grad_var_mapping, True, True,
True, is_distributed)
send_ctx[ctx.var_name()] = ctx
name, ctx = self._step_ctx()
send_ctx[name] = ctx
return send_ctx
def get_communicator_recv_context(self,
recv_type=1,
use_origin_program=False):
# recv_type
# 1 : DENSE 2. SPARSE 3. DISTRIBUTED 4. ALL
distibuted_varnames = get_sparse_tablenames(self.origin_main_program,
True)
sparse_varnames = []
for pairs in self.origin_sparse_pairs:
param, grad = pairs
sparse_varnames.append(param.name)
dense_recv_ctx = {}
sparse_recv_ctx = {}
distributed_recv_ctx = {}
variables_pairs = self.merged_variables_pairs if not use_origin_program else self.origin_merged_variables_pairs
for merged in variables_pairs:
params = merged[0]
if params.merged_var.name in sparse_varnames:
continue
ctx = self.build_ctx(params, self.param_var_mapping, False, False,
False, False)
dense_recv_ctx[ctx.var_name()] = ctx
for pairs in self.origin_sparse_pairs:
param, grad = pairs
if param.name in distibuted_varnames:
ctx = self.build_ctx(param, self.param_var_mapping, False, True,
False, True)
distributed_recv_ctx[ctx.var_name()] = ctx
else:
ctx = self.build_ctx(param, self.param_var_mapping, False, True,
False, False)
sparse_recv_ctx[ctx.var_name()] = ctx
if recv_type == 1:
return dense_recv_ctx
if recv_type == 2:
return sparse_recv_ctx
if recv_type == 3:
return distributed_recv_ctx
if recv_type == 4:
dense_recv_ctx.update(sparse_recv_ctx)
dense_recv_ctx.update(distributed_recv_ctx)
return dense_recv_ctx
        raise ValueError(
            "recv_type can only be 1/2/3/4, 1 : DENSE 2. SPARSE 3. DISTRIBUTED 4. ALL"
        )
def get_the_one_trainer_send_context(self, split_dense_table):
if self.is_geo_mode():
send_ctx = {}
trainer_id = self.get_role_id()
idx = 0
distibuted_varnames = get_sparse_tablenames(
self.origin_main_program, True)
for merged in self.merged_sparse_pairs:
param, grad = merged
grad_name = grad.merged_var.name
param_name = param.merged_var.name
is_distributed = True if param_name in distibuted_varnames else False
var = self.origin_main_program.global_block().vars[
grad.merged_var.name]
var_numel = reduce(lambda x, y: x * y, var.shape[1:])
sparse_ctx = CommContext(grad_name, [grad_name],
["127.0.0.1:6071"], [var_numel],
[grad_name], trainer_id, True, True,
is_distributed, idx, False)
idx += 1
send_ctx[sparse_ctx.var_name()] = sparse_ctx
if len(send_ctx) == 0:
raise ValueError(
"GeoSGD require sparse parameters in your net.")
if len(self.tensor_table_dict) > 0 and self.role_maker._is_worker():
name, ctx = self._step_ctx(idx)
send_ctx[name] = ctx
return send_ctx
else:
return self.get_the_one_send_context(split_dense_table)
def get_dense_send_context(self,
send_ctx,
idx,
merged_dense_pairs,
trainer_id,
split_dense_table=False):
if len(merged_dense_pairs) < 1:
return idx
if not split_dense_table:
origin_varnames = []
var_numel = 0
for merged in merged_dense_pairs:
grad = merged[1]
origin_varnames.append(grad.merged_var.name)
var = self.origin_main_program.global_block().vars[
grad.merged_var.name]
var_numel += reduce(lambda x, y: x * y, var.shape)
grad_name = "Dense@Grad"
trainer_id = self.get_role_id()
aggregate = True
dense_ctx = CommContext(grad_name, [grad_name], ["127.0.0.1:6071"],
[var_numel], origin_varnames, trainer_id,
aggregate, False, False, idx, False)
send_ctx[grad_name] = dense_ctx
idx += 1
else:
for merged in merged_dense_pairs:
grad = merged[1]
origin_varname = grad.merged_var.name
var = self.origin_main_program.global_block().vars[
origin_varname]
var_numel = reduce(lambda x, y: x * y, var.shape)
grad_name = origin_varname
aggregate = True
dense_ctx = CommContext(grad_name, [grad_name],
["127.0.0.1:6071"], [var_numel],
[origin_varname], trainer_id, aggregate,
False, False, idx, False)
send_ctx[grad_name] = dense_ctx
idx += 1
return idx
def get_the_one_send_context(self,
split_dense_table=False,
use_origin_program=False,
ep_list=None):
if ep_list is None:
ep_list = ["127.0.0.1:6071"]
send_ctx = {}
trainer_id = self.get_role_id()
idx = 0
merged_dense_pairs = self.origin_merged_dense_pairs if use_origin_program else self.merged_dense_pairs
merged_sparse_pairs = self.origin_merged_sparse_pairs if use_origin_program else self.merged_sparse_pairs
idx += self.get_dense_send_context(send_ctx, idx, merged_dense_pairs,
trainer_id, split_dense_table)
distibuted_varnames = get_sparse_tablenames(self.origin_main_program,
True)
for merged in merged_sparse_pairs:
param, grad = merged
grad_name = grad.merged_var.name
param_name = param.merged_var.name
splited_varname = []
for i in range(len(ep_list)):
splited_varname.append("{}.block{}".format(param_name, i))
is_distributed = True if param_name in distibuted_varnames else False
var = self.origin_main_program.global_block().vars[
grad.merged_var.name]
shape = list(var.shape)
shape[0] = 0 if is_distributed else shape[0]
sparse_ctx = CommContext(grad_name, splited_varname, ep_list, shape,
[grad_name], trainer_id, True, True,
is_distributed, idx, False)
idx += 1
send_ctx[sparse_ctx.var_name()] = sparse_ctx
if len(self.tensor_table_dict) > 0 and self.role_maker._is_worker():
name, ctx = self._step_ctx(idx)
send_ctx[name] = ctx
return send_ctx
def get_the_one_recv_context(self,
is_dense=True,
split_dense_table=False,
use_origin_program=False):
recv_id_maps = {}
if is_dense:
send_ctx = self.get_the_one_send_context(
split_dense_table=split_dense_table,
use_origin_program=use_origin_program)
for idx, (name, ctx) in enumerate(send_ctx.items()):
if ctx.is_sparse():
continue
if ctx.is_tensor_table():
continue
origin_grad_varnames = ctx.origin_varnames()
param_names = []
for grad_varname in origin_grad_varnames:
param_name = self.grad_name_to_param_name[grad_varname]
param_names.append(param_name)
recv_id_maps[ctx.table_id()] = param_names
else:
send_ctx = self.get_the_one_send_context()
for idx, (name, ctx) in enumerate(send_ctx.items()):
if not ctx.is_sparse():
continue
origin_grad_varnames = ctx.origin_varnames()
param_names = []
for grad_varname in origin_grad_varnames:
param_name = self.grad_name_to_param_name[grad_varname]
param_names.append(param_name)
recv_id_maps[ctx.table_id()] = param_names
return recv_id_maps
def get_server_runtime_config(self):
return self.strategy.get_server_runtime_config()
def get_var_distributed(self, varname, is_param):
var_distributed = []
offset = 0
if is_param:
params = self.param_var_mapping[varname]
param_varnames = [var.name for var in params]
for ep, pairs in self.param_grad_ep_mapping.items():
for p in pairs["params"]:
if p.name in param_varnames:
offset += p.shape[0]
var_distributed.append((p.name, ep, p.shape[0]))
else:
grads = self.grad_var_mapping[varname]
grad_varnames = [var.name for var in grads]
for ep, pairs in self.param_grad_ep_mapping.items():
for g in pairs["grads"]:
if g.name in grad_varnames:
var_distributed.append((g.name, ep, g.shape[0]))
return var_distributed
def _step_ctx(self, idx):
name = STEP_COUNTER
trainer_id = self.get_role_id()
endpoints = self.get_ps_endpoints()
sections = [1] * len(endpoints)
names = [name] * len(endpoints)
ctx = CommContext(name, names, endpoints, sections, [name], trainer_id,
True, False, False, idx, True)
return name, ctx
def _create_vars_from_blocklist(self, block_list):
"""
Create vars for each split.
NOTE: only grads need to be named for different trainers, use
add_trainer_suffix to rename the grad vars.
Args:
block_list (list[(varname, block_id, block_size)]): List of gradient blocks.
        Returns:
            var_mapping (collections.OrderedDict(varname->[new_varname_variable])): A dict mapping
from original var name to each var split.
"""
# varname->[(block_id, current_block_size)]
block_map = collections.OrderedDict()
var_mapping = collections.OrderedDict()
for block_str in block_list:
varname, offset, size = block_str.split(":")
if varname not in block_map:
block_map[varname] = []
block_map[varname].append((int(offset), int(size)))
for varname, split in six.iteritems(block_map):
orig_var = self.merged_variable_map[varname]
if len(split) == 1:
var_mapping[varname] = [orig_var]
self.var_distributed.add_distributed_var(
origin_var=orig_var,
slice_var=orig_var,
block_id=0,
offset=0,
is_slice=False,
vtype="Param")
else:
var_mapping[varname] = []
orig_shape = orig_var.shape
orig_dim1_flatten = 1
if len(orig_shape) >= 2:
orig_dim1_flatten = reduce(lambda x, y: x * y,
orig_shape[1:])
for i, block in enumerate(split):
size = block[1]
rows = size // orig_dim1_flatten
splited_shape = [rows]
if len(orig_shape) >= 2:
splited_shape.extend(orig_shape[1:])
new_var_name = "%s.block%d" % (varname, i)
slice_var = vars_metatools.VarStruct(
name=new_var_name,
shape=splited_shape,
dtype=orig_var.dtype,
type=orig_var.type,
lod_level=orig_var.lod_level,
persistable=False)
var_mapping[varname].append(slice_var)
self.var_distributed.add_distributed_var(
origin_var=orig_var,
slice_var=slice_var,
block_id=i,
offset=-1,
is_slice=False,
vtype="Param")
return var_mapping
def _dispatcher(self):
ps_dispatcher = RoundRobin(self.get_ps_endpoints())
ps_dispatcher.reset()
grad_var_mapping_items = list(six.iteritems(self.grad_var_mapping))
sparse_gradnames = [grad.name for _, grad in self.origin_sparse_pairs]
for grad_varname, splited_vars in grad_var_mapping_items:
if grad_varname in sparse_gradnames:
continue
send_vars = []
for _, var in enumerate(splited_vars):
send_vars.append(var)
recv_vars = []
for _, var in enumerate(send_vars):
recv_vars.append(self.grad_param_mapping[var])
eps = ps_dispatcher.dispatch(recv_vars)
for i, ep in enumerate(eps):
self.param_grad_ep_mapping[ep]["params"].append(recv_vars[i])
self.param_grad_ep_mapping[ep]["grads"].append(send_vars[i])
for grad_varname, splited_vars in grad_var_mapping_items:
if grad_varname not in sparse_gradnames:
continue
ps_dispatcher.reset()
send_vars = []
for _, var in enumerate(splited_vars):
send_vars.append(var)
recv_vars = []
for _, var in enumerate(send_vars):
recv_vars.append(self.grad_param_mapping[var])
eps = ps_dispatcher.dispatch(recv_vars)
for i, ep in enumerate(eps):
self.param_grad_ep_mapping[ep]["params"].append(recv_vars[i])
self.param_grad_ep_mapping[ep]["grads"].append(send_vars[i])
def _slice_variable(self,
var_list,
slice_count,
min_block_size,
uniform=False):
"""
We may need to split dense tensor to one or more blocks and put
them equally onto parameter server. One block is a sub-tensor
aligned by dim[0] of the tensor.
We need to have a minimal block size so that the calculations in
the parameter server side can gain better performance. By default
minimum block size 8K elements (maybe 16bit or 32bit or 64bit).
Args:
var_list (list): List of variables.
slice_count (int): Numel of count that variables will be sliced, which
could be the pserver services' count.
min_block_size (int): Minimum split block size.
Returns:
blocks (list[(varname, block_id, current_block_size)]): A list
of VarBlocks. Each VarBlock specifies a shard of the var.
"""
blocks = []
for var in var_list:
if not uniform:
var_numel = reduce(lambda x, y: x * y, var.shape)
split_count = 1
if min_block_size == -1:
split_count = 1
else:
split_count = slice_count
max_pserver_count = int(
math.floor(var_numel / float(min_block_size)))
if max_pserver_count == 0:
max_pserver_count = 1
if max_pserver_count < slice_count:
split_count = max_pserver_count
block_size = int(math.ceil(var_numel / float(split_count)))
if len(var.shape) >= 2:
# align by dim1(width)
dim1 = reduce(lambda x, y: x * y, var.shape[1:])
remains = block_size % dim1
if remains != 0:
block_size += dim1 - remains
# update split_count after aligning
split_count = int(math.ceil(var_numel / float(block_size)))
for block_id in range(split_count):
curr_block_size = min(block_size, var_numel - (
(block_id) * block_size))
block = vars_metatools.VarBlock(var.name, block_id,
curr_block_size)
blocks.append(str(block))
else:
block_size = var.shape[0] / slice_count
remainder = var.shape[0] % slice_count
if block_size == 0:
dim0s = [block_size] * remainder
else:
dim0s = [block_size] * slice_count
for i in range(remainder):
dim0s[i] = dim0s[i] + 1
dim1 = reduce(lambda x, y: x * y, var.shape[1:])
for block_id in range(len(dim0s)):
numel = dim0s[block_id] * dim1
block = vars_metatools.VarBlock(var.name, block_id, numel)
blocks.append(str(block))
return blocks
def _get_param_grad_blocks(self, pairs, min_block_size, uniform=False):
param_list = []
grad_list = []
param_grad_set = set()
for p, g in pairs:
# todo(tangwei12) skip parameter marked not trainable
# if type(p) == Parameter and p.trainable == False:
# continue
p = p.merged_var
g = g.merged_var
if p.name not in param_grad_set:
param_list.append(p)
param_grad_set.add(p.name)
if g.name not in param_grad_set:
grad_list.append(g)
param_grad_set.add(g.name)
# when we slice var up into blocks, we will slice the var according to
# pserver services' count. A pserver may have two or more listening ports.
grad_blocks = self._slice_variable(grad_list,
len(self.get_ps_endpoints()),
min_block_size, uniform)
param_blocks = self._slice_variable(param_list,
len(self.get_ps_endpoints()),
min_block_size, uniform)
return param_blocks, grad_blocks
def _var_slice_and_distribute(self):
# update these mappings for further transpile:
# 1. param_var_mapping : param var name->[split params vars]
# 2. grad_var_mapping : grad var name->[split grads vars]
# 3. grad_param_mapping : grad.blockx->param.blockx
# 4. param_grad_ep_mapping : ep->{"params" : [], "grads" : [] }
dps, dgs = self._get_param_grad_blocks(self.merged_dense_pairs,
self.min_block_size, False)
sps, sgs = self._get_param_grad_blocks(self.merged_sparse_pairs,
self.min_block_size, True)
param_blocks = dps + sps
grad_blocks = dgs + sgs
assert (len(grad_blocks) == len(param_blocks))
# origin_param_name->[splited_param_vars]
self.param_var_mapping = self._create_vars_from_blocklist(param_blocks)
self.grad_var_mapping = self._create_vars_from_blocklist(grad_blocks)
# dict(grad_splited_var->param_splited_var)
self.grad_param_mapping = collections.OrderedDict()
for g, p in zip(grad_blocks, param_blocks):
g_name, g_bid, _ = g.split(":")
p_name, p_bid, _ = p.split(":")
self.grad_param_mapping[self.grad_var_mapping[g_name][int(g_bid)]] = \
self.param_var_mapping[p_name][int(p_bid)]
print_maps = {}
for k, v in self.grad_param_mapping.items():
print_maps[str(k)] = str(v)
# create mapping of endpoint->split var to create pserver side program
self.param_grad_ep_mapping = collections.OrderedDict()
[
self.param_grad_ep_mapping.update({
ep: {
"params": [],
"grads": []
}
}) for ep in self.get_ps_endpoints()
]
def _build_var_distributed(self):
self.var_distributed = vars_metatools.VarsDistributed()
sparse_pairs, dense_pairs = self.get_param_grads()
origin_for_sparse = []
origin_for_dense = []
param_name_grad_name = dict()
grad_name_to_param_name = dict()
for param, grad in sparse_pairs:
param = vars_metatools.create_var_struct(param)
grad = vars_metatools.create_var_struct(grad)
origin_for_sparse.append((param, grad))
for param, grad in dense_pairs:
param = vars_metatools.create_var_struct(param)
grad = vars_metatools.create_var_struct(grad)
origin_for_dense.append((param, grad))
for dense_pair in origin_for_dense:
param, grad = dense_pair
m_param = MergedVariable(param, [param], [0])
m_grad = MergedVariable(grad, [grad], [0])
self.merged_variables_pairs.append((m_param, m_grad))
self.merged_dense_pairs.append((m_param, m_grad))
for sparse_pair in origin_for_sparse:
param, grad = sparse_pair
m_param = MergedVariable(param, [param], [0])
m_grad = MergedVariable(grad, [grad], [0])
self.merged_variables_pairs.append((m_param, m_grad))
self.merged_sparse_pairs.append((m_param, m_grad))
for merged in self.merged_variables_pairs:
m_param, m_grad = merged
self.merged_variable_map[
m_param.merged_var.name] = m_param.merged_var
self.merged_variable_map[m_grad.merged_var.name] = m_grad.merged_var
param_merges = []
param_merges.extend(origin_for_sparse)
param_merges.extend(origin_for_dense)
for param, grad in param_merges:
param_name_grad_name[param.name] = grad.name
grad_name_to_param_name[grad.name] = param.name
self.origin_sparse_pairs = origin_for_sparse
self.origin_dense_pairs = origin_for_dense
self.param_name_to_grad_name = param_name_grad_name
self.grad_name_to_param_name = grad_name_to_param_name
sparse_pair_map = collections.OrderedDict()
for pair in self.origin_sparse_pairs + self.origin_dense_pairs:
param, grad = pair
sparse_pair_map[param.name] = str(param)
sparse_pair_map[grad.name] = str(grad)
self._var_slice_and_distribute()
self._dispatcher()
def get_param_grads(self):
origin_program = self.origin_main_program
def _get_params_grads(sparse_varnames):
block = origin_program.global_block()
dense_param_grads = []
sparse_param_grads = []
optimize_params = set()
origin_var_dict = origin_program.global_block().vars
role_id = int(core.op_proto_and_checker_maker.OpRole.Backward)
for op in block.ops:
if _is_opt_role_op(op):
# delete clip op from opt_ops when run in Parameter Server mode
if OP_NAME_SCOPE in op.all_attrs() \
and CLIP_OP_NAME_SCOPE in op.attr(OP_NAME_SCOPE):
op._set_attr("op_role", role_id)
continue
if op.attr(OP_ROLE_VAR_ATTR_NAME):
param_name = op.attr(OP_ROLE_VAR_ATTR_NAME)[0]
grad_name = op.attr(OP_ROLE_VAR_ATTR_NAME)[1]
if param_name not in optimize_params:
optimize_params.add(param_name)
param_grad = (origin_var_dict[param_name],
origin_var_dict[grad_name])
if param_name in sparse_varnames:
sparse_param_grads.append(param_grad)
else:
dense_param_grads.append(param_grad)
return sparse_param_grads, dense_param_grads
def _get_sparse_varnames():
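            # Gather the parameter names read by sparse lookup ops that have
            # remote_prefetch enabled.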
varnames = []
for op in origin_program.global_block().ops:
if op.type in SPARSE_OP_TYPE_DICT.keys() \
and op.attr('remote_prefetch') is True:
param_name = op.input(SPARSE_OP_TYPE_DICT[op.type])[0]
varnames.append(param_name)
return list(set(varnames))
sparse_varnames = _get_sparse_varnames()
sparse_param_grads, dense_param_grads = _get_params_grads(
sparse_varnames)
return sparse_param_grads, dense_param_grads
def remove_var_pair_by_grad(self, var_name):
for index, pair in enumerate(self.merged_variables_pairs):
var = pair[0]
var_grad = pair[1]
if var_grad.merged_var.name == var_name:
del self.merged_variables_pairs[index]
for index, pair in enumerate(self.merged_dense_pairs):
var = pair[0]
var_grad = pair[1]
if var_grad.merged_var.name == var_name:
del self.merged_dense_pairs[index]
return
for index, pair in enumerate(self.merged_sparse_pairs):
var = pair[0]
var_grad = pair[1]
if var_grad.merged_var.name == var_name:
del self.merged_sparse_pairs[index]
return
print("Not find {} in self.merge_pairs".format(var_name))
def _is_opt_role_op(op):
    # NOTE: rely on the op role attribute to determine whether this op
    # belongs to the optimizer.
op_maker = core.op_proto_and_checker_maker
optimize_role = core.op_proto_and_checker_maker.OpRole.Optimize
if op_maker.kOpRoleAttrName() in op.attr_names and \
int(op.all_attrs()[op_maker.kOpRoleAttrName()]) == int(optimize_role):
return True
return False
def _get_optimize_ops(_program):
block = _program.global_block()
opt_ops = []
for op in block.ops:
if _is_opt_role_op(op):
# delete clip op from opt_ops when run in Parameter Server mode
if OP_NAME_SCOPE in op.all_attrs() \
and CLIP_OP_NAME_SCOPE in op.attr(OP_NAME_SCOPE):
op._set_attr(
"op_role",
int(core.op_proto_and_checker_maker.OpRole.Backward))
continue
opt_ops.append(op)
return opt_ops
def _add_lr_decay_table_pass(main_program, compiled_config, lr_decay_steps):
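    # If the origin program carries an LRScheduler, build a standalone learning-rate
    # decay program and register it as the "@LR_DECAY_COUNTER@" global-step tensor table.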
if hasattr(compiled_config.origin_main_program, 'lr_sheduler'):
from paddle.optimizer.lr import LRScheduler
assert isinstance(compiled_config.origin_main_program.lr_sheduler,
LRScheduler), "must be LRScheduler"
ops = _get_optimize_ops(compiled_config.origin_main_program)
lr_param_dict = _get_lr_param_dict(ops)
lr_decay_main_program, lr_decay_startup_program, lr_name = _get_lr_sheduler_program(
compiled_config.origin_main_program.lr_sheduler, lr_param_dict,
lr_decay_steps)
compiled_config.add_tensor_table(
"@LR_DECAY_COUNTER@", lr_name, lr_decay_startup_program,
lr_decay_main_program, "GlobalStepTable")
def _get_lr_param_dict(opt_ops):
lr_param_dict = {}
for op in opt_ops:
lr_name = op.input("LearningRate")[0]
param_name = op.input("Param")[0]
if lr_name not in lr_param_dict:
lr_param_dict[lr_name] = []
lr_param_dict[lr_name].append(param_name)
return lr_param_dict
def _get_lr_sheduler_program(lr_sheduler, lr_param_dict, lr_decay_steps):
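    # Translate a supported LRScheduler (Exponential/Noam/NaturalExp/InverseTime decay)
    # into an equivalent pair of fluid programs; any other scheduler raises ValueError.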
    scheduler_decay = [
'NoamDecay', 'NaturalExpDecay', 'InverseTimeDecay', 'ExponentialDecay'
]
from paddle.optimizer.lr import ExponentialDecay, NoamDecay, PiecewiseDecay, NaturalExpDecay, InverseTimeDecay
from paddle.fluid.layers.learning_rate_scheduler import exponential_decay, noam_decay, piecewise_decay, natural_exp_decay, inverse_time_decay
decay_main_program = fluid.framework.Program()
decay_startup_program = fluid.framework.Program()
lr_name = ""
if isinstance(lr_sheduler, ExponentialDecay):
with fluid.program_guard(decay_main_program, decay_startup_program):
lr = exponential_decay(1.0, lr_decay_steps, lr_sheduler.gamma, True)
lr_name = lr.name
logging.warn(
"ExponentialDecay is set, staircase = True, global learning rate decay step is [ %d ], Change decay steps as follow: \n"
"\t strategy = paddle.distributed.fleet.DistributedStrategy() \n "
"\t strategy.a_sync = True \n"
"\t strategy.a_sync_configs= { 'lr_decay_steps' : YOUR_DECAY_STEP } \n"
% lr_decay_steps)
elif isinstance(lr_sheduler, NoamDecay):
with fluid.program_guard(decay_main_program, decay_startup_program):
lr = noam_decay(lr_sheduler.d_model, lr_sheduler.warmup_steps, 1.0)
lr_name = lr.name
logging.warn("NoamDecay is set, warmup steps is [ %d ]" %
lr_sheduler.warmup_steps)
elif isinstance(lr_sheduler, NaturalExpDecay):
with fluid.program_guard(decay_main_program, decay_startup_program):
lr = natural_exp_decay(1.0, lr_decay_steps, lr_sheduler.gamma, True)
lr_name = lr.name
logging.warn(
"NaturalExpDecay is set, staircase = True, global learning rate decay step is [ %d ], Change decay steps as follow: \n"
"\t strategy = paddle.distributed.fleet.DistributedStrategy() \n "
"\t strategy.a_sync = True \n"
"\t strategy.a_sync_configs= { 'lr_decay_steps' : YOUR_DECAY_STEP } \n"
% lr_decay_steps)
elif isinstance(lr_sheduler, InverseTimeDecay):
with fluid.program_guard(decay_main_program, decay_startup_program):
lr = inverse_time_decay(1.0, lr_decay_steps, lr_sheduler.gamma,
True)
lr_name = lr.name
logging.warn(
"InverseTimeDecay is set, staircase = True, global learning rate decay step is [ %d ], Change decay steps as follow: \n"
"\t strategy = paddle.distributed.fleet.DistributedStrategy() \n "
"\t strategy.a_sync = True \n"
"\t strategy.a_sync_configs= { 'lr_decay_steps' : YOUR_DECAY_STEP } \n"
% lr_decay_steps)
else:
raise ValueError(
"Not supported current LearningRate strategy, please use follow decay strategy: {}".
            format(scheduler_decay))
return decay_main_program, decay_startup_program, lr_name
def _get_varname_parts(varname):
# returns origin, blockid, trainerid
orig_var_name = ""
trainer_part = ""
block_part = ""
trainer_idx = varname.find(".trainer_")
if trainer_idx >= 0:
trainer_part = varname[trainer_idx + 1:]
else:
trainer_idx = len(varname)
block_index = varname.find(".block")
if block_index >= 0:
block_part = varname[block_index + 1:trainer_idx]
else:
block_index = len(varname)
orig_var_name = varname[0:min(block_index, trainer_idx)]
return orig_var_name, block_part, trainer_part
def _orig_varname(varname):
orig, _, _ = _get_varname_parts(varname)
return orig
|
apache-2.0
| -7,821,824,388,392,121,000 | 38.75695 | 145 | 0.537979 | false |
yittg/Snipping
|
snipping/prompt_toolkit/layout.py
|
1
|
5507
|
"""snipping.prompt_toolkit.layout
wrappers for layout
"""
from prompt_toolkit.key_binding import vi_state
from prompt_toolkit.layout import containers
from prompt_toolkit.layout import controls
from prompt_toolkit.layout import dimension
from prompt_toolkit.layout import highlighters
from prompt_toolkit.layout import margins
from prompt_toolkit.layout import processors
from prompt_toolkit.layout import screen
from prompt_toolkit.layout import toolbars
from snipping.prompt_toolkit import style
from snipping.prompt_toolkit import buffers
class NumberredMargin(margins.NumberredMargin):
""" A simple and customized `create_margin` of origin `NumberredMargin`
"""
def create_margin(self, cli, wr_info, width, height):
visible_line_to_input_line = wr_info.visible_line_to_input_line
token = style.Token.LineNumber
token_error = style.ErrorLineNo
result = []
app = cli.application
snippet = buffers.get_content(app)
cp = app.engine.compile(snippet)
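        # cp is presumably the 1-based number of the line flagged by the engine's compile
        # check (None when the snippet compiles); that line is drawn with the error token.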
for y in range(wr_info.window_height):
line_number = visible_line_to_input_line.get(y)
if line_number is not None:
if cp is not None and line_number + 1 == cp:
result.append((token_error,
('%i ' % (line_number + 1)).rjust(width)))
else:
result.append((token,
('%i ' % (line_number + 1)).rjust(width)))
result.append((style.Token, '\n'))
return result
def dim(min_=None, max_=None, exact=None):
if exact is not None:
return dimension.LayoutDimension.exact(exact)
return dimension.LayoutDimension(min=min_, max=max_)
def horizontal_line(min_width=None, max_width=None, char=' '):
height = dim(exact=1)
width = dim(min_=min_width, max_=max_width)
content = controls.FillControl(char, token=style.Line)
return containers.Window(width=width, height=height, content=content)
def vertical_line(min_height=None, max_height=None, char=' '):
width = dim(exact=1)
height = dim(min_=min_height, max_=max_height)
content = controls.FillControl(char, token=style.Line)
return containers.Window(width=width, height=height, content=content)
def text_window_bar(name=None, key_binding_manager=None):
def get_tokens(cli):
text_style = style.Bar.Text
display_text, read_only = buffers.buffer_display(cli.application, name)
if not read_only and cli.current_buffer_name == name:
vi_mode = key_binding_manager.get_vi_state(cli).input_mode
if vi_mode == vi_state.InputMode.INSERT:
text_style = style.Bar.Hl_Text
tokens = [(text_style, display_text),
(text_style, u' \u2022 ')]
if vi_mode == vi_state.InputMode.INSERT:
tokens.append((text_style, 'INSERT'))
elif vi_mode == vi_state.InputMode.NAVIGATION:
tokens.append((text_style, 'NORMAL'))
else:
tokens.append((text_style, '[ ]'))
return tokens
else:
return [(text_style, display_text)]
return toolbars.TokenListToolbar(
get_tokens, default_char=screen.Char(' ', style.Bar.Text))
def normal_text_window(name=None, lang=None, lineno=False,
leading_space=False, trailing_space=False,
width=None, height=None):
if name is None:
name = buffers.DEFAULT_BUFFER
bf_attrs = {'buffer_name': name,
'lexer': style.get_lexer_by_lang(lang),
'highlighters': [highlighters.SelectionHighlighter()]}
input_processors = []
if leading_space:
input_processors.append(processors.ShowLeadingWhiteSpaceProcessor())
if trailing_space:
input_processors.append(processors.ShowTrailingWhiteSpaceProcessor())
if input_processors:
bf_attrs['input_processors'] = input_processors
win_attrs = {}
left_margins = []
if lineno:
left_margins.append(NumberredMargin(name))
if left_margins:
win_attrs['left_margins'] = left_margins
if height is not None:
win_attrs['height'] = height
if width is not None:
win_attrs['width'] = width
content = controls.BufferControl(**bf_attrs)
return containers.Window(content=content, **win_attrs)
def horizontal_tokenlist_window(get_tokens, align='left'):
tlc_attrs = {}
if align == 'center':
tlc_attrs['align_center'] = True
if align == 'right':
tlc_attrs['align_right'] = True
height = dim(exact=1)
content = controls.TokenListControl(get_tokens, **tlc_attrs)
return containers.Window(height=height, content=content)
def window_rows(windows):
return containers.HSplit(windows)
def window_columns(windows):
return containers.VSplit(windows)
def text_window_with_bar(name=None, lang=None, lineno=False,
leading_space=False, trailing_space=False,
width=None, height=None, key_binding_manager=None):
if name is None:
name = buffers.DEFAULT_BUFFER
return window_rows([
normal_text_window(
name=name, lang=lang, lineno=lineno,
leading_space=leading_space, trailing_space=trailing_space,
width=width, height=height),
text_window_bar(name=name, key_binding_manager=key_binding_manager),
])
|
mit
| -1,226,969,072,952,198,400 | 33.85443 | 79 | 0.632831 | false |
praetorian-inc/pentestly
|
modules/reporting/xml.py
|
1
|
1655
|
from recon.core.module import BaseModule
from dicttoxml import dicttoxml
from xml.dom.minidom import parseString
import codecs
import os
class Module(BaseModule):
meta = {
'name': 'XML Report Generator',
'author': 'Eric Humphries (@e2fsck) and Tim Tomes (@LaNMaSteR53)',
'version': 'v0.0.2',
'description': 'Creates a XML report.',
'options': (
('tables', 'hosts, contacts, credentials', True, 'comma delineated list of tables'),
('filename', os.path.join(BaseModule.workspace, 'results.xml'), True, 'path and filename for report output'),
),
}
def module_run(self):
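        # Collect each configured table's rows into a dict keyed by table name, convert
        # it to XML with dicttoxml, and pretty-print the result into the report file.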
filename = self.options['filename']
with codecs.open(filename, 'wb', encoding='utf-8') as outfile:
# build a list of table names
tables = [x.strip() for x in self.options['tables'].split(',')]
data_dict = {}
cnt = 0
for table in tables:
data_dict[table] = []
columns = [x[0] for x in self.get_columns(table)]
rows = self.query('SELECT "%s" FROM "%s" ORDER BY 1' % ('", "'.join(columns), table))
for row in rows:
row_dict = {}
for i in range(0,len(columns)):
row_dict[columns[i]] = row[i]
data_dict[table].append(row_dict)
cnt += 1
# write the xml to a file
reparsed = parseString(dicttoxml(data_dict))
outfile.write(reparsed.toprettyxml(indent=' '*4))
self.output('%d records added to \'%s\'.' % (cnt, filename))
|
gpl-3.0
| 5,376,551,073,993,372,000 | 40.375 | 121 | 0.538973 | false |
MITHyperloopTeam/software_core
|
software/UI/pod_tube_vis.py
|
1
|
8129
|
#!/usr/bin/env python
# significant reference: http://pastebin.com/k87sfiEf
import sys
import math
import signal
import time
import os
import math, random
import numpy as np
from numpy import linalg
from PIL import Image
#interface stuff
from PyQt4 import QtCore, QtGui, QtOpenGL
import pyqtgraph as pg
#comms stuff
import lcm
from mithl import floating_base_t
from mithl import particle_filter_t
from mithl import vectorXf_t
from mithl import state_estimator_particle_set
from mithl import state_estimator_particle
from lcm_utils import *
#read yaml config information
import yaml
class PodTubeVisWidget(QtGui.QWidget):
''' Pod Visualization window. Plots pod state with pyqtgraph.'''
def __init__(self, config, lc=None, parent=None, name=None):
super(PodTubeVisWidget, self).__init__(parent)
self.lc = lc
if name:
self.setObjectName(name)
self.startTime = time.time()
self.config = config
self.setMinimumHeight(200)
self.plot = pg.PlotWidget(title="State Estimation")
self.plot.setXRange(0,float(config['tube']['length']),padding=0.1)
self.plot.hideAxis("left")
img = QtGui.QImage("../models/pod.png")
img = img.convertToFormat(QtGui.QImage.Format_ARGB32_Premultiplied)
img = img.rgbSwapped()
img = img.mirrored(False, True)
imgArray = np.float64(pg.imageToArray(img, copy=True))
self.img_mle = pg.ImageItem(imgArray, opacity=0.0)
self.img_gt = pg.ImageItem(imgArray, opacity=0.9)
self.img_aspect = float(imgArray.shape[1]) / float(imgArray.shape[0])
self.pod_mle = 0.0
self.pod_gt = 0.0
self.viewBox = self.plot.getViewBox()
self.viewBox.setMouseEnabled(x=True, y=False)
self.viewBox.setYRange(-0.5, 0.5)
self.viewBox.setBackgroundColor([50, 80, 80])
# add a nice gradient background
self.gradBackground = QtGui.QGraphicsRectItem(0, -1, config["tube"]["length"], 2)
gradient = QtGui.QLinearGradient(0, -1, 0, 2)
gradient.setColorAt(0.0, QtGui.QColor(50, 50, 50))
gradient.setColorAt(1.0, QtGui.QColor(40, 40, 160))
self.gradBackground.setBrush(QtGui.QBrush(gradient))
self.viewBox.addItem(self.gradBackground)
# add the fiducial markers at half opacity
line_center = config["tube"]["length"] - config["tube"]["distance_after_last_fiducial"]
self.lines = []
self.lineColor = QtGui.QColor(200, 200, 0)
self.lineWidth = config["tube"]["fiducial_width"]
while (line_center > 0):
line = QtGui.QGraphicsLineItem(line_center, -1.0, line_center, 1.0)
self.lines.append(line)
self.viewBox.addItem(line)
line_center -= config["tube"]["fiducial_separation"]
# add the keep-outs and back and front
backZone = QtGui.QGraphicsRectItem(-50000, -1, 50000, 2)
#backZone.setPen(QtCore.Qt.NoPen)
backZone.setBrush(QtGui.QBrush(QtGui.QColor(200, 50, 50), QtCore.Qt.Dense1Pattern))
self.viewBox.addItem(backZone)
frontZone = QtGui.QGraphicsRectItem(config["tube"]["length"], -1, 50000, 2)
#backZone.setPen(QtCore.Qt.NoPen)
frontZone.setBrush(QtGui.QBrush(QtGui.QColor(200, 50, 50), QtCore.Qt.Dense1Pattern))
self.viewBox.addItem(frontZone)
self.particles = np.zeros((0, 3))
self.particles_scatter = pg.ScatterPlotItem()
self.viewBox.addItem(self.particles_scatter)
self.viewBox.addItem(self.img_mle)
self.viewBox.addItem(self.img_gt)
self.densityCurve = self.plot.plot([0], [0], pen=pg.mkPen([255, 0, 0]))
#self.setWindowTitle("BrakingSliders")
mainLayout = QtGui.QVBoxLayout()
mainLayout.addWidget(self.plot)
self.setLayout(mainLayout)
self.podse_sub = self.lc.subscribe("_FC_SE", self.handle_state_estimate)
self.podfb_sub = self.lc.subscribe("SIM_FB", self.handle_ground_truth)
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.update)
self.timer.start(33)
def update(self):
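        # Timer-driven redraw (~30 Hz): pan the view to keep the pod in frame, rescale the
        # pod sprites, keep fiducial lines at least one pixel wide, and redraw the particle
        # scatter and weight-density curve when particles are available.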
viewXSize = float(self.viewBox.viewRange()[0][1] - self.viewBox.viewRange()[0][0])
viewYSize = float(self.viewBox.viewRange()[1][1] - self.viewBox.viewRange()[1][0])
# given MLE, make view range track pod
xmin, xmax = self.viewBox.viewRange()[0]
borderRatio = 0.4
softxmax = borderRatio*xmin + (1.0-borderRatio)*xmax
softxmin = (1.0-borderRatio)*xmin + borderRatio*xmax
if (self.pod_gt - softxmax >= 0):
xmin += (self.pod_gt - softxmax)*0.25
xmax += (self.pod_gt - softxmax)*0.25
elif (self.pod_gt - softxmin <= 0):
xmin += (self.pod_gt - softxmin)*0.25
xmax += (self.pod_gt - softxmin)*0.25
self.viewBox.setRange(xRange=(xmin, xmax), padding=0.0)
# might need to generate these
viewXSize = xmax - xmin
minScale = 1./10.0
# draw as either 2.0 meters long, or minScale total line length, preserving
# aspect ratio in view pixel
actualViewRatio = self.viewBox.viewPixelSize() # size of one screen pixel in view coords
viewRatioAdjustment = float(actualViewRatio[0]) / float(actualViewRatio[1])
mleDrawLength = max(2.0, viewXSize * minScale)
mleDrawHeight = mleDrawLength * self.img_aspect / viewRatioAdjustment
mleDrawX = self.pod_mle - mleDrawLength / 2.
mleDrawY = viewYSize/10.0 + self.viewBox.viewRange()[1][0] - mleDrawHeight / 2.0
mleDrawRect = QtCore.QRectF(mleDrawX, mleDrawY, mleDrawLength, mleDrawHeight)
self.img_mle.setRect(mleDrawRect)
gtDrawX = self.pod_gt - mleDrawLength / 2.
gtDrawRect = QtCore.QRectF(gtDrawX, mleDrawY, mleDrawLength, mleDrawHeight)
self.img_gt.setRect(gtDrawRect)
for line in self.lines:
# lines must be at least 1 px wide
line.setPen(QtGui.QPen(self.lineColor, max(self.lineWidth, actualViewRatio[0]) , QtCore.Qt.SolidLine))
if len(self.particles) > 0:
weights = np.array([p[2] for p in self.particles])
normalized_weights = weights / np.max(weights)
self.particles_scatter.setData(np.array([p[0][0] for p in self.particles]), 0.5*normalized_weights-0.25, pen=pg.mkPen([0, 125, 255, 150], width=5))
# build up sample points
densityX = np.array([xmin, xmax])
for p in self.particles:
densityX = np.append(densityX, np.arange(p[0][0]-p[1][0][0]*4, p[0][0]+p[1][0][0]*4, max(p[1][0][0]/2, 0.01)))
densityX = np.sort(densityX)
densityY = np.zeros(densityX.shape)
for p in self.particles:
densityY += p[2] * np.exp( - (densityX - p[0][0])**2 / p[1][0][0]**2) / np.sqrt(2 * math.pi * max(p[1][0][0]/2, 0.01)**2)
densityY /= np.max(densityY)*1.5
densityY -= -mleDrawY
self.densityCurve.setData(densityX, densityY)
def handle_state_estimate(self, channel, data):
msg = state_estimator_particle_set.decode(data)
self.pod_mle = msg.particles[0].mu[0]
particles = []
for i in range(msg.n_particles):
if msg.particles[i].id >= 0 and msg.particles[i].weight > 0.:
particles.append([msg.particles[i].mu,
msg.particles[i].Sigma,
msg.particles[i].weight])
self.particles = particles
self.pod_gt = msg.particles[0].mu[0]
def handle_ground_truth(self, channel, data):
msg = floating_base_t.decode(data)
#self.pod_gt = msg.q[0]
if __name__ == '__main__':
# hook up interrupt signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
with open('../config/simConfig.yaml', 'r') as f:
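        # NOTE: recent PyYAML releases require an explicit Loader for yaml.load();
        # yaml.safe_load(f) is the usual drop-in replacement here.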
config = yaml.load(f)
lc = create_lcm()
app = QtGui.QApplication(sys.argv)
window = PodTubeVisWidget(config, lc=lc)
window.show()
start_lcm(lc)
sys.exit(app.exec_())
|
lgpl-3.0
| 106,389,324,820,216,420 | 36.634259 | 159 | 0.62111 | false |
manz/python-mapnik
|
test/python_tests/sqlite_test.py
|
1
|
20465
|
#!/usr/bin/env python
from __future__ import print_function
from nose.tools import eq_, raises
from .utilities import execution_path, run_all
import os
import mapnik
def setup():
# All of the paths used are relative, if we run the tests
# from another directory we need to chdir()
os.chdir(execution_path('.'))
def teardown():
index = '../data/sqlite/world.sqlite.index'
if os.path.exists(index):
os.unlink(index)
if 'sqlite' in mapnik.DatasourceCache.plugin_names():
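    # The tests below are only defined when the SQLite datasource plugin is present,
    # so builds without it skip them entirely.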
def test_attachdb_with_relative_file():
        # The point table and index are in the qgis_spatiallite.sqlite
        # database. If either is not found, this test fails.
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='point',
attachdb='scratch@qgis_spatiallite.sqlite'
)
fs = ds.featureset()
feature = fs.next()
eq_(feature['pkuid'],1)
test_attachdb_with_relative_file.requires_data = True
def test_attachdb_with_multiple_files():
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='attachedtest',
attachdb='scratch1@:memory:,scratch2@:memory:',
initdb='''
create table scratch1.attachedtest (the_geom);
create virtual table scratch2.idx_attachedtest_the_geom using rtree(pkid,xmin,xmax,ymin,ymax);
insert into scratch2.idx_attachedtest_the_geom values (1,-7799225.5,-7778571.0,1393264.125,1417719.375);
'''
)
fs = ds.featureset()
feature = None
        try:
feature = fs.next()
except StopIteration:
pass
# the above should not throw but will result in no features
eq_(feature,None)
test_attachdb_with_multiple_files.requires_data = True
def test_attachdb_with_absolute_file():
        # The point table and index are in the qgis_spatiallite.sqlite
        # database. If either is not found, this test fails.
ds = mapnik.SQLite(file=os.getcwd() + '/../data/sqlite/world.sqlite',
table='point',
attachdb='scratch@qgis_spatiallite.sqlite'
)
fs = ds.featureset()
feature = fs.next()
eq_(feature['pkuid'],1)
test_attachdb_with_absolute_file.requires_data = True
def test_attachdb_with_index():
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='attachedtest',
attachdb='scratch@:memory:',
initdb='''
create table scratch.attachedtest (the_geom);
create virtual table scratch.idx_attachedtest_the_geom using rtree(pkid,xmin,xmax,ymin,ymax);
insert into scratch.idx_attachedtest_the_geom values (1,-7799225.5,-7778571.0,1393264.125,1417719.375);
'''
)
fs = ds.featureset()
feature = None
        try:
feature = fs.next()
except StopIteration:
pass
eq_(feature,None)
test_attachdb_with_index.requires_data = True
def test_attachdb_with_explicit_index():
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='attachedtest',
index_table='myindex',
attachdb='scratch@:memory:',
initdb='''
create table scratch.attachedtest (the_geom);
create virtual table scratch.myindex using rtree(pkid,xmin,xmax,ymin,ymax);
insert into scratch.myindex values (1,-7799225.5,-7778571.0,1393264.125,1417719.375);
'''
)
fs = ds.featureset()
feature = None
try:
feature = fs.next()
except StopIteration:
pass
eq_(feature,None)
test_attachdb_with_explicit_index.requires_data = True
def test_attachdb_with_sql_join():
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='(select * from world_merc INNER JOIN business on world_merc.iso3 = business.ISO3 limit 100)',
attachdb='[email protected]'
)
eq_(len(ds.fields()),29)
eq_(ds.fields(),['OGC_FID', 'fips', 'iso2', 'iso3', 'un', 'name', 'area', 'pop2005', 'region', 'subregion', 'lon', 'lat', 'ISO3:1', '1995', '1996', '1997', '1998', '1999', '2000', '2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010'])
eq_(ds.field_types(),['int', 'str', 'str', 'str', 'int', 'str', 'int', 'int', 'int', 'int', 'float', 'float', 'str', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int'])
fs = ds.featureset()
feature = fs.next()
eq_(feature.id(),1)
expected = {
1995:0,
1996:0,
1997:0,
1998:0,
1999:0,
2000:0,
2001:0,
2002:0,
2003:0,
2004:0,
2005:0,
2006:0,
2007:0,
2008:0,
2009:0,
2010:0,
            # this appears to be sqlite's way of
            # automatically handling clashing column names
'ISO3:1':'ATG',
'OGC_FID':1,
'area':44,
'fips':u'AC',
'iso2':u'AG',
'iso3':u'ATG',
'lat':17.078,
'lon':-61.783,
'name':u'Antigua and Barbuda',
'pop2005':83039,
'region':19,
'subregion':29,
'un':28
}
for k,v in expected.items():
try:
eq_(feature[str(k)],v)
except:
#import pdb;pdb.set_trace()
print('invalid key/v %s/%s for: %s' % (k,v,feature))
test_attachdb_with_sql_join.requires_data = True
def test_attachdb_with_sql_join_count():
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='(select * from world_merc INNER JOIN business on world_merc.iso3 = business.ISO3 limit 100)',
attachdb='[email protected]'
)
eq_(len(ds.fields()),29)
eq_(ds.fields(),['OGC_FID', 'fips', 'iso2', 'iso3', 'un', 'name', 'area', 'pop2005', 'region', 'subregion', 'lon', 'lat', 'ISO3:1', '1995', '1996', '1997', '1998', '1999', '2000', '2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010'])
eq_(ds.field_types(),['int', 'str', 'str', 'str', 'int', 'str', 'int', 'int', 'int', 'int', 'float', 'float', 'str', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int'])
eq_(len(ds.all_features()),100)
test_attachdb_with_sql_join_count.requires_data = True
def test_attachdb_with_sql_join_count2():
'''
sqlite3 world.sqlite
attach database 'business.sqlite' as business;
select count(*) from world_merc INNER JOIN business on world_merc.iso3 = business.ISO3;
'''
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='(select * from world_merc INNER JOIN business on world_merc.iso3 = business.ISO3)',
attachdb='[email protected]'
)
eq_(len(ds.fields()),29)
eq_(ds.fields(),['OGC_FID', 'fips', 'iso2', 'iso3', 'un', 'name', 'area', 'pop2005', 'region', 'subregion', 'lon', 'lat', 'ISO3:1', '1995', '1996', '1997', '1998', '1999', '2000', '2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010'])
eq_(ds.field_types(),['int', 'str', 'str', 'str', 'int', 'str', 'int', 'int', 'int', 'int', 'float', 'float', 'str', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int'])
eq_(len(ds.all_features()),192)
test_attachdb_with_sql_join_count2.requires_data = True
def test_attachdb_with_sql_join_count3():
'''
select count(*) from (select * from world_merc where 1=1) as world_merc INNER JOIN business on world_merc.iso3 = business.ISO3;
'''
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='(select * from (select * from world_merc where !intersects!) as world_merc INNER JOIN business on world_merc.iso3 = business.ISO3)',
attachdb='[email protected]'
)
eq_(len(ds.fields()),29)
eq_(ds.fields(),['OGC_FID', 'fips', 'iso2', 'iso3', 'un', 'name', 'area', 'pop2005', 'region', 'subregion', 'lon', 'lat', 'ISO3:1', '1995', '1996', '1997', '1998', '1999', '2000', '2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010'])
eq_(ds.field_types(),['int', 'str', 'str', 'str', 'int', 'str', 'int', 'int', 'int', 'int', 'float', 'float', 'str', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int'])
eq_(len(ds.all_features()),192)
test_attachdb_with_sql_join_count3.requires_data = True
def test_attachdb_with_sql_join_count4():
'''
select count(*) from (select * from world_merc where 1=1) as world_merc INNER JOIN business on world_merc.iso3 = business.ISO3;
'''
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='(select * from (select * from world_merc where !intersects! limit 1) as world_merc INNER JOIN business on world_merc.iso3 = business.ISO3)',
attachdb='[email protected]'
)
eq_(len(ds.fields()),29)
eq_(ds.fields(),['OGC_FID', 'fips', 'iso2', 'iso3', 'un', 'name', 'area', 'pop2005', 'region', 'subregion', 'lon', 'lat', 'ISO3:1', '1995', '1996', '1997', '1998', '1999', '2000', '2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010'])
eq_(ds.field_types(),['int', 'str', 'str', 'str', 'int', 'str', 'int', 'int', 'int', 'int', 'float', 'float', 'str', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int', 'int'])
eq_(len(ds.all_features()),1)
test_attachdb_with_sql_join_count4.requires_data = True
def test_attachdb_with_sql_join_count5():
'''
select count(*) from (select * from world_merc where 1=1) as world_merc INNER JOIN business on world_merc.iso3 = business.ISO3;
'''
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='(select * from (select * from world_merc where !intersects! and 1=2) as world_merc INNER JOIN business on world_merc.iso3 = business.ISO3)',
attachdb='[email protected]'
)
# nothing is able to join to business so we don't pick up business schema
eq_(len(ds.fields()),12)
eq_(ds.fields(),['OGC_FID', 'fips', 'iso2', 'iso3', 'un', 'name', 'area', 'pop2005', 'region', 'subregion', 'lon', 'lat'])
eq_(ds.field_types(),['int', 'str', 'str', 'str', 'int', 'str', 'int', 'int', 'int', 'int', 'float', 'float'])
eq_(len(ds.all_features()),0)
test_attachdb_with_sql_join_count5.requires_data = True
def test_subqueries():
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='world_merc',
)
fs = ds.featureset()
feature = fs.next()
eq_(feature['OGC_FID'],1)
eq_(feature['fips'],u'AC')
eq_(feature['iso2'],u'AG')
eq_(feature['iso3'],u'ATG')
eq_(feature['un'],28)
eq_(feature['name'],u'Antigua and Barbuda')
eq_(feature['area'],44)
eq_(feature['pop2005'],83039)
eq_(feature['region'],19)
eq_(feature['subregion'],29)
eq_(feature['lon'],-61.783)
eq_(feature['lat'],17.078)
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='(select * from world_merc)',
)
fs = ds.featureset()
feature = fs.next()
eq_(feature['OGC_FID'],1)
eq_(feature['fips'],u'AC')
eq_(feature['iso2'],u'AG')
eq_(feature['iso3'],u'ATG')
eq_(feature['un'],28)
eq_(feature['name'],u'Antigua and Barbuda')
eq_(feature['area'],44)
eq_(feature['pop2005'],83039)
eq_(feature['region'],19)
eq_(feature['subregion'],29)
eq_(feature['lon'],-61.783)
eq_(feature['lat'],17.078)
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='(select OGC_FID,GEOMETRY from world_merc)',
)
fs = ds.featureset()
feature = fs.next()
eq_(feature['OGC_FID'],1)
eq_(len(feature),1)
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='(select GEOMETRY,OGC_FID,fips from world_merc)',
)
fs = ds.featureset()
feature = fs.next()
eq_(feature['OGC_FID'],1)
eq_(feature['fips'],u'AC')
# same as above, except with alias like postgres requires
# TODO - should we try to make this work?
#ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
# table='(select GEOMETRY,rowid as aliased_id,fips from world_merc) as table',
# key_field='aliased_id'
# )
#fs = ds.featureset()
#feature = fs.next()
#eq_(feature['aliased_id'],1)
#eq_(feature['fips'],u'AC')
ds = mapnik.SQLite(file='../data/sqlite/world.sqlite',
table='(select GEOMETRY,OGC_FID,OGC_FID as rowid,fips from world_merc)',
)
fs = ds.featureset()
feature = fs.next()
eq_(feature['rowid'],1)
eq_(feature['fips'],u'AC')
test_subqueries.requires_data = True
def test_empty_db():
ds = mapnik.SQLite(file='../data/sqlite/empty.db',
table='empty',
)
fs = ds.featureset()
feature = None
try:
feature = fs.next()
except StopIteration:
pass
eq_(feature,None)
test_empty_db.requires_data = True
@raises(RuntimeError)
def test_that_nonexistant_query_field_throws(**kwargs):
ds = mapnik.SQLite(file='../data/sqlite/empty.db',
table='empty',
)
eq_(len(ds.fields()),25)
eq_(ds.fields(),['OGC_FID', 'scalerank', 'labelrank', 'featurecla', 'sovereignt', 'sov_a3', 'adm0_dif', 'level', 'type', 'admin', 'adm0_a3', 'geou_dif', 'name', 'abbrev', 'postal', 'name_forma', 'terr_', 'name_sort', 'map_color', 'pop_est', 'gdp_md_est', 'fips_10_', 'iso_a2', 'iso_a3', 'iso_n3'])
eq_(ds.field_types(),['int', 'int', 'int', 'str', 'str', 'str', 'float', 'float', 'str', 'str', 'str', 'float', 'str', 'str', 'str', 'str', 'str', 'str', 'float', 'float', 'float', 'float', 'str', 'str', 'float'])
query = mapnik.Query(ds.envelope())
for fld in ds.fields():
query.add_property_name(fld)
# also add an invalid one, triggering throw
query.add_property_name('bogus')
ds.features(query)
test_that_nonexistant_query_field_throws.requires_data = True
def test_intersects_token1():
ds = mapnik.SQLite(file='../data/sqlite/empty.db',
table='(select * from empty where !intersects!)',
)
fs = ds.featureset()
feature = None
        try:
feature = fs.next()
except StopIteration:
pass
eq_(feature,None)
test_intersects_token1.requires_data = True
def test_intersects_token2():
ds = mapnik.SQLite(file='../data/sqlite/empty.db',
table='(select * from empty where "a"!="b" and !intersects!)',
)
fs = ds.featureset()
feature = None
        try:
feature = fs.next()
except StopIteration:
pass
eq_(feature,None)
test_intersects_token2.requires_data = True
def test_intersects_token3():
ds = mapnik.SQLite(file='../data/sqlite/empty.db',
table='(select * from empty where "a"!="b" and !intersects!)',
)
fs = ds.featureset()
feature = None
        try:
feature = fs.next()
except StopIteration:
pass
eq_(feature,None)
test_intersects_token3.requires_data = True
# https://github.com/mapnik/mapnik/issues/1537
# this works because key_field is manually set
def test_db_with_one_text_column():
# form up an in-memory test db
wkb = '010100000000000000000000000000000000000000'
ds = mapnik.SQLite(file=':memory:',
table='test1',
initdb='''
create table test1 (alias TEXT,geometry BLOB);
insert into test1 values ("test",x'%s');
''' % wkb,
extent='-180,-60,180,60',
use_spatial_index=False,
key_field='alias'
)
eq_(len(ds.fields()),1)
eq_(ds.fields(),['alias'])
eq_(ds.field_types(),['str'])
fs = ds.all_features()
eq_(len(fs),1)
feat = fs[0]
eq_(feat.id(),0) # should be 1?
eq_(feat['alias'],'test')
eq_(feat.geometry.to_wkt(),'POINT(0 0)')
def test_db_with_one_untyped_column():
# form up an in-memory test db
wkb = '010100000000000000000000000000000000000000'
ds = mapnik.SQLite(file=':memory:',
table='test1',
initdb='''
create table test1 (geometry BLOB, untyped);
insert into test1 values (x'%s', 'untyped');
''' % wkb,
extent='-180,-60,180,60',
use_spatial_index=False,
key_field='rowid'
)
# ensure the untyped column is found
eq_(len(ds.fields()),2)
eq_(ds.fields(),['rowid', 'untyped'])
eq_(ds.field_types(),['int', 'str'])
def test_db_with_one_untyped_column_using_subquery():
# form up an in-memory test db
wkb = '010100000000000000000000000000000000000000'
ds = mapnik.SQLite(file=':memory:',
table='(SELECT rowid, geometry, untyped FROM test1)',
initdb='''
create table test1 (geometry BLOB, untyped);
insert into test1 values (x'%s', 'untyped');
''' % wkb,
extent='-180,-60,180,60',
use_spatial_index=False,
key_field='rowid'
)
# ensure the untyped column is found
eq_(len(ds.fields()),3)
eq_(ds.fields(),['rowid', 'untyped', 'rowid'])
eq_(ds.field_types(),['int', 'str', 'int'])
def test_that_64bit_int_fields_work():
ds = mapnik.SQLite(file='../data/sqlite/64bit_int.sqlite',
table='int_table',
use_spatial_index=False
)
eq_(len(ds.fields()),3)
eq_(ds.fields(),['OGC_FID','id','bigint'])
eq_(ds.field_types(),['int','int','int'])
fs = ds.featureset()
feat = fs.next()
eq_(feat.id(),1)
eq_(feat['OGC_FID'],1)
eq_(feat['bigint'],2147483648)
feat = fs.next()
eq_(feat.id(),2)
eq_(feat['OGC_FID'],2)
eq_(feat['bigint'],922337203685477580)
test_that_64bit_int_fields_work.requires_data = True
def test_null_id_field():
# silence null key warning: https://github.com/mapnik/mapnik/issues/1889
default_logging_severity = mapnik.logger.get_severity()
mapnik.logger.set_severity(mapnik.severity_type.none)
# form up an in-memory test db
wkb = '010100000000000000000000000000000000000000'
# note: the osm_id should be declared INTEGER PRIMARY KEY
# but in this case we intentionally do not make this a valid pkey
# otherwise sqlite would turn the null into a valid, serial id
ds = mapnik.SQLite(file=':memory:',
table='test1',
initdb='''
create table test1 (osm_id INTEGER,geometry BLOB);
insert into test1 values (null,x'%s');
''' % wkb,
extent='-180,-60,180,60',
use_spatial_index=False,
key_field='osm_id'
)
fs = ds.featureset()
feature = None
        try:
feature = fs.next()
except StopIteration:
pass
eq_(feature,None)
mapnik.logger.set_severity(default_logging_severity)
if __name__ == "__main__":
setup()
result = run_all(eval(x) for x in dir() if x.startswith("test_"))
teardown()
exit(result)
|
lgpl-2.1
| 4,323,102,137,931,213,300 | 39.766932 | 305 | 0.538334 | false |
aptomar/apt-file-format
|
test/testAptofile.py
|
1
|
23249
|
################################################################
# #
# testAptofile.py #
# Copyright (c) 2013 Aptomar AS, All Rights Reserved #
# #
# Author: Jarle Bauck Hamar: <[email protected]> #
# Date: 2013-05-23 #
# #
################################################################
import unittest
import sys
import json
sys.path.append('../src')
from aptofile import Aptofile
import jsonschema
class TestManifest(unittest.TestCase):
def setUp(self):
with open('tests/header.json') as fid:
self.inst = json.load(fid)
self.schema = Aptofile.SCHEMA
def validate(self):
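        # Helper: report schema validity as a boolean instead of letting
        # jsonschema.ValidationError propagate.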
try:
jsonschema.validate(self.inst, self.schema, Aptofile.VALIDATOR,
format_checker = jsonschema.FormatChecker())
except jsonschema.ValidationError:
return False
return True
def test_schema_validates(self):
Aptofile.VALIDATOR.check_schema(Aptofile.SCHEMA)
def test_valid_manifest_header(self):
self.assertTrue(self.validate())
def test_manifest_missing_date(self):
del self.inst["date"]
self.assertFalse(self.validate())
def test_manifest_missing_description(self):
del self.inst["description"]
self.assertFalse(self.validate())
def test_manifest_missing_version(self):
del self.inst["manifest_version"]
self.assertFalse(self.validate())
def test_manifest_missing_generator(self):
del self.inst["generator"]
self.assertFalse(self.validate())
def test_manifest_bad_date(self):
self.inst["date"] = "tomorrow"
self.assertFalse(self.validate())
def test_manifest_disallow_additional_properties(self):
self.inst["extra"] = "large"
self.assertFalse(self.validate())
class TestAsset(unittest.TestCase):
def testCreateAsset(self):
f = 'tests/asset.apt'
with Aptofile.create(f,'asset') as af:
af.setDescription("This is a description of the asset.")
af.setGenerator("aptfile.py", "Aptomar AS")
af.addLayer('layer1', name='layer1-name',
geometry_data=[('tests/asset/layers/layer1.dbf',
'file:/layers/layer1.dbf'),
('tests/asset/layers/layer1.shp',
'layers/layer1.shp'),
('tests/asset/layers/layer1.shx',
'layers/layer1.shx')])
af.addFile2Layer(('tests/asset/styles/layer1.xml',
'styles/layer1.xml'), 'layer1', 'style')
af.addFile2Layer(('tests/asset/resource1.png','resource1.png'),
'layer1', 'resources')
af.addFile2Layer(('tests/asset/resource2.png','resource2.png'),
'layer1', 'resources')
af.addLayer('layer2',name='layer2-name')
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.dbf', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shx', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('styles/layer1.xml','layer2',
'style', writeFile=False)
af.addFile2Layer('resource1.png','layer2','resources', writeFile=False)
af.addFile2Layer('resource2.png','layer2','resources', writeFile=False)
af.addFile2Layer('http://very-big-file.com/','layer2','resources', writeFile=True)
af.addGroup('group1','group1-name',['layer1'])
af.addGroup('group2','group2-name',['layer2'])
#Validate before write:
self.assertTrue(af.validate())
#Validate after write and open
self.assertTrue(Aptofile.validateFile(f))
def testAssetMissingFile(self):
f = 'tests/asset_missing_file.apt'
with Aptofile.create(f,'asset') as af:
af.setDescription("This is a description of the asset.")
af.setGenerator("aptfile.py", "Aptomar AS")
af.addLayer('layer1', name='layer1-name',
geometry_data=[('tests/asset/layers/layer1.dbf',
'layers/layer1.dbf'),
('tests/asset/layers/layer1.shp',
'layers/layer1.shp'),
('tests/asset/layers/layer1.shx',
'layers/layer1.shx')])
af.addFile2Layer(('tests/asset/styles/layer1.xml',
'styles/layer1.xml'), 'layer1', 'style')
af.addFile2Layer(('tests/asset/resource1.png','resource1.png'),
'layer1', 'resources')
af.addFile2Layer(('tests/asset/resource2.png','resource2.png'),
'layer1', 'resources')
af.addLayer('layer2',name='layer2-name')
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.dbf', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shx', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('styles/layer1.xml','layer2',
'style', writeFile=False)
af.addFile2Layer('resource1.png','layer2','resources', writeFile=False)
af.addFile2Layer('resource2.png','layer2','resources', writeFile=False)
af.addFile2Layer('http://very-big-file.com/','layer2','resources', writeFile=True)
af.addGroup('group1','group1-name',['layer1'])
af.addGroup('group2','group2-name',['layer2'])
#Validate before write:
self.assertTrue(af.validate())
af.addFile2Layer('resource3.png','layer2','resources', writeFile=False)
#Validate after write and open
self.assertFalse(Aptofile.validateFile(f))
def testAssetIncorrectLayerInGroup(self):
f = 'tests/asset_incorrect_layer_in_group.apt'
with Aptofile.create(f,'asset') as af:
af.setDescription("This is a description of the asset.")
af.setGenerator("aptfile.py", "Aptomar AS")
af.addLayer('layer1', name='layer1-name',
geometry_data=[('tests/asset/layers/layer1.dbf',
'layers/layer1.dbf'),
('tests/asset/layers/layer1.shp',
'layers/layer1.shp'),
('tests/asset/layers/layer1.shx',
'layers/layer1.shx')])
af.addFile2Layer(('tests/asset/styles/layer1.xml',
'styles/layer1.xml'), 'layer1', 'style')
af.addFile2Layer(('tests/asset/resource1.png','resource1.png'),
'layer1', 'resources')
af.addFile2Layer(('tests/asset/resource2.png','resource2.png'),
'layer1', 'resources')
af.addLayer('layer2',name='layer2-name')
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.dbf', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shx', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('styles/layer1.xml','layer2',
'style', writeFile=False)
af.addFile2Layer('resource1.png','layer2','resources', writeFile=False)
af.addFile2Layer('resource2.png','layer2','resources', writeFile=False)
af.addFile2Layer('http://very-big-file.com/','layer2','resources', writeFile=True)
af.addGroup('group1','group1-name',['layer1'])
af.addGroup('group2','group2-name',['layer3'])
#Validate before write:
self.assertFalse(af.validate())
#Validate after write and open
self.assertFalse(Aptofile.validateFile(f))
def testAssetMissingStyle(self):
f = 'tests/asset_missing_style.apt'
with Aptofile.create(f,'asset') as af:
af.setDescription("This is a description of the asset.")
af.setGenerator("aptfile.py", "Aptomar AS")
af.addLayer('layer1', name='layer1-name',
geometry_data=[('tests/asset/layers/layer1.dbf',
'layers/layer1.dbf'),
('tests/asset/layers/layer1.shp',
'layers/layer1.shp'),
('tests/asset/layers/layer1.shx',
'layers/layer1.shx')])
af.addFile2Layer(('tests/asset/styles/layer1.xml',
'styles/layer1.xml'), 'layer1', 'style')
af.addFile2Layer(('tests/asset/resource1.png','resource1.png'),
'layer1', 'resources')
af.addFile2Layer(('tests/asset/resource2.png','resource2.png'),
'layer1', 'resources')
af.addLayer('layer2',name='layer2-name')
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.dbf', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shx', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('styles/layer1.xml','layer2',
'style', writeFile=False)
af.addFile2Layer('resource1.png','layer2','resources', writeFile=False)
af.addFile2Layer('resource2.png','layer2','resources', writeFile=False)
af.addFile2Layer('http://very-big-file.com/','layer2','resources', writeFile=True)
af.addGroup('group1','group1-name',['layer1'])
af.addGroup('group2','group2-name',['layer2'])
#Validate before write:
self.assertTrue(af.validate())
del af.manifest['asset']['layers']['layer1']['style']
#Validate after write and open
self.assertFalse(Aptofile.validateFile(f))
def testAssetIncorrectDataType(self):
f = 'tests/asset_incorrect_data_type.apt'
with Aptofile.create(f,'asset') as af:
af.setDescription("This is a description of the asset.")
af.setGenerator("aptfile.py", "Aptomar AS")
af.addLayer('layer1', name='layer1-name',
geometry_data=[('tests/asset/layers/layer1.dbf',
'layers/layer1.dbf'),
('tests/asset/layers/layer1.shp',
'layers/layer1.shp'),
('tests/asset/layers/layer1.shx',
'layers/layer1.shx')])
af.addFile2Layer(('tests/asset/styles/layer1.xml',
'styles/layer1.xml'), 'layer1', 'style')
af.addFile2Layer(('tests/asset/resource1.png','resource1.png'),
'layer1', 'resources')
af.addFile2Layer(('tests/asset/resource2.png','resource2.png'),
'layer1', 'resources')
af.addLayer('layer2',name='layer2-name')
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.dbf', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shx', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('styles/layer1.xml','layer2',
'style', writeFile=False)
af.addFile2Layer('resource1.png','layer2','resources', writeFile=False)
af.addFile2Layer('resource2.png','layer2','resources', writeFile=False)
af.addFile2Layer('http://very-big-file.com/','layer2','resources', writeFile=True)
af.addGroup('group1','group1-name',['layer1'])
af.addGroup('group2','group2-name',['layer2'])
#Validate before write:
self.assertTrue(af.validate())
d=af.manifest['asset']['layers']['layer1']['style']['data'].pop()
af.manifest['asset']['layers']['layer1']['style']['data'] = d
#Validate after write and open
self.assertFalse(Aptofile.validateFile(f))
class TestImage(unittest.TestCase):
def testImage(self):
f = 'tests/image.apt'
with Aptofile.create(f,'image') as af:
af.setGenerator(program='aptfile.py',creator='Aptomar AS')
af.setDescription('This is a description of the image')
af.setImageName('The image name')
af.setImageDescription('An image of something')
af.setImageGeoreference( 10.4344, 63.4181, 150.60)
af.setImageBounds(['data:,bounds as a string'])
af.addImageFile(('tests/image/image.jpg','image.jpg'))
self.assertTrue(af.validate())
self.assertTrue(Aptofile.validateFile(f))
def testImageMissingDate(self):
f = 'tests/image_missing_date.apt'
with Aptofile.create(f,'image') as af:
af.setGenerator(program='aptfile.py',creator='Aptomar AS')
af.setDescription('This is a description of the image')
af.setImageName('The image name')
af.setImageDescription('An image of something')
af.setImageGeoreference( 10.4344, 63.4181, 150.60)
af.setImageBounds(['data:,bounds as a string'])
af.addImageFile(('tests/image/image.jpg','image.jpg'))
self.assertTrue(af.validate())
del af.manifest['image']['created']
self.assertFalse(Aptofile.validateFile(f))
def testImageIncorrectDate(self):
f = 'tests/image_missing_date.apt'
with Aptofile.create(f,'image') as af:
af.setGenerator(program='aptfile.py',creator='Aptomar AS')
af.setDescription('This is a description of the image')
af.setImageName('The image name')
af.setImageDescription('An image of something')
af.setImageGeoreference( 10.4344, 63.4181, 150.60)
af.setImageBounds(['data:,bounds as a string'])
af.addImageFile(('tests/image/image.jpg','image.jpg'))
self.assertTrue(af.validate())
af.manifest['image']['created'] = '23.05.13'
af.validate()
self.assertFalse(Aptofile.validateFile(f))
def testImageMissingFileAndGenerator(self):
f = 'tests/image_missing_file_and_generator.apt'
with Aptofile.create(f,'image') as af:
af.setGenerator(program='aptfile.py',creator='Aptomar AS')
af.setDescription('This is a description of the image')
af.setImageName('The image name')
af.setImageDescription('An image of something')
af.setImageGeoreference( 10.4344, 63.4181, 150.60)
af.setImageBounds(['data:,bounds as a string'])
af.manifest['image']['data']=['image.jpg']
del af.manifest['generator']
self.assertFalse(af.validate())
self.assertFalse(Aptofile.validateFile(f))
def testImageMissingGenerator(self):
f = 'tests/image_missing_generator.apt'
with Aptofile.create(f,'image') as af:
af.setGenerator(program='aptfile.py',creator='Aptomar AS')
af.setDescription('This is a description of the image')
af.setImageName('The image name')
af.setImageDescription('An image of something')
af.setImageGeoreference( 10.4344, 63.4181, 150.60)
af.setImageBounds(['data:,bounds as a string'])
af.addImageFile(('tests/image/image.jpg','image.jpg'))
self.assertTrue(af.validate())
del af.manifest['generator']
self.assertFalse(Aptofile.validateFile(f))
class testVideo(unittest.TestCase):
def testVideo(self):
f = 'tests/video.apt'
with Aptofile.create(f,'video') as af:
af.setGenerator(program='aptfile.py',creator='Aptomar AS')
af.setDescription('This is a description of the video')
af.setVideoName('The video name')
af.setVideoDescription('A video of something')
af.setVideoGeoreference( 10.4344, 63.4181, 150.60)
af.addVideoFile(('tests/video/video.avi','video.avi'))
self.assertTrue(af.validate())
self.assertTrue(Aptofile.validateFile(f))
def testVideoMissingFile(self):
f = 'tests/video_missing_file.apt'
with Aptofile.create(f,'video') as af:
af.setGenerator(program='aptfile.py',creator='Aptomar AS')
af.setDescription('This is a description of the video')
af.setVideoName('The video name')
af.setVideoDescription('A video of something')
af.setVideoGeoreference( 10.4344, 63.4181, 150.60)
self.assertFalse(af.validate())
self.assertFalse(Aptofile.validateFile(f))
def testVideoFileNotFound(self):
f = 'tests/video_file_not_found.apt'
with Aptofile.create(f,'video') as af:
af.setGenerator(program='aptfile.py',creator='Aptomar AS')
af.setDescription('This is a description of the video')
af.setVideoName('The video name')
af.setVideoDescription('A video of something')
af.setVideoGeoreference( 10.4344, 63.4181, 150.60)
af.manifest['video']['data']=['video.avi']
self.assertFalse(af.validate())
self.assertFalse(Aptofile.validateFile(f))
def testVideoMissingName(self):
f = 'tests/video_missing_name.apt'
with Aptofile.create(f,'video') as af:
af.setGenerator(program='aptfile.py',creator='Aptomar AS')
af.setDescription('This is a description of the video')
af.setVideoName('The video name')
af.setVideoDescription('A video of something')
af.setVideoGeoreference( 10.4344, 63.4181, 150.60)
af.addVideoFile(('tests/video/video.avi','video.avi'))
self.assertTrue(af.validate())
del af.manifest['video']['name']
self.assertFalse(Aptofile.validateFile(f))
class TestPoint(unittest.TestCase):
def testPoint(self):
f = 'tests/point.apt'
with Aptofile.create(f,'point') as af:
af.setGenerator('aptfile.py','Aptomar AS')
af.setDescription('This is a description of the point.')
af.setPointName('The Point')
af.setPointDescription('This is a description of a point.')
af.setPointType('boat')
af.setPointGeometry('data:data_describing_the_point')
self.assertTrue(af.validate())
self.assertTrue(Aptofile.validateFile(f))
def testPointInvalidType(self):
f = 'tests/point_invalid_type.apt'
with Aptofile.create(f,'point') as af:
af.setGenerator('aptfile.py','Aptomar AS')
af.setDescription('This is a description of the point.')
af.setPointName('The Point')
af.setPointDescription('This is a description of a point.')
af.setPointType('boat')
af.setPointGeometry('data:data_describing_the_point')
self.assertTrue(af.validate())
af.manifest['point']['object-type'] = 'UFO'
self.assertFalse(Aptofile.validateFile(f))
def testRoute(self):
f = 'tests/route.apt'
with Aptofile.create(f,'route') as af:
af.setGenerator('aptfile.py','Aptomar AS')
af.setDescription('This is a description of the route.')
af.setRouteName('The Route')
af.setRouteDescription('This is a description of the route.')
af.setRouteGeometry('data:data_describing_the_route')
self.assertTrue(af.validate())
self.assertTrue(Aptofile.validateFile(f))
def testRouteMissingGeometry(self):
f = 'tests/route.apt'
with Aptofile.create(f,'route') as af:
af.setGenerator('aptfile.py','Aptomar AS')
af.setDescription('This is a description of the route.')
af.setRouteName('The Route')
af.setRouteDescription('This is a description of the route.')
af.setRouteGeometry('data:data_describing_the_route')
self.assertTrue(af.validate())
del af.manifest['route']['geometry']
self.assertFalse(Aptofile.validateFile(f))
class TestArea(unittest.TestCase):
def testArea(self):
f = 'tests/area.apt'
with Aptofile.create(f,'area') as af:
af.setGenerator('aptfile.py','Aptomar AS')
af.setDescription('This is a description of the area.')
af.setAreaName('The Point')
af.setAreaDescription('This is a description of the area.')
af.setAreaGeometry('data:data_describing_the_area')
self.assertTrue(af.validate())
self.assertTrue(Aptofile.validateFile(f))
def testAreaMissingAreaDescription(self):
f = 'tests/area_missing_area_desc.apt'
with Aptofile.create(f,'area') as af:
af.setGenerator('aptfile.py','Aptomar AS')
af.setDescription('This is a description of the area.')
af.setAreaName('The Point')
af.setAreaDescription('This is a description of a area.')
af.setAreaGeometry('data:data_describing_the_area')
self.assertTrue(af.validate())
del af.manifest['area']['description']
self.assertFalse(Aptofile.validateFile(f))
if __name__=='__main__':
unittest.main()
|
bsd-3-clause
| -8,490,564,460,999,139,000 | 46.543967 | 94 | 0.555422 | false |
mahrz/flintstone
|
flintstone/widget.py
|
1
|
6673
|
from curtsies import FSArray, fmtstr
from .layout_manager import LayoutManager, OverlayLayout
from .utils import wrap, box, fmtfsa, center, blit
__author__ = 'malte'
class Decorator(object):
def __init__(self, fg=None, bg=None):
self.fg = fg
self.bg = bg
self.kwargs = {}
if fg is not None:
self.kwargs['fg'] = fg
if bg is not None:
self.kwargs['bg'] = bg
def format_str(self, s):
return fmtstr(s, **self.kwargs)
def str_formatter(self):
def formatter(s):
return self.format_str(s)
return formatter
def format_fsa(self, fsa):
return fmtfsa(fsa, **self.kwargs)
class Widget(object):
def __init__(self, parent, width=None, height=None):
if isinstance(parent, LayoutManager):
self.lm = parent
self.parent = None
self.lm.add_widget(self)
elif isinstance(parent, Group):
self.lm = parent.widget_lm
self.parent = parent
self.lm.add_widget(self)
else:
raise RuntimeError("Parent needs to be a LayoutManager or a Group widget")
self.height = height
self.width = width
self.visible = True
self.tangible = True
self.focused = False
def render(self, max_width, max_height):
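        # Clamp the widget's preferred size to the space offered by the layout, allocate
        # an FSArray of that size, and let render_into_rect() fill it.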
if self.height is None:
actual_height = max_height
else:
actual_height = min(self.height, max_height)
if self.width is None:
actual_width = max_width
else:
actual_width = min(self.width, max_width)
w_fsa = FSArray(actual_height, actual_width)
self.render_into_rect(w_fsa)
return w_fsa
def render_into_rect(self, w_fsa):
pass
def render_partial(self, w_fsa, x_offset=0, y_offset=0):
assert self.height is not None
assert self.width is not None
c_fsa = FSArray(self.height, self.width)
blit(c_fsa, w_fsa, x=x_offset, y=y_offset)
self.render_into_rect(c_fsa)
blit(w_fsa, c_fsa, x=-x_offset, y=-y_offset)
#w_fsa[0:w_fsa.height, 0:w_fsa.width] = c_fsa[y_offset:y_offset+w_fsa.height, x_offset:x_offset+w_fsa.width]
class Group(Widget):
def __init__(self, parent, widget_lm=None, width=None, height=None):
super(Group, self).__init__(parent=parent, width=None, height=None)
if widget_lm is None:
widget_lm = OverlayLayout()
self.widget_lm = widget_lm
self.widget_lm.window = self.lm.window
self.widget_lm.owner = self
def render_into_rect(self, w_fsa):
self.widget_lm.render_into_rect(w_fsa)
class Frame(Group):
def __init__(self,
parent,
widget_lm=None,
width=None,
height=None,
title=None,
border=False,
opaque=True,
decorator=None):
super(Frame, self).__init__(parent=parent, widget_lm=widget_lm, width=width, height=height)
self.title = title
self.border = border
self.opaque = opaque
if decorator:
self.decorator = decorator
else:
self.decorator = Decorator(fg='yellow', bg='green')
def render_into_rect(self, w_fsa):
width = w_fsa.width
height = w_fsa.height
if self.border:
c_fsa = FSArray(height-2, width-2)
if not self.opaque:
c_fsa[0:height-2, 0:width-2] = w_fsa[1:height-1, 1:width-1]
self.widget_lm.render_into_rect(c_fsa)
# TODO box via decorator
w_fsa[0:height, 0:width] = box(c_fsa, title=self.title, border_fmt=self.decorator.str_formatter())
else:
if self.title:
c_fsa = FSArray(height-1, width)
if not self.opaque:
c_fsa[0:height-1, 0:width] = w_fsa[1:height, 0:width]
self.widget_lm.render_into_rect(c_fsa)
w_fsa[1:height, 0:width] = c_fsa
# TODO title via decorator
w_fsa[0:1, 0:width] = [self.decorator.format_str(center(self.title, width))]
else:
if not self.opaque:
self.widget_lm.render_into_rect(w_fsa)
else:
c_fsa = FSArray(height, width)
self.widget_lm.render_into_rect(c_fsa)
w_fsa[0:height, 0:width] = c_fsa
class Scrollable(Group):
pass
class Text(Widget):
pass
class TextField(Widget):
pass
class Button(Widget):
pass
class Table(Widget):
pass
class Menu(Widget):
pass
class HFill(Widget):
def __init__(self, parent, height=1):
super(HFill, self).__init__(parent=parent, width=None, height=height)
def render_into_rect(self, w_fsa):
width = w_fsa.width
height = w_fsa.height
wrapped = wrap(u"Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.",
width, height, text_fmt=lambda x: fmtstr(x, fg='red'))
w_fsa[0:height, 0:width] = wrapped
class VFill(Widget):
def __init__(self, parent, width=1):
super(VFill, self).__init__(parent=parent, width=width, height=None)
def render_into_rect(self, w_fsa):
width = w_fsa.width
height = w_fsa.height
wrapped = wrap(u"Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.",
width, height, text_fmt=lambda x: fmtstr(x, fg='red'))
w_fsa[0:height, 0:width] = wrapped
|
bsd-3-clause
| 5,346,375,405,268,170,000 | 34.68984 | 618 | 0.598382 | false |
blackpioter/sendgrid-python
|
examples/helpers/mail/mail_example.py
|
1
|
8184
|
import json
import os
import urllib2
from sendgrid.helpers.mail import *
from sendgrid import *
# NOTE: you will need move this file to the root
# directory of this project to execute properly.
def build_hello_email():
"""Minimum required to send an email"""
from_email = Email("[email protected]")
subject = "Hello World from the SendGrid Python Library"
to_email = Email("[email protected]")
content = Content("text/plain", "some text here")
mail = Mail(from_email, subject, to_email, content)
mail.personalizations[0].add_to(Email("[email protected]"))
return mail.get()
def build_personalization(personalization):
"""Build personalization mock instance from a mock dict"""
mock_personalization = Personalization()
for to_addr in personalization['to_list']:
mock_personalization.add_to(to_addr)
for cc_addr in personalization['cc_list']:
mock_personalization.add_cc(cc_addr)
for bcc_addr in personalization['bcc_list']:
mock_personalization.add_bcc(bcc_addr)
for header in personalization['headers']:
mock_personalization.add_header(header)
for substitution in personalization['substitutions']:
mock_personalization.add_substitution(substitution)
for arg in personalization['custom_args']:
mock_personalization.add_custom_arg(arg)
mock_personalization.subject = personalization['subject']
mock_personalization.send_at = personalization['send_at']
return mock_personalization
def get_mock_personalization_dict():
"""Get a dict of personalization mock."""
mock_pers = dict()
mock_pers['to_list'] = [Email("[email protected]",
"Example User"),
Email("[email protected]",
"Example User")]
mock_pers['cc_list'] = [Email("[email protected]",
"Example User"),
Email("[email protected]",
"Example User")]
mock_pers['bcc_list'] = [Email("[email protected]"),
Email("[email protected]")]
mock_pers['subject'] = ("Hello World from the Personalized "
"SendGrid Python Library")
mock_pers['headers'] = [Header("X-Test", "test"),
Header("X-Mock", "true")]
mock_pers['substitutions'] = [Substitution("%name%", "Example User"),
Substitution("%city%", "Denver")]
mock_pers['custom_args'] = [CustomArg("user_id", "343"),
CustomArg("type", "marketing")]
mock_pers['send_at'] = 1443636843
return mock_pers
def build_attachment1():
"""Build attachment mock."""
attachment = Attachment()
attachment.content = ("TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNl"
"Y3RldHVyIGFkaXBpc2NpbmcgZWxpdC4gQ3JhcyBwdW12")
attachment.type = "application/pdf"
attachment.filename = "balance_001.pdf"
attachment.disposition = "attachment"
attachment.content_id = "Balance Sheet"
return attachment
def build_attachment2():
"""Build attachment mock."""
attachment = Attachment()
attachment.content = "BwdW"
attachment.type = "image/png"
attachment.filename = "banner.png"
attachment.disposition = "inline"
attachment.content_id = "Banner"
return attachment
def build_mail_settings():
"""Build mail settings mock."""
mail_settings = MailSettings()
mail_settings.bcc_settings = BCCSettings(True, Email("[email protected]"))
mail_settings.bypass_list_management = BypassListManagement(True)
mail_settings.footer_settings = FooterSettings(True, "Footer Text",
("<html><body>Footer "
"Text</body></html>"))
mail_settings.sandbox_mode = SandBoxMode(True)
mail_settings.spam_check = SpamCheck(True, 1,
"https://spamcatcher.sendgrid.com")
return mail_settings
def build_tracking_settings():
"""Build tracking settings mock."""
tracking_settings = TrackingSettings()
tracking_settings.click_tracking = ClickTracking(True, True)
tracking_settings.open_tracking = OpenTracking(True,
("Optional tag to "
"replace with the"
"open image in the "
"body of the message"))
subs_track = SubscriptionTracking(True,
("text to insert into the "
"text/plain portion of the"
" message"),
("<html><body>html to insert "
"into the text/html portion of "
"the message</body></html>"),
("Optional tag to replace with "
"the open image in the body of "
"the message"))
tracking_settings.subscription_tracking = subs_track
tracking_settings.ganalytics = Ganalytics(True, "some source",
"some medium", "some term",
"some_content", "some_campaign")
return tracking_settings
def build_kitchen_sink():
"""All settings set"""
mail = Mail()
mail.from_email = Email("[email protected]", "Example User")
mail.subject = "Hello World from the SendGrid Python Library"
personalization = get_mock_personalization_dict()
mail.add_personalization(build_personalization(personalization))
mail.add_personalization(build_personalization(personalization))
mail.add_content(Content("text/plain", "some text here"))
mail.add_content(Content("text/html", ("<html><body>some text "
"here</body></html>")))
mail.add_attachment(build_attachment1())
mail.add_attachment(build_attachment2())
mail.template_id = "13b8f94f-bcae-4ec6-b752-70d6cb59f932"
mail.add_section(Section("%section1%", "Substitution Text for Section 1"))
mail.add_section(Section("%section2%", "Substitution Text for Section 2"))
mail.add_header(Header("X-Test1", "test1"))
mail.add_header(Header("X-Test3", "test2"))
mail.add_category(Category("May"))
mail.add_category(Category("2016"))
mail.add_custom_arg(CustomArg("campaign", "welcome"))
mail.add_custom_arg(CustomArg("weekday", "morning"))
mail.send_at = 1443636842
# This must be a valid [batch ID]
# (https://sendgrid.com/docs/API_Reference/SMTP_API/scheduling_parameters.html) to work
# mail.set_batch_id("N2VkYjBjYWItMGU4OC0xMWU2LWJhMzYtZjQ1Yzg5OTBkNzkxLWM5ZTUyZjNhOA")
mail.asm = ASM(99, [4, 5, 6, 7, 8])
mail.ip_pool_name = "24"
mail.mail_settings = build_mail_settings()
mail.tracking_settings = build_tracking_settings()
mail.reply_to = Email("[email protected]")
return mail.get()
def send_hello_email():
# Assumes you set your environment variable:
# https://github.com/sendgrid/sendgrid-python/blob/master/TROUBLESHOOTING.md#environment-variables-and-your-sendgrid-api-key
sg = SendGridAPIClient()
data = build_hello_email()
response = sg.client.mail.send.post(request_body=data)
print(response.status_code)
print(response.headers)
print(response.body)
def send_kitchen_sink():
# Assumes you set your environment variable:
# https://github.com/sendgrid/sendgrid-python/blob/master/TROUBLESHOOTING.md#environment-variables-and-your-sendgrid-api-key
sg = SendGridAPIClient()
data = build_kitchen_sink()
response = sg.client.mail.send.post(request_body=data)
print(response.status_code)
print(response.headers)
print(response.body)
# this will actually send an email
send_hello_email()
# this will only send an email if you set SandBox Mode to False
send_kitchen_sink()
|
mit
| 6,226,576,901,921,451,000 | 36.374429 | 128 | 0.599585 | false |
moduspwnens/boa-nimbus
|
setup.py
|
1
|
1220
|
#!/usr/bin/env python

import os
import sys
from setuptools import setup, find_packages

requires = [
    "click==6.7",
    "PyYAML==3.12",
    "boto3==1.4.4",
    "botocore==1.5.48",
    "docutils==0.13.1",
    "jmespath==0.9.2",
    "mime==0.1.0",
    "python-dateutil==2.6.0",
    "s3transfer==0.1.10",
    "six==1.10.0"
]

setup_options = dict(
    name='boa-nimbus',
    version=open(os.path.join(os.path.dirname(os.path.realpath(__file__)), "boa_nimbus", "version.txt")).read(),
    description='boa-nimbus CLI',
    long_description=open('README.md').read(),
    author='Benn Linger',
    url='https://github.com/moduspwnens/boa-nimbus',
    packages=find_packages(exclude=['tests*']),
    install_requires=requires,
    include_package_data=True,
    entry_points = '''
        [console_scripts]
        boa-nimbus=boa_nimbus.cli:cli
    ''',
    classifiers=(
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Natural Language :: English',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ),
)

setup(**setup_options)
|
mit
| -4,661,649,310,221,793,000 | 26.133333 | 112 | 0.6 | false |
spino327/sdr_testbed
|
DistributedTestbed/transmitter/TxUsrp.py
|
1
|
4423
|
'''
Copyright (c) 2011, Universidad Industrial de Santander, Colombia
University of Delaware
All rights reserved.
@author: Sergio Pino
@author: Henry Arguello
Website: http://www.eecis.udel.edu/
emails : [email protected] - [email protected]
Date : Apr, 2011
'''
from gnuradio import gr
from gnuradio.gr import firdes
from gnuradio import blks2
from util import USRP2Conf
class TxUSRP(gr.hier_block2):
'''
This class handles fixing the sample rate and also fixing the frequency error.
It resamples a lower input signal rate to the rate required by the USRP:
--->(pfb_resampler)--->(xlating_filter)--->(usrp_sink)
'''
def __init__(self, *params):
gr.hier_block2.__init__(self, "TxUSRP",
gr.io_signature(1, 1, gr.sizeof_gr_complex),
gr.io_signature(0, 0, 0))
if len(params) == 7:
self.__uhd(params[0], params[1], params[2], params[3], params[4], params[5], params[6])
# elif len(params) == 6:
# self.__raw(params[0], params[1], params[2], params[3], params[4], params[5])
else:
raise Exception()
def __uhd(self, fc, lo_off, inter, gain, addr, inSampRate, sync):
'''
in:
- fc = center frequency
- lo_off = LO off
- inter = interpolation factor
- gain = gain in the tx, only with 2450
- addr = ip address, format = "addr=ip, mimo_mode="
- inSampRate = incoming sample frequency, basically here we determine the re-sampler interpolation factor
- sync = True if we're going to use an external ref clock
'''
# instance variables
self.type = "UHD"
(self.tx, basebandFreq, dxcFreq) = USRP2Conf.getUhdUSRPSink(fc, lo_off, inter, gain, addr, sync)
sampRate = float(self.tx.get_clock_rate())/inter
self.postProcessing(inSampRate, dxcFreq, sampRate)
# def __raw(self, fc, inter, gain, eth, inSampRate, sync):
# '''
# in:
# - fc = center frequency
# - inter = interpolation factor
# - gain = gain in the tx, only with 2450
# - eth = ethernet interface name(String)
# - inSampRate = incoming sample frequency, basically here we determine the re-sampler interpolation factor
# - sync = True if we're going to use an external ref clock
#
# '''
#
# # instance variables
# self.type = "RAW"
# (self.tx, basebandFreq, dxcFreq) = USRP2Conf.getUSRP2Sink(fc, inter, gain, eth, sync)
# sampRate = float(self.tx.dac_rate())/inter
# self.postProcessing(inSampRate, dxcFreq, sampRate)
def postProcessing(self, inSampRate, dxcFreq, sampRate):
# xlating
if dxcFreq != 0:
xlateFilterTaps = firdes.low_pass(1, sampRate, sampRate / 2, sampRate / 10, firdes.WIN_HAMMING, 6.76)
self.xlatingFilter = gr.freq_xlating_fir_filter_ccc(1, (xlateFilterTaps),
dxcFreq,
sampRate)
print "i: xlating filter fixed to " + str(dxcFreq)
else:
self.xlatingFilter = gr.multiply_const_vcc((1, ))
print "i: xlating filter not needed"
# pfb resampler
self.resamplerFactor = sampRate / inSampRate
nphases = 32
frac_bw = 0.45
rs_taps = firdes.low_pass(nphases, nphases, frac_bw, 0.5 - frac_bw)
self.resampler = blks2.pfb_arb_resampler_ccf(self.resamplerFactor,
(rs_taps),
nphases)
print "i: re-sampler relation new_freq/old_freq = " + str(self.resamplerFactor)
#EO instance variables
self.isRTEnable = gr.enable_realtime_scheduling()
if self.isRTEnable == gr.RT_OK:
print "i: realtime enable: True"
else:
print "i: realtime enable: False"
# Connections
self.connect((self, 0), (self.resampler, 0), (self.xlatingFilter, 0), (self.tx, 0))
def dac_rate(self):
'''
return the DAC rate in Hz
'''
if self.type == "UHD":
return self.tx.get_clock_rate()
else:
return self.tx.dac_rate()
|
apache-2.0
| -164,745,593,080,653,150 | 34.677419 | 123 | 0.558671 | false |
lono-devices/lono-python
|
lono/lonoclient.py
|
1
|
5989
|
import requests
import json
import re
from device import Device
class LonoClient(object):
"""
This class is used to communicate with Lono, an internet-of-things
conencted sprinkler controller and one of the first outdoor smart
home companies.
(If you aren't familiar, check us out at http://lono.io)
A typical workflow for a single user application:
lc = LonoClient(
client_id="client id",
client_secret="client secret",
redirect_on_success="http://lono.io",
scope=["write"],
auth_token="auth token here"
)
print "Ready to make requests!"
A typical workflow for a multi user application:
lc = LonoClient(
client_id="client id",
client_secret="client secret",
redirect_on_success="http://lono.io",
scope=["write"]
)
# When a user wants to authorize themselves, redirect them:
print "Redirect a user here:", lc.authorize_url()
# Once the OAuth2 callback has been called, pass in the auth token
print "Pass in the auth token on callback:", lc.callback("auth token")
# and, done!
print "Ready to make requests!", lc.callback("auth token")
"""
def __init__(self, **kwargs):
self.opts = kwargs
self.token = None
self.api_version = "v1"
# make sure the user at least specified a device id and secret
if not self.opts.has_key("client_id") or not self.opts.has_key("client_secret"):
raise Exception("client_id or client_secret (or both?) wern't specified.")
# api root
if self.opts.has_key("is_dev") and self.opts["is_dev"]:
self.site_root = "http://127.0.0.1:3000"
else:
self.site_root = "http://make.lono.io"
# Did user specify auth token? If so, save it.
if self.opts.has_key("auth_token") and self.opts["auth_token"]:
self.save_token(self.opts["auth_token"])
def authorize_url(self, redirect_on_success=None):
"""
Return the url the user should be redirected to to start the OAuth2
handshake.
> lc = LonoClient(client_id="...", ...) # etc...
> lc.authorize_url()
"""
return "{0}/dialog/authorize?response_type=code&client_id={1}&redirect_uri={2}&scope={3}".format(
self.site_root,
self.opts["client_id"],
redirect_on_success or self.opts["redirect_on_success"],
' '.join(self.opts.has_key("scope") and self.opts["scope"] or ["write"])
)
def save_token(self, token):
"""
save_token(token)
Exchange an authorization code for an access token. This completes the OAuth2
handshake. This is synonymous with callback(token).
> lc = LonoClient(client_id="...", ...) # etc...
> lc.save_token(token="auth token")
"access token"
"""
url = self.site_root + "/oauth/token"
data = json.dumps({
"grant_type": "authorization_code",
"client_id": self.opts["client_id"],
"client_secret": self.opts["client_secret"],
"code": token
})
headers = {'content-type': 'application/json'}
r = requests.request("POST", url, data=data, headers=headers)
if r.status_code == 400:
raise Exception("Bad client id, secret, or token")
elif r.status_code == 200:
body = json.loads(r.text)
self.token = body["access_token"]
return {
"status": "success",
"data": self.token
}
else:
raise Exception("Unknown error: "+r.text)
def callback(self, token):
"""
callback(token)
Exchange an authorization code for an access token. This completes the OAuth2
handshake. This is synonymous with save_token(token).
> lc = LonoClient(client_id="...", ...) # etc...
> lc.callback(token="auth token")
"access token"
"""
self.save_token(token)
def query_device(self, device_id, query):
"""
query_device(device_id, query)
Send a query to a lono. This method shouldn't really be used by the
user (unless you are trying to accomplish something specific) because it
is called internally to make all of the api calls.
> lc = LonoClient(client_id="...", ...) # etc...
> lc.query_device("device id", {"url": "zones/0/on", method: "get"})
"""
# check both that we have a valid lono id and we have an access token.
valid_lono_id = re.match("[a-f0-9]{24}", device_id)
if self.token and valid_lono_id:
url = "{0}/api/{1}/devices/{2}/{3}".format(
self.site_root,
self.api_version,
device_id,
query["url"]
)
# stringify the body
data = json.dumps(query.has_key("body") and query["body"] or {})
headers = {
"content-type": "application/json",
"authorization": "bearer {0}".format(self.token)
}
r = requests.request(
query["method"].upper() or "GET",
url,
data=data,
headers=headers,
timeout=10
)
# so, this probably isn't needed here, but we plan on adding some
# more logic for handling success/error here later TODO
if r.status_code == 200:
# success!
return json.loads(r.text)
else:
# error
return json.loads(r.text)
elif valid_lono_id:
raise Exception("No access token has been fetched from the lono cloud")
else:
raise Exception("Invalid lono id")
def get_device(self, device_id):
return Device(self, device_id)
|
mit
| -7,704,560,314,129,709,000 | 31.906593 | 105 | 0.549507 | false |
tsbischof/photon_correlation
|
scripts/plot_intensity.py
|
1
|
2283
|
#!/usr/bin/env python3
import csv
import sys
import argparse
import matplotlib.pyplot as plt
import photon_correlation as pc
def intensity_from_stream(stream):
for line in csv.reader(stream):
time_left = int(line[0])
time_right = int(line[1])
counts = map(int, line[2:])
yield(((time_left, time_right), counts))
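# Assumed input format for the parser above (inferred from the code, not
# documented upstream): each CSV row is "time_left,time_right,count_0,count_1,...",
# with the two time columns in picoseconds for t2-mode data.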
def plot_intensity(intensity, mode="t2"):
plt.clf()
if mode == "t2":
times = list(map(lambda x: float(x[0][0])/1e12, intensity))
counts = list(map(
lambda x: list(map(
lambda y: float(y)/(x[0][1]-x[0][0])*10**12,
x[1])),
intensity))
for i in range(len(counts[0])):
plt.plot(times,
list(map(lambda x: x[i], counts)),
label=str(i))
plt.xlabel("Time/s")
plt.ylabel("PL intensity/(counts/second)")
elif mode == "t3":
times = list(map(lambda x: float(x[0][0]), intensity))
counts = list(map(
lambda x: list(map(
lambda y: float(y)/(x[0][1]-x[0][0]),
x[1])),
intensity))
for i in range(len(counts[0])):
plt.plot(times,
list(map(lambda x: x[i], counts)),
label=str(i))
plt.xlabel("Pulse number")
plt.ylabel("PL intensity/(counts/pulse)")
else:
raise(ValueError("Unknown mode: {0}".format(mode)))
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Plot an intensity.")
parser.add_argument("--mode", default="t2", type=str,
help="Mode of the photons, either t2 or t3.")
parser.add_argument("files", type=str, nargs="*",
help="Filenames containing g2 data to plot.")
args = parser.parse_args()
for filename in args.files:
intensity = pc.Intensity(filename=filename)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
intensity.normalized().add_to_axes(ax)
plt.show(fig)
# with open(filename) as stream_in:
# intensity = list(intensity_from_stream(stream_in))
#
# plot_intensity(intensity, mode=args.mode)
|
bsd-3-clause
| 7,184,816,256,500,360,000 | 28.269231 | 70 | 0.526938 | false |
justanotherbrain/HebbLearn
|
demo.py
|
1
|
1724
|
import sys
import os.path
import HebbLearn as hl
import numpy as np
import matplotlib.pyplot as plt
try:
import h5py
except:
print('h5py cannot be loaded - may cause error')
pass
fl = hl.NonlinearGHA()
if os.path.isfile('processed_data.npy'):
print('==> Load previously saved (preprocessed) data')
unlabeled = np.load('processed_data.npy')
else:
print('==> Loading data')
f = h5py.File('/scratch/mad573/stl10/unlabeled.mat')
u = f['X'][()]
temp = np.reshape(u, (3,96,96,100000))
temp = np.swapaxes(temp,0,2)
unlabeled = np.zeros((96,96,100000))
print('==> Preprocessing data')
for i in range(100000):
unlabeled[:,:,i] = hl.rgb2gray(temp[:,:,:,i])
if np.max(unlabeled[:,:,i])>1:
unlabeled[:,:,i] = unlabeled[:,:,i]/255
np.save('processed_data.npy',unlabeled)
print('==> mean centering data')
pop_mean = np.mean(unlabeled)
unlabeled = unlabeled - pop_mean
pop_std = np.std(unlabeled)
unlabeled = unlabeled/pop_std
#plt.imshow(unlabeled[:,:,0], cmap=plt.get_cmap('gray'))
#plt.show()
if len(sys.argv)>1:
filter_size = int(sys.argv[1])
step_size = int(sys.argv[2])
out_dimension = int(sys.argv[3])
LR = float(sys.argv[4])
n_samples = int(sys.argv[5])
else:
filter_size = 8
step_size = 1
out_dimension = 8
LR = 1
n_samples = 100000
nonlinearity = hl.TANH
print('==> Training')
weights = fl.Train(unlabeled[:,:,:n_samples], filter_size, step_size, out_dimension, LR, nonlinearity)
np.save('nl-stl-dev-weights.npy',weights)
output = fl.ImageReconstruction(unlabeled[:,:,0], weights, filter_size, step_size, nonlinearity)
#plt.imshow(output, cmap=plt.get_cmap('gray'))
#plt.show()
|
mit
| 9,174,103,842,749,542,000 | 21.684211 | 102 | 0.637471 | false |
osks/gearman-dashboard
|
gearmandashboard/models.py
|
1
|
1059
|
import gearman

gearman_hostports = [('gearman01', 4730), ('gearman02', 4730)]

gearman_connections = {}
for hostport in gearman_hostports:
    gearman_connections[hostport] = gearman.GearmanAdminClient([hostport])


def get_info_from_gearman():
    server_infos = []
    for hostport in gearman_hostports:
        server_info = { 'hostport': hostport }
        try:
            gm_conn = gearman_connections[hostport]
            version = gm_conn.get_version()
            status = gm_conn.get_status()
            cl_wks = gm_conn.get_workers()
            clients = [ w for w in cl_wks if len(w['tasks']) == 0 ]
            workers = [ w for w in cl_wks if len(w['tasks']) > 0 ]
            server_info['version'] = version
            server_info['status'] = status
            server_info['workers'] = workers
            server_info['clients'] = clients
            server_info['failed'] = False
        except:
            server_info['failed'] = True
        server_infos.append(server_info)
    return server_infos
|
mit
| 1,503,034,650,286,523,600 | 33.16129 | 74 | 0.561851 | false |
hychrisli/PyAlgorithms
|
src/solutions/part2/q106_construct_bitree_in_post_order.py
|
1
|
1275
|
from src.base.solution import Solution
from src.tests.part2.q106_test_construct_bitree_in_post_order import ConstructBiTreeInPostOrderTestCases
from src.structures.treenode import TreeNode


class ConstructBiTreeInPostOrder(Solution):

    def gen_test_cases(self):
        return ConstructBiTreeInPostOrderTestCases()

    def print_output(self, output):
        print(output.get_tree_str())

    def run_test(self, input):
        return self.buildTree(input[0], input[1])

    def verify_output(self, test_output, output):
        return test_output.get_tree_str() == output.get_tree_str()

    def buildTree(self, inorder, postorder):
        """
        :type inorder: List[int]
        :type postorder: List[int]
        :rtype: TreeNode
        """
        def helper(ins, ine, pts, pte):
            if ins > ine: return None
            root = TreeNode(postorder[pte])
            iroot = inorder.index(postorder[pte])
            jroot = iroot - ins + pts
            root.left = helper(ins, iroot - 1, pts, jroot - 1)
            root.right = helper(iroot + 1, ine, jroot, pte - 1)
            return root

        return helper(0, len(inorder) - 1, 0, len(postorder) - 1)
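
# Worked example (illustrative, not part of the original solution):
# inorder = [9, 3, 15, 20, 7], postorder = [9, 15, 7, 20, 3]. The last
# postorder element (3) is the root; it splits inorder into the left
# subtree [9] and the right subtree [15, 20, 7], which are rebuilt
# recursively, giving 3 -> (left 9, right 20 -> (left 15, right 7)).
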
if __name__ == '__main__':
    sol = ConstructBiTreeInPostOrder()
    sol.run_tests()
|
apache-2.0
| -8,719,448,121,257,199,000 | 28.674419 | 104 | 0.618824 | false |
dsweet04/rekall
|
rekall-core/rekall/plugins/windows/heap_analysis.py
|
1
|
16866
|
# Rekall Memory Forensics
# Copyright 2014 Google Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""The module implements user mode heap analysis.
Recent versions of windows use the Low Fragmentation Heap (LFH).
http://illmatics.com/Windows%208%20Heap%20Internals.pdf
http://illmatics.com/Understanding_the_LFH.pdf
http://www.leviathansecurity.com/blog/understanding-the-windows-allocator-a-redux/
"""
from rekall import scan
from rekall.plugins import core
from rekall.plugins.windows import common
from rekall_lib import utils
class InspectHeap(common.WinProcessFilter):
"""Inspect the process heap.
This prints a lot of interesting facts about the process heap. It is also
the foundation to many other plugins which find things in the process heaps.
NOTE: Currently we only support Windows 7 64 bit.
"""
name = "inspect_heap"
__args = [
dict(name="free", type="Boolean",
help="Also show freed chunks."),
dict(name="heaps", type="ArrayIntParser",
help="Only show these heaps (default show all)")
]
mode = "mode_amd64"
def __init__(self, *args, **kwargs):
super(InspectHeap, self).__init__(*args, **kwargs)
self.segments = utils.SortedCollection()
def enumerate_lfh_heap_allocations(self, heap, skip_freed=False):
"""Dump the low fragmentation heap."""
seen_blocks = set()
for lfh_block in heap.FrontEndHeap.SubSegmentZones.list_of_type(
"_LFH_BLOCK_ZONE", "ListEntry"):
block_length = lfh_block.FreePointer.v() - lfh_block.obj_end
segments = heap.obj_profile.Array(
target="_HEAP_SUBSEGMENT",
offset=lfh_block.obj_end,
size=block_length)
for segment in segments:
allocation_length = segment.BlockSize * 16
if segment.UserBlocks.v() in seen_blocks:
break
seen_blocks.add(segment.UserBlocks.v())
for entry in segment.UserBlocks.Entries:
# http://www.leviathansecurity.com/blog/understanding-the-windows-allocator-a-redux/
# Skip freed blocks if requested.
if skip_freed and entry.UnusedBytes & 0x38:
continue
UnusedBytes = entry.UnusedBytes & 0x3f - 0x8
# The actual length of user allocation is the difference
# between the HEAP allocation bin size and the unused bytes
# at the end of the allocation.
data_len = allocation_length - UnusedBytes
# The data length can not be larger than the allocation
# minus the critical parts of _HEAP_ENTRY. Sometimes,
# allocations overrun into the next element's _HEAP_ENTRY so
# they can store data in the next entry's
# entry.PreviousBlockPrivateData. In this case the
# allocation length seems to be larger by 8 bytes.
if data_len > allocation_length - 0x8:
data_len -= 0x8
yield (heap.obj_profile.String(entry.obj_end, term=None,
length=data_len),
allocation_length)
def enumerate_backend_heap_allocations(self, heap):
"""Enumerate all allocations for _EPROCESS instance."""
for seg in heap.Segments:
seg_end = seg.LastValidEntry.v()
# Ensure sanity.
if seg.Heap.deref() != heap:
continue
# The segment is empty - often seg_end is zero here.
if seg_end < seg.FirstEntry.v():
break
for entry in seg.FirstEntry.walk_list("NextEntry", True):
# If this is the last entry it goes until the end of the
# segment.
start = entry.obj_offset + 0x10
if start > seg_end:
break
allocation = entry.Allocation
yield allocation
def GenerateHeaps(self):
task = self.session.GetParameter("process_context")
resolver = self.session.address_resolver
# Try to load the ntdll profile.
ntdll_mod = resolver.GetModuleByName("ntdll")
if not ntdll_mod:
return
ntdll_prof = ntdll_mod.profile
# Set the ntdll profile on the _PEB member.
peb = task.m("Peb").cast(
"Pointer", target="_PEB", profile=ntdll_prof,
vm=task.get_process_address_space())
for heap in peb.ProcessHeaps:
yield heap
def render(self, renderer):
cc = self.session.plugins.cc()
with cc:
for task in self.filter_processes():
cc.SwitchProcessContext(task)
renderer.section()
renderer.format("{0:r}\n", task)
for heap in self.GenerateHeaps():
self.render_process_heap_info(heap, renderer)
def render_low_frag_info(self, heap, renderer):
"""Displays information about the low fragmentation front end."""
renderer.format("Low Fragmentation Front End Information:\n")
renderer.table_header([
dict(name="Entry", style="address"),
("Alloc", "allocation_length", "4"),
("Length", "length", ">4"),
dict(name="Data"),
])
# Render the LFH allocations in increasing allocation sizes. Collect
# them first, then display by sorted allocation size, and offset.
entries_by_size = {}
for entry, allocation_length in self.enumerate_lfh_heap_allocations(
heap):
entries_by_size.setdefault(allocation_length, []).append(entry)
for allocation_length, entries in sorted(entries_by_size.iteritems()):
for entry in sorted(entries, key=lambda x: x.obj_offset):
data = entry.v()[:64]
renderer.table_row(
entry,
allocation_length,
entry.length,
utils.HexDumpedString(data),
)
def render_process_heap_info(self, heap, renderer):
if (self.plugin_args.heaps and
heap.ProcessHeapsListIndex not in self.plugin_args.heaps):
return
if 1 <= heap.ProcessHeapsListIndex <= 64:
renderer.format("Heap {0}: {1:#x} ({2})\nBackend Info:\n\n",
heap.ProcessHeapsListIndex,
heap.BaseAddress,
heap.FrontEndHeapType)
renderer.table_header([
dict(name="Segment", type="TreeNode", width=18,
child=dict(style="address")),
("End", "segment_end", "[addr]"),
("Length", "length", "8"),
dict(name="Data"),
])
for seg in heap.Segments:
seg_start = seg.FirstEntry.obj_offset
seg_end = seg.LastValidEntry.v()
renderer.table_row(
seg_start, seg_end, seg_end - seg_start, depth=1)
for entry in seg.FirstEntry.walk_list("NextEntry", True):
# If this is the last entry it goes until the end of the
# segment.
start = entry.obj_offset + 0x10
if start > seg_end:
break
if entry.Flags.LAST_ENTRY:
end = seg.LastValidEntry.v()
else:
end = entry.obj_offset + entry.Size * 16
data = heap.obj_vm.read(start, min(16, end-start))
renderer.table_row(
entry,
end, end - start,
utils.HexDumpedString(data),
depth=2)
if heap.FrontEndHeapType.LOW_FRAG:
self.render_low_frag_info(heap, renderer)
class ShowAllocation(common.WindowsCommandPlugin):
"""Show the allocation containing the address."""
name = "show_allocation"
__args = [
dict(name="address", type="ArrayIntParser", positional=True,
help="The address to display"),
dict(name="preamble", type="IntParser", default=32,
help="How many bytes prior to the address to display."),
dict(name="length", type="IntParser", default=50 * 16,
help="How many bytes after the address to display.")
]
def BuildAllocationMap(self):
"""Build a map of all allocations for fast looksup."""
allocations = utils.RangedCollection()
inspect_heap = self.session.plugins.inspect_heap()
for heap in inspect_heap.GenerateHeaps():
# First do the backend allocations.
for allocation in inspect_heap.enumerate_backend_heap_allocations(
heap):
# Include the header in the allocation.
allocations.insert(
allocation.obj_offset - 16,
allocation.obj_offset + allocation.length + 16,
(allocation.obj_offset, allocation.length, "B"))
self.session.report_progress(
"Enumerating backend allocation: %#x",
lambda allocation=allocation: allocation.obj_offset)
# Now do the LFH allocations (These will mask the subsegments in the
# RangedCollection).
for _ in inspect_heap.enumerate_lfh_heap_allocations(
heap, skip_freed=False):
allocation, allocation_length = _
self.session.report_progress(
"Enumerating frontend allocation: %#x",
lambda: allocation.obj_offset)
# Front end allocations do not have their own headers.
allocations.insert(
allocation.obj_offset,
allocation.obj_offset + allocation_length,
(allocation.obj_offset, allocation_length, "F"))
return allocations
def __init__(self, *args, **kwargs):
super(ShowAllocation, self).__init__(*args, **kwargs)
self.offset = None
# Get cached allocations for current process context.
task = self.session.GetParameter("process_context")
cache_key = "heap_allocations_%x" % task.obj_offset
self.allocations = self.session.GetParameter(cache_key)
if self.allocations == None:
self.allocations = self.BuildAllocationMap()
# Cache the allocations for next time.
self.session.SetCache(cache_key, self.allocations)
def GetAllocationForAddress(self, address):
return self.allocations.get_containing_range(address)
def CreateAllocationMap(self, start, length, alloc_start, alloc_type):
address_map = core.AddressMap()
# For backend allocs we highlight the heap entry before them.
if alloc_type == "B":
address_map.AddRange(alloc_start-16, alloc_start, "_HEAP_ENTRY")
# Try to interpret pointers to other allocations and highlight them.
count = length / 8
for pointer in self.profile.Array(
offset=start, count=count, target="Pointer"):
name = None
alloc_start, alloc_length, alloc_type = (
self.allocations.get_containing_range(pointer.v()))
if alloc_type is not None:
# First check if the pointer points inside this allocation.
if alloc_start == start + 16:
name = "+%#x(%#x)" % (pointer.v() - start, pointer.v())
else:
name = "%#x(%s@%#x)" % (
pointer.v(), alloc_length, alloc_start)
else:
# Maybe it is a resolvable address.
name = ",".join(self.session.address_resolver.format_address(
pointer.v(), max_distance=1024*1024))
if name:
address_map.AddRange(
pointer.obj_offset, pointer.obj_offset + 8,
# Color it using a unique color related to the address. This
# helps to visually relate the same address across different
# dumps.
"%s" % name, color_index=pointer.obj_offset)
return address_map
def render(self, renderer):
for address in self.plugin_args.address:
# If the user requested to view more than one address we do not
# support plugin continuation (with v() plugin).
if len(self.plugin_args.address) > 1:
self.offset = None
alloc_start, alloc_length, alloc_type = (
self.allocations.get_containing_range(address))
if not alloc_type:
renderer.format("Allocation not found for address "
"{0:style=address} in any heap.\n", address)
alloc_start = address
alloc_length = 50 * 16
alloc_type = None
else:
renderer.format(
"Address {0:style=address} is {1} bytes into "
"{2} allocation of size {3} "
"({4:style=address} - {5:style=address})\n",
address, address - alloc_start, alloc_type,
alloc_length, alloc_start, alloc_start + alloc_length)
# Start dumping preamble before the address if self.offset is not
# specified. It will be specified when we run the plugin again using
# v().
if self.offset is None:
# Start dumping a little before the requested address, but do
# not go before the start of the allocation.
start = max(alloc_start, address - self.plugin_args.preamble)
else:
# Continue dumping from the last run.
start = self.offset
# Also show the _HEAP_ENTRY before backend allocations (Front end
# allocations do not have a _HEAP_ENTRY).
if alloc_type == "B":
start -= 16
length = min(alloc_start + alloc_length - start,
self.plugin_args.length)
dump = self.session.plugins.dump(
offset=start, length=length,
address_map=self.CreateAllocationMap(
start, length, alloc_start, alloc_type))
dump.render(renderer)
self.offset = dump.offset
class FindReferenceAlloc(common.WindowsCommandPlugin):
"""Show allocations that refer to an address."""
name = "show_referrer_alloc"
__args = [
dict(name="address", type="IntParser", positional=True, required=True,
help="The address to display")
]
def get_referrers(self, address, maxlen=None):
addr = self.profile.address()
addr.write(address)
pointer_scanner = scan.BaseScanner(
address_space=self.session.GetParameter("default_address_space"),
session=self.session,
checks=[
('StringCheck', dict(needle=addr.obj_vm.getvalue()))
])
# Just scan the entire userspace address space. This means we might find
# hits outside the heap but this is usually useful as it would locate
# static pointers in dlls.
if maxlen is None:
maxlen = self.session.GetParameter("highest_usermode_address")
for hit in pointer_scanner.scan(maxlen=maxlen):
yield hit
def render(self, renderer):
show_allocation = None
for hit in self.get_referrers(self.address):
show_allocation = self.session.plugins.show_allocation(hit)
show_allocation.render(renderer)
return show_allocation
|
gpl-2.0
| 5,344,717,608,450,970,000 | 37.594966 | 104 | 0.563441 | false |
t-neumann/slamdunk
|
bin/_preamble.py
|
1
|
1062
|
# Copyright (c) 2015 Tobias Neumann, Philipp Rescheneder.
#
# This file is part of Slamdunk.
#
# Slamdunk is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Slamdunk is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys, os

path = os.path.abspath(sys.argv[0])
while os.path.dirname(path) != path:
    if os.path.exists(os.path.join(path, 'slamdunk', '__init__.py')):
        #sys.path.insert(0, os.path.join(path, 'slamdunk'))
        sys.path.insert(0, path)
        break
    path = os.path.dirname(path)
|
agpl-3.0
| -4,328,581,427,840,954,400 | 38.333333 | 74 | 0.707156 | false |
benspaulding/django-faq
|
docs/conf.py
|
1
|
7409
|
# -*- coding: utf-8 -*-
#
# django-faq documentation build configuration file, created by
# sphinx-quickstart on Sat Sep 17 13:09:21 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
intersphinx_mapping = {
'python': ('http://python.readthedocs.org/en/latest/', None),
'django': ('http://django.readthedocs.org/en/latest/', None),
'sphinx': ('http://sphinx.readthedocs.org/en/latest/', None),
}
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-faq'
copyright = u'2012, Ben Spaulding'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.8'
# The full version, including alpha/beta/rc tags.
release = '0.8.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-faqdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-faq.tex', u'django-faq Documentation',
u'Ben Spaulding', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-faq', u'django-faq Documentation',
[u'Ben Spaulding'], 1)
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
bsd-3-clause
| 5,231,834,295,907,184,000 | 31.783186 | 80 | 0.706978 | false |
radjkarl/imgProcessor
|
imgProcessor/measure/SNR/SNR_IEC.py
|
1
|
1187
|
# coding=utf-8
from __future__ import division

import numpy as np


def SNR_IEC(i1, i2, ibg=0, allow_color_images=False):
    '''
    Calculate the averaged signal-to-noise ratio SNR50
    as defined by IEC NP 60904-13
    needs 2 reference EL images and one background image
    '''
    # ensure images are type float64 (double precision):
    i1 = np.asfarray(i1)
    i2 = np.asfarray(i2)
    if ibg is not 0:
        ibg = np.asfarray(ibg)
        assert i1.shape == ibg.shape, 'all input images need to have the same resolution'
    assert i1.shape == i2.shape, 'all input images need to have the same resolution'
    if not allow_color_images:
        assert i1.ndim == 2, 'Images need to be in grayscale according to the IEC standard'
    # SNR calculation as defined in 'IEC TS 60904-13':
    signal = 0.5 * (i1 + i2) - ibg
    noise = 0.5**0.5 * np.abs(i1 - i2) * ((2 / np.pi)**-0.5)
    if signal.ndim == 3:  # color
        signal = np.average(signal, axis=2, weights=(0.114, 0.587, 0.299))
        noise = np.average(noise, axis=2, weights=(0.114, 0.587, 0.299))
    signal = signal.sum()
    noise = noise.sum()
    return signal / noise
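
# Illustrative usage sketch (added for clarity, not part of the original
# module): i1 and i2 stand in for two repeated EL exposures of the same
# device; the arrays below are synthetic placeholders, not real data.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    base = 1000.0 * np.ones((32, 32))
    i1 = base + rng.normal(0, 10, (32, 32))
    i2 = base + rng.normal(0, 10, (32, 32))
    print(SNR_IEC(i1, i2))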
|
gpl-3.0
| -3,386,865,506,410,302,500 | 33.969697 | 91 | 0.613311 | false |
watchtower/asynctest
|
asynctest/__init__.py
|
1
|
2295
|
import functools


class TestStatus:  # FIXME should really be an Enum
    pending = -1
    failure = 0
    success = 1


class Test:
    def __init__(self, func, description):
        self.func = func
        self.description = description
        self.status = TestStatus.pending
        self._callback = None
        self.manager = None

    def callback(self, f):
        self._callback = f
        return f

    def success(self):
        if self.status == TestStatus.pending:
            self.status = TestStatus.success
            self.manager._test_complete(self)

    def failure(self):
        if self.status == TestStatus.pending:
            self.status = TestStatus.failure
            self.manager._test_complete(self)

    def succeed_if(self, condition):
        if condition:
            self.success()
        else:
            self.failure()

    def __call__(self):
        if self.func is not None:
            self.func()
        if self._callback:
            self._callback()


class test:
    def __init__(self, description):
        self.description = description

    def __call__(self, f):
        return Test(f, self.description)


class TestManager:
    def __init__(self, tests):
        self.tests = tests
        self.test_status = []
        if any(not isinstance(i, Test) for i in self.tests):
            raise TypeError("Non-test passed to TestManager")
        for t in self.tests:
            t.manager = self

    def add_test(self, t):
        if not isinstance(t, Test):
            raise TypeError("Non-test passed to TestManager")
        t.manager = self

    def _all_tests_complete(self):
        print("{} tests complete.".format(len(self.tests)))
        success = len([t for t in self.tests if t.status])
        self.successes = success
        print("There were {} successes, {} failures.".format(success, len(self.tests) - success))

    def _test_complete(self, t):
        self.test_status.append((t.description, t.status))
        print("{}: {}".format(t.description, "success" if t.status else "failure"))
        if len(self.test_status) == len(self.tests):
            self._all_tests_complete()

    def run_all(self):
        for t in self.tests:
            t()
        return sum([t.status == TestStatus.failure for t in self.tests])
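
# Illustrative usage sketch (added for clarity, not part of the original
# module): one passing test driven through a TestManager.
if __name__ == '__main__':
    @test("addition works")
    def addition_test():
        addition_test.succeed_if(1 + 1 == 2)

    manager = TestManager([addition_test])
    manager.run_all()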
|
mit
| 2,558,572,069,989,067,300 | 27.6875 | 97 | 0.578214 | false |
ajrichards/htsint
|
htsint/tools/ExpressionLib.py
|
1
|
4968
|
#!/usr/bin/env python
"""
tools for expression and count based tasks
"""
import os,sys,csv,gc,re
import numpy as np
def read_RSEM_counts_files(geneFilePath,isoformFilePath):
"""
read the RSEM counts files into a matrix
"""
if not os.path.exists(geneFilePath):
raise Exception("Cannot find gene file\n%s"%(geneFilePath))
if not os.path.exists(isoformFilePath):
raise Exception("Cannot find isoform file\n%s"%(isoformFilePath))
## load the gene counts
fid1 = open(geneFilePath,'rU')
reader1 = csv.reader(fid1,delimiter="\t")
header1 = next(reader1)
results1 = {}
check = 0
gc.disable()
for linja in reader1:
check += 1
results1[linja[0]] = {'transcript':linja[1],'length':float(linja[2]),'eff_length':float(linja[3]),\
'exp_count':int(round(float(linja[4]))),'TPM':float(linja[5]),'FPKM':float(linja[6])}
fid1.close()
if check != len(results1.keys()):
raise Exception("Rows in gene count file are not first columns unique")
## load the isoform results
fid2 = open(isoformFilePath,'rU')
reader2 = csv.reader(fid2,delimiter="\t")
header2 = next(reader2)
results2 = {}
check = 0
for linja in reader2:
check += 1
results2[linja[0]] = {'gene':linja[1],'length':float(linja[2]),'eff_length':float(linja[3]),\
'exp_count':float(linja[4]),'TPM':float(linja[5]),'FPKM':float(linja[6])}
fid1.close()
if check != len(results2.keys()):
raise Exception("Rows in gene count file are not first columns unique")
fid2.close()
gc.enable()
return results1, results2
def read_matrix(matFilePath,delimiter=",",mtype='float'):
"""
assumes that row one are the samples and col one are the transcripts
matrix can only be of mtype 'int' or 'float'
"""
print('reading', matFilePath)
if mtype not in ['int','float']:
raise Exception("mtype must be 'int' or 'float'")
if not os.path.exists(matFilePath):
raise Exception("Cannot find matFilePath\n%s"%matFilePath)
fid = open(matFilePath,'r')
reader = csv.reader(fid,delimiter=delimiter)
header = next(reader)
## get the gene and sample ids
transcriptIds = []
sampleIds = np.array(header[1:])
gc.disable()
for linja in reader:
transcriptIds.append(linja[0])
gc.enable()
transcriptIds = np.array(transcriptIds)
fid.close()
## fill in the matrix
mat = np.zeros((transcriptIds.shape[0],sampleIds.shape[0]),dtype=mtype)
fid = open(matFilePath,'r')
reader = csv.reader(fid,delimiter=delimiter)
header = next(reader)
row = 0
for linja in reader:
if mtype == 'int':
mat[row,:] = [int(float(i)) for i in linja[1:]]
else:
mat[row,:] = [float(i) for i in linja[1:]]
row +=1
fid.close()
return transcriptIds,sampleIds,mat
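# Illustrative input layout for read_matrix above (assumed from its docstring,
# not from upstream docs): the first row holds sample ids and the first column
# holds transcript ids, e.g.
#   transcript,sampleA,sampleB
#   tr1,10,12
#   tr2,0,3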
def read_de_results(filePath,delimiter=",",tool="edgeR"):
"""
read the differential expression output from DESeq or edgeR
"""
print('reading', filePath)
if not os.path.exists(filePath):
raise Exception("Cannot find matFilePath\n%s"%filePath)
if tool not in ["edgeR","DESeq"]:
raise Exception("invalid tool specified use 'edgeR' or 'DESeq'")
fid = open(filePath,'r')
reader = csv.reader(fid,delimiter=delimiter)
## get columnIds
header = next(reader)
columnIds = np.array(header[1:])
## get the gene and sample ids
transcriptIds = []
gc.disable()
for linja in reader:
transcriptIds.append(linja[0])
gc.enable()
transcriptIds = np.array(transcriptIds)
fid.close()
## fill in the matrix
mat = np.zeros((transcriptIds.shape[0],columnIds.shape[0]))
fid = open(filePath,'r')
reader = csv.reader(fid,delimiter=delimiter)
header = next(reader)
row = 0
for linja in reader:
_row = [re.sub("NA","NaN",i) for i in linja[1:]]
mat[row,:] = [float(i) for i in _row]
row +=1
fid.close()
return transcriptIds,columnIds,mat
def create_count_matrix(results,label,sampleList):
"""
this function is untested
"""
## use first sample to get rows
mat = np.zeros((len(results[0].keys()),len(sampleList)))
keys = sorted(np.array(results[0].keys()))
for j,sample in enumerate(sampleList):
for i,key in enumerate(keys):
mat[i,j] = results[j][key]['exp_count']
## write to file
fid = open("%s-counts.csv"%label,'w')
writer = csv.writer(fid)
if re.search("gene",label):
writer.writerow(["gene"]+sampleList)
else:
writer.writerow(["isoform"]+sampleList)
for r in range(mat.shape[0]):
row = [keys[r]] + [int(i) for i in mat[r,:].tolist()]
writer.writerow(row)
fid.close()
|
bsd-3-clause
| -1,213,594,637,747,343,600 | 27.883721 | 115 | 0.605072 | false |
adamcandy/Gaia
|
FileTodo.py
|
1
|
56922
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##########################################################################
#
# Gaia, task list organiser in with Caldav server sync.
#
# Copyright (C) 2013-2014 Dr Adam S. Candy.
# Dr Adam S. Candy, [email protected]
#
# This file is part of the Gaia project.
#
# Gaia is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Gaia is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Gaia. If not, see <http://www.gnu.org/licenses/>.
#
##########################################################################
from Universe import universe, colour
import sys
import os
from datetime import datetime, timedelta
import re
from uuid import uuid4
from Support import error, report
from Support import generate_mono
from Support import repo_add, repo_remove, repo_update
from Parsers import is_relative_date, calculate_delta, prioritystring, is_same_time, timedelta_to_human, do_avoid_weekend, next_weekday, next_increment
def indentation(s, tabsize=2):
sx = s.expandtabs(tabsize)
return (len(sx) - len(sx.lstrip()))/tabsize
#return 0 if sx.isspace() else (len(sx) - len(sx.lstrip()))/tabsize
def parsedate(string, reference=None, alarm=False, allday=False, forward=False):
date = None
if (string is None or len(string) == 0):
if alarm:
if reference is not None:
if allday:
# Warning for day events 1800 - 1000 = 8 hours
date = reference + universe.defaulttime.alldaydiff
else:
# Default warning of an hour
date = reference + universe.defaulttime.diff
else:
string = string.strip()
# Deal with tasks due on a day, not specific time
if len(string) == 6:
allday = True
if alarm:
string = string + universe.defaulttime.alarm.strftime('%H%M')
else:
string = string + universe.defaulttime.due.strftime('%H%M')
try:
if re.match('^\d{6}$', string):
date = datetime.strptime(string, '%y%m%d')
elif re.match('^\d{10}$', string):
try:
date = universe.timezone.localize(datetime.strptime(string, '%y%m%d%H%M'))
#date = datetime.strptime(string, '%y%m%d%H%M')
except Exception, e:
date = None
error('Date parse error [' + string + ']' + ' Exception: ' + str(e))
if universe.debug: raise
pass
elif is_relative_date(string):
d = calculate_delta(string)
if d is not None:
if reference is not None:
if forward:
date = reference + d
else:
date = reference - d
else:
date = universe.timezone.localize(datetime.strptime(string))
#date = datetime.strptime(string)
except Exception, e:
date = None
error('Date parse error [' + string + ']' + ' Exception: ' + str(e))
if universe.debug: raise
pass
return date, allday
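# Example for parsedate above (illustrative): a six-digit string like '140305'
# is flagged all-day and given the configured default due/alarm time, while a
# ten-digit string like '1403051330' parses to a timezone-localized datetime
# for 13:30 on 5 Mar 2014.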
def tasklist_read(name, category=None):
if category is None:
filename = universe.dataroot + name
else:
filename = universe.dataroot + category + '/' + name
if not os.path.exists(filename):
return None
f = open(filename, 'r')
level = 0
taskline = ''
notes = ''
lines = (f.read().decode('utf8') + os.linesep).splitlines()
f.close()
#end = len(lines)
#blank = False
#for i in range(len(lines)):
# if len(lines[i]) > 0:
# blank = False
# continue
# if not blank:
# blank = True
# continue
# end = i
# break
# Temp
#end = len(lines)
#root = FileTodos(lines[:end], title=name, parents=[category], filenotes=lines[end+1:])
root = FileTodos(lines, title=name, parents=[category])
root.check_for_modified_children()
if root.is_empty():
report(' ' + colour.grey + 'Removing EMPTY ' + colour.blue + category + colour.grey + '/' + colour.yellowbright + root.name + colour.end + ' ' + colour.grey + '(' + colour.grey + filename + colour.grey + ')' + colour.end)
if not universe.dry:
root.set_modified()
try:
if os.path.exists(filename):
os.remove(filename)
repo_remove(filename)
except:
pass
return root
class FileTodos(object):
def __init__(self, lines=None, filenotes=None, parents=[], parent=None, title=None, flow='parallel', translate=None, number=1, level=None, uid=None, caldav=False, next_action=None):
self.lines = None
self.filenotes = filenotes
if self.filenotes is None:
self.filenotes = []
self.block = []
self.level = -2
# top level modified flag for file updates
self.modified = False
# task level update flag for caldav
self.updated = False
self.sequence = 0
if lines is not None:
self.lines = lines
self.block = [ 0, len(self.lines) ]
if title is not None:
self.level = 0
else:
self.level = indentation(self.lines[0]) + 1
title = self.lines[0].lstrip()
if level is not None:
self.level = level
self.name = None
self.duetext = None
self.alarmtext = None
self.is_checklist = False
self.flowtext = None
self.flow = flow
self.is_header = False
self.is_completed = False
#if caldav:
# self.is_onhold = None
# self.starttext = None
# self.repeat = None
#else:
#self.is_everpresent = False
self.is_onhold = False
self.starttext = None
self.repeat = None
self.expiretext = None
self.wait = ''
self.waitonrepeat = False
self.priority = None
self.is_permanent = False
self.avoidweekends = False
self.current = False
self.error = False
self.sublist = None
self.parents = parents
self.parent = parent
self.number = number
self.uid = uid
self.translate = ''
if translate is not None:
self.translate = translate
self.interpret_task(title)
#if len(self.translate) > 0:
# print self.name, self.translate
self.note = self.find_note()
self.childblocks = self.identify_blocks()
self.children = []
self.due, allday = parsedate(self.duetext)
self.alarm, allday = parsedate(self.alarmtext, reference=self.due, alarm=True, allday=allday)
self.start, allday = parsedate(self.starttext, reference=self.due)
self.expire, allday = parsedate(self.expiretext, reference=self.due, forward=True)
self.active = False
self.titleoptions = ''
self.type = 'file'
self.next_action = next_action
if self.next_action is not None:
self.next_action = next_action.lstrip()
# Need to add next action, in case of checklist, main header is first??
if lines is not None:
if len(self.childblocks) > 0:
filenotesstart = self.childblocks[-1][-1]
else:
filenotesstart = 0
i = filenotesstart
for i in range(filenotesstart, len(lines)):
if len(lines[i]) > 0:
filenotesstart = i
break
if self.level == 0:
#print self.name, filenotesstart
if filenotesstart < len(lines):
if lines[filenotesstart] is not None:
if len(lines[filenotesstart]) > 0:
self.filenotes = lines[filenotesstart:]
if len(self.childblocks) > 0:
self.find_children()
def child_is_task(self, task):
found = False
for child in self.children:
if child.is_same_task(task):
found = True
break
return found
def is_empty(self):
return (not self.has_children() and len(self.filenotes) == 0)
def is_same_task(self, task):
if (len(self.parents) == 0 or len(task.parents) == 0):
return self.name == task.name
else:
return (self.name == task.name and self.parents[0] == task.parents[0])
def is_translate_header(self):
if self.has_children():
if self.is_translate():
if self.parent is None:
return True
else:
if not self.parent.is_translate():
return True
return False
def group(self, masked=True):
if self.is_wait() and masked:
group = 'wait'
elif (self.is_translate() and (not self.is_translate_header()) and masked):
group = self.translate
elif len(self.parents) > 0:
group = self.parents[0]
else:
# Either root of tree, or an un-tied task!
group = 'home'
return group
def allday(self):
return (is_same_time(self.due, universe.defaulttime.due) and is_same_time(self.alarm, universe.defaulttime.alarm) )
def do_repeat(self):
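    # Advance due/start/alarm according to the repeat spec ('every...', 'after...',
    # 'random' or a named interval), optionally avoiding weekends and re-applying
    # the wait state for waitonrepeat tasks; permanent tasks only reset their wait.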
avoid_weekends = (self.group(masked=False) in universe.skipweekendlists or self.avoidweekends)
# Deal with permanent task
if self.is_permanent:
#self.is_onhold = True
detail = ''
if self.waitonrepeat:
self.wait = 'wait'
detail = ' and moved to wait status'
self.set_updated()
      report(colour.yellow + 'Permanent task' + detail + colour.end + ' ' + colour.yellowbright + '|'.join(self.parents) + colour.yellow + ':' + colour.end + ' ' + self.name + colour.end)
return
if (self.repeat is None or len(self.repeat) == 0): return
if (self.due is None): return
d = None
if self.waitonrepeat:
self.wait = 'wait'
self.set_updated()
every = False
after = False
random = False
string = self.repeat
if string in ['decennially', 'biennially', 'annually', 'monthly', 'fortnightly', 'weekly', 'daily']:
every = True
if string == 'decennially':
string = '10years'
elif string == 'biennially':
string = '2years'
elif string == 'annually':
string = 'year'
elif string == 'monthly':
string = 'month'
elif string == 'fortnightly':
string = '2weeks'
elif string == 'weekly':
string = 'week'
elif string == 'daily':
string = 'day'
elif re.match('^every\w+$', string):
every = True
string = string[5:]
elif re.match('^after\w+$', string):
after = True
string = string[5:]
elif re.match('^random$', string):
random = True
if every or after or random:
d = calculate_delta(string)
if d is not None:
# Including case of absolute date
new_due = None
new_start = None
new_alarm = None
detail = ''
if every:
# Ensure at least advanced by one d delta
multi = 1
while (self.due + d * multi) < universe.now:
multi += 1
if multi > 1000:
multi = 1
error('Determining multiple every recur time delta for (>1000) ' + self.name)
break
#print 'A', d * multi
#print 'B', self.due
#print 'C', self.due + d * multi
#multi = 0
#d = d * multi
#dmulti = int((universe.now - self.due).total_seconds() // d.total_seconds())
#if dmulti > 0:
# # Event very overdue, such that subsequent repeats missed
# d = (dmulti + 1) * d
# #print "Multi d event", d, dmulti
new_due = self.due + d * multi
if self.start is not None:
if is_relative_date(self.starttext):
new_start = self.start + d * multi
elif (after or random):
if after:
# Use .replace on datetime object instead?
#shift = ((self.due.hour - universe.now.hour) * 60 + (self.due.minute - universe.now.minute)) * 60 + self.due.second - universe.now.second
#new_due = universe.now + d + timedelta(seconds=shift) + timedelta(microseconds=-universe.now.microsecond)
#
new_due = universe.now.replace(second=0, microsecond=0)
shift = (self.due.hour - new_due.hour) * 60 + self.due.minute - new_due.minute
new_due = new_due + d + timedelta(minutes=shift)
#
elif random:
new_due = universe.now.replace(second=0, microsecond=0) + d
new_due = do_avoid_weekend(new_due, avoid_weekends=avoid_weekends)
if (self.starttext is not None and len(self.starttext) > 0):
string = self.starttext
if is_relative_date(string):
d = calculate_delta(string)
if d is not None:
new_start = new_due - d
if self.alarm is not None:
if self.alarmtext is not None:
self.alarm, allday = parsedate(self.alarmtext, reference=new_due, alarm=True, allday=self.allday())
elif self.allday():
# Warning for day events 1800 - 1000 = 8 hours
new_alarm = new_due + universe.defaulttime.alldaydiff
else:
# Default warning of an hour
new_alarm = new_due + universe.defaulttime.diff
if new_due is not None:
detail = detail + ' due: %(old)s -> %(new)s' % {
'old': '[empty]' if self.due is None else self.due.strftime('%y%m%d%H%M%z'),
'new': '[empty]' if new_due is None else new_due.strftime('%y%m%d%H%M%z')
}
self.due = new_due
if new_start is not None:
detail = detail + ' start: %(old)s -> %(new)s' % {
'old': '[empty]' if self.start is None else self.start.strftime('%y%m%d%H%M%z'),
'new': '[empty]' if new_start is None else new_start.strftime('%y%m%d%H%M%z')
}
self.start = new_start
if new_alarm is not None:
detail = detail + ' alarm: %(old)s -> %(new)s' % {
'old': '[empty]' if self.alarm is None else self.alarm.strftime('%y%m%d%H%M%z'),
'new': '[empty]' if new_alarm is None else new_alarm.strftime('%y%m%d%H%M%z')
}
self.alarm = new_alarm
report(colour.yellow + 'Recur task in' + colour.end + ' ' + colour.yellowbright + '|'.join(self.parents) + colour.yellow + ':' + colour.end + ' ' + self.name + colour.grey + detail + colour.end)
else:
error('Determining recur time delta for ' + self.name + ' string[' + string + ']')
return
def add(self, task):
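    # Insert a new task into the tree beneath its parent list, falling back to
    # that list's inbox when the requested sublist does not exist.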
if len(task.parents) == 1:
lists = []
for c in self.children:
if c.name == task.parents[0]:
lists = c.child_names()
break
if (task.sublist is None) or not (task.sublist in lists):
if (task.sublist is not None) and not (task.sublist in lists):
report(colour.red + 'Selected sublist ' + task.sublist + ' not present, adding to the inbox' + colour.end)
task.sublist = 'inbox'
task.parents.append(task.sublist)
task.sublist = None
match = self
for group in task.parents:
found = False
for child in match.children:
if child.name == group:
found = True
match = child
break
if not found:
inbox = FileTodos(title='inbox', parents=match.parents + [match.name], parent=match, translate=self.translate, level=match.level + 1)
match.add_child(inbox)
match = inbox
found = True
match.set_modified(task)
new = FileTodos(lines=task.reformat().splitlines(), parents=match.parents + [match.name], parent=match)
report(colour.green + 'Adding task to ' + colour.greenbright + 'file' + colour.green + ' in ' + '|'.join(new.parents) + colour.green + ':' + colour.end + ' ' + new.name)
match.add_child(new)
def find_task(self, task):
match = None
if self.is_same_task(task):
return self
for child in self.children:
match = child.find_task(task)
if match is not None:
match = match.find_task(task)
break
return match
def find_tasks_by_name(self, task=None, name=None, matches=None, check_is_wait=False):
if matches is None:
matches = []
if task is not None:
name = task.name
if name == self.name:
if (not check_is_wait or (check_is_wait and self.is_wait()) ):
matches.append(self)
for child in self.children:
matches = child.find_tasks_by_name(name=name, matches=matches)
return matches
def find_task_parent(self, task):
#if task.name in self.child_names():
if self.child_is_task(task):
return self
for child in self.children:
parents = child.find_task_parent(task)
if parents is not None:
return parents
return None
def children_all_completed(self):
allcomplete = True
for child in self.children:
if not child.is_completed:
allcomplete = False
return allcomplete
def uncomplete_childen(self):
self.is_completed = False
for child in self.children:
child.uncomplete_childen()
def unwait_childen(self):
    # Assumes this runs just after uncomplete_childen (for the waitonrepeat check)
if self.waitonrepeat:
self.wait = 'wait'
else:
self.wait = ''
for child in self.children:
child.unwait_childen()
def is_repeat(self):
if self.repeat is not None:
if len(self.repeat) > 0:
if self.due is not None:
return True
if self.is_permanent:
return True
return False
def recur(self, task, root=None, recursive=False):
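    # Find task in the tree and either apply its repeat rule (resetting child
    # completion and wait state first) or, if it does not repeat, remove it.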
if root is None:
root = self
match = None
removed = False
#if task.name in self.child_names():
if self.child_is_task(task):
for child in self.children:
#if child.name == task.name:
if child.is_same_task(task):
match = child
break
# Should complete/remove any children here - otherwise need to wait for next run
match.uncomplete_childen()
match.unwait_childen()
if ((match.repeat is not None and match.due is not None) or match.is_permanent):
match.do_repeat()
#match.update()
else:
root.remove(task)
removed = True
else:
for child in self.children:
match = child.recur(task, root=root, recursive=True)
if match is not None:
break
if not recursive:
if match is not None:
self.make_modified(match)
if removed: return None
return match
def remove(self, task, root=None, repeats=False, recursive=False):
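    # Remove the task (or mark it completed when beneath a repeating parent),
    # then complete or remove parents that are finished or left empty.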
if root is None:
root = self
match = None
if self.child_is_task(task):
# Check if new tasks become active
if self.is_repeat():
repeats = True
new_children = []
for child in self.children:
#if child.name == task.name:
if child.is_same_task(task):
match = child
if repeats:
match.is_completed = True
else:
new_children.append(child)
if not match.is_header:
if repeats:
action = 'Completing'
else:
self.children = new_children
action = 'Removing'
stat = colour.greenbright + 'OK' + colour.end if match is not None else colour.redbright + 'FAIL' + colour.end
report(colour.red + action + ' task from full tree in' + colour.end + ' ' + colour.redbright + 'file' + '|' + '|'.join(match.parents) + colour.red + ':' + colour.end + ' ' + match.name + ' ' + stat)
else:
if self.is_repeat():
repeats = True
for child in self.children:
match = child.remove(task, root=root, repeats=repeats, recursive=True)
if match is not None:
break
# Check if parent requires removal
if match is not None:
# removed: child, parent: self X actually match?
if child.level > 0:
if child.name == match.parents[-1]:
if (child.is_repeat() or repeats):
if child.children_all_completed():
report(colour.red + ' need to complete parent also, ' + colour.redbright + child.name + colour.end)
# Uncomplete all children of child
child.uncomplete_childen()
child.unwait_childen()
if child.is_repeat():
# Apply repeat to child
child.do_repeat()
else:
self.remove(child, repeats=repeats, recursive=True)
match = child
else:
if not child.has_children():
if not child.is_header:
report(colour.red + ' need to remove parent also, ' + colour.redbright + child.name + colour.end)
self.remove(child, recursive=True)
match = child
if not recursive:
if match is not None:
self.make_modified(match)
return match
def clear_titleoptions(self):
self.starttext = None
self.repeat = None
#self.is_onhold = False
def is_equal(self, other, caldav=False):
if (self.due != other.due):
return False
if (self.alarm != other.alarm):
return False
if (self.note != other.note):
return False
if (self.priority != other.priority):
return False
if (self.wait != other.wait):
return False
if (self.next_action != other.next_action):
return False
#print self.name, '|', self.group(), other.group()
# Don't compare translate if either task is waiting
if (not self.is_wait() and not other.is_wait()):
if (self.translate != other.translate):
#print self.name, '|', self.group(), other.group()
return False
if caldav:
return True
# Optional checks:
# Note not possible for caldav
# start, starttext
#if (self.starttext is not None and other.starttext is not None):
if (self.starttext != other.starttext):
return False
# repeat
#if (self.repeat is not None and other.repeat is not None):
if (self.repeat != other.repeat):
return False
# is_onhold
#if (self.is_onhold is not None and other.is_onhold is not None):
if (self.is_onhold != other.is_onhold):
return False
# flow (no access, add later?)
# is_permanent (no access - add later?)
# is_header (no access from Caldav?)
# is_checklist (not used)
return True
def __eq__(self, other):
if isinstance(other, FileTodos):
return self.is_equal(other)
return NotImplemented
def __ne__(self, other):
result = self.__eq__(other)
if result is NotImplemented:
return result
return not result
def __lt__(self, other):
# Check due
if (self.due is None and other.due is not None):
return False
if (self.due is not None and other.due is None):
return True
if ((self.due is not None and other.due is not None) and self.due != other.due):
return self.due < other.due
# Check priorities
if (self.priority is None and other.priority is not None):
return False
if (self.priority is not None and other.priority is None):
return True
if ((self.priority is not None and other.priority is not None) and self.priority != other.priority):
      # Note: priorities are in reverse (a lower number is more important)
return self.priority < other.priority
# Check wait
    if (self.is_wait() and not other.is_wait()):
      return False
    if (not self.is_wait() and other.is_wait()):
      return True
return self.name < other.name
def update(self, task, due=False, note=False, priority=False, wait=False, recursive=False, caldav=False, previous=None, caldavsource=False):
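    # Synchronise this task's fields (due/alarm, note, priority, wait, start,
    # repeat, hold, next action) with `task`, reporting each change; with
    # caldav=True the update is pushed to the calendar event instead.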
# Also update FileTodo.__eq__
# To stop passing all of the above around...:
if previous is not None:
due = (task.due != previous.due) or (task.alarm != previous.alarm) or due
note = (task.note != previous.note) or note
next_action = (task.next_action != previous.next_action)
#next_action = True
#print '['+previous.next_action+']', '['+task.next_action+']'
priority = (task.priority != previous.priority) or priority
wait = (task.wait != previous.wait) or wait
# new:
#starttext = (task.starttext is not None and previous.starttext is not None) and (task.starttext != previous.starttext)
#repeat = (task.repeat is not None and previous.repeat is not None) and (task.repeat != previous.repeat)
#is_onhold = (task.is_onhold is not None and previous.is_onhold is not None) and (task.is_onhold != previous.is_onhold)
translate = False
if (not task.is_wait() and not previous.is_wait()):
translate = (task.translate != previous.translate)
# Deal with updates on tasks from caldav data (i.e. ensure below are False)
starttext = (task.starttext != previous.starttext) and (not caldavsource)
repeat = (task.repeat != previous.repeat) and (not caldavsource)
is_onhold = (task.is_onhold != previous.is_onhold) and (not caldavsource)
#print 'caldavsource', caldavsource, starttext, repeat, is_onhold, task.name
found = None
#if self.name == task.name:
if self.is_same_task(task):
detail = ''
if priority:
detail = detail + ' priority: %(old)s -> %(new)s' % {
'old': prioritystring(self.priority, shownone=True),
'new': prioritystring(task.priority, shownone=True),
}
self.priority = task.priority
if due:
detail = detail + ' due: %(old)s -> %(new)s, alarm: %(aold)s -> %(anew)s' % {
'old': '[empty]' if self.due is None else self.due.strftime('%y%m%d%H%M%z'),
'new': '[empty]' if task.due is None else task.due.strftime('%y%m%d%H%M%z'),
'aold': '[empty]' if self.alarm is None else self.alarm.strftime('%y%m%d%H%M%z'),
'anew': '[empty]' if task.alarm is None else task.alarm.strftime('%y%m%d%H%M%z'),
}
self.due = task.due
self.alarm = task.alarm
        # If due becomes None, any start is no longer relevant, so ensure it is also cleared
# Might need to do this for alarm too? bit complicated...
if (self.due is None and self.starttext is not None):
detail = detail + ' start: %(old)s -> [empty] (enforced)' % {
'old': '[empty:'+str(self.starttext)+']' if (self.starttext is None or self.starttext == '') else ' + '.join(self.starttext.splitlines()),
}
self.starttext = None
if wait:
detail = detail + ' wait: %(old)s -> %(new)s' % {
'old': '[empty:'+str(self.wait)+']' if (self.wait is None or self.wait == '') else self.wait,
'new': '[empty:'+str(task.wait)+']' if (task.wait is None or task.wait == '') else task.wait
}
self.wait = task.wait
# asc 131203
# if translate:
# detail = detail + ' translate: %(old)s -> %(new)s' % {
# 'old': '[empty:'+str(self.translate)+']' if (self.translate is None or self.translate == '') else self.translate,
# 'new': '[empty:'+str(task.translate)+']' if (task.translate is None or task.translate == '') else task.translate
# }
# self.translate = task.translate
if note:
detail = detail + ' note: %(old)s -> %(new)s' % {
'old': '[empty:'+str(self.note)+']' if (self.note is None or self.note == '') else ' + '.join(self.note.splitlines()),
'new': '[empty:'+str(task.note)+']' if (task.note is None or task.note == '') else ' + '.join(task.note.splitlines()),
}
self.note = task.note
# new
if is_onhold:
detail = detail + ' hold: %(old)s -> %(new)s' % {
'old': '[empty:'+str(self.is_onhold)+']' if (self.is_onhold is None or self.is_onhold == '') else self.is_onhold,
'new': '[empty:'+str(task.is_onhold)+']' if (task.is_onhold is None or task.is_onhold == '') else task.is_onhold
}
self.is_onhold = task.is_onhold
if starttext:
detail = detail + ' start: %(old)s -> %(new)s' % {
'old': '[empty:'+str(self.starttext)+']' if (self.starttext is None or self.starttext == '') else ' + '.join(self.starttext.splitlines()),
'new': '[empty:'+str(task.starttext)+']' if (task.starttext is None or task.starttext == '') else ' + '.join(task.starttext.splitlines()),
}
self.starttext = task.starttext
if repeat:
detail = detail + ' repeat: %(old)s -> %(new)s' % {
'old': '[empty:'+str(self.repeat)+']' if (self.repeat is None or self.repeat == '') else ' + '.join(self.repeat.splitlines()),
'new': '[empty:'+str(task.repeat)+']' if (task.repeat is None or task.repeat == '') else ' + '.join(task.repeat.splitlines()),
}
self.repeat = task.repeat
if next_action:
detail = detail + ' next action: %(old)s -> %(new)s' % {
'old': '[empty:'+str(self.next_action)+']' if (self.next_action is None or self.next_action == '') else ' + '.join(self.next_action.splitlines()),
'new': '[empty:'+str(task.next_action)+']' if (task.next_action is None or task.next_action == '') else ' + '.join(task.next_action.splitlines()),
}
self.next_action = task.next_action
#self.sequence_increment()
if caldav:
caltype = 'caldav'
elif recursive:
caltype = 'file'
else:
caltype = 'active'
updated = False
if caldav:
# Assumes have previous
if (due or note or priority or wait or translate or next_action):
from CaldavClient import ical_event_update
ical_event_update(self, due=due, note=note, priority=priority, wait=wait, translate=translate, previous=previous, next_action=next_action)
updated = True
else:
updated = True
if updated:
report(colour.yellow + 'Updating task in' + colour.end + ' ' + colour.yellowbright + caltype + '|' + '|'.join(self.parents) + colour.yellow + ':' + colour.end + ' ' + self.name + colour.grey + detail + colour.end)
else:
report(colour.yellow + 'Updating task in' + colour.end + ' ' + colour.yellowbright + caltype + '|' + '|'.join(self.parents) + colour.yellow + ' not required and '+ colour.yellowbright +'skipped' + colour.end + ' ' + self.name + colour.grey + detail + colour.end)
found = self
else:
for child in self.children:
found = child.update(task, due=due, note=note, priority=priority, wait=wait, recursive=True, caldav=caldav, previous=previous, caldavsource=caldavsource)
if found is not None:
break
if ((not recursive) and (not caldav)):
self.make_modified(found)
return found
def make_modified_parents(self, task=None):
if task is None:
task = self
if len(self.parents) > 1:
self.parent.make_modified_parents(task=task)
elif len(self.parents) == 1:
self.make_modified(task=task)
return
def check_for_modified_children(self, root=True):
modified = False
if self.modified:
modified = True
for child in self.children:
modified = modified or child.check_for_modified_children(root=False)
if root and modified:
self.set_modified()
return modified
def set_modified(self, task=None):
if task is not None:
name = task.name
else:
name = '[not provided]'
if len(self.parents) > 0:
parentstr = self.parents[-1]
else:
parentstr = '[parent unknown]'
report(colour.magenta+'Marking modified ' + parentstr + '|' + self.name + ' for task ' + name + colour.end)
self.modified = True
def make_modified(self, task):
def to_mark(current, task):
if len(current.parents) == 0:
return False
return (task.parents[1] == current.name and task.parents[0] == current.parents[0])
if len(task.parents) < 2:
return
if to_mark(self, task):
if not self.modified:
self.set_modified(task)
else:
for child in self.children:
child.make_modified(task)
def child_names(self):
names = []
for child in self.children:
names.append(child.name)
return names
def has_children(self):
if len(self.children) > 0:
return True
return False
def is_sequential(self):
return self.flow == 'sequential'
def set_wait(self, string=None):
if string is None:
string = 'wait'
self.wait = string
for child in self.children:
child.set_wait(string)
def set_updated(self, follow=True):
self.updated = True
if follow:
for child in self.children:
child.set_updated(follow=follow)
def is_translate(self):
if self.translate is not None:
if len(self.translate) > 0:
return True
return False
def is_wait(self):
if self.wait is not None:
if len(self.wait) > 0:
return True
return False
def is_available(self):
if self.is_onhold:
return False
if self.error:
return False
#if self.is_wait():
# return False
if self.start is not None:
if self.start > universe.now:
return False
return True
def is_expired(self):
if self.expire is not None:
if self.expire <= universe.now:
return True
return False
def is_active(self):
# Exclude the root and projects
if self.level <= 0:
return False
if self.is_header:
return False
if not self.is_available():
return False
if self.parent.is_wait():
# Only include highest wait
return False
#if (self.parent.is_translate_header() and self.parent.is_wait()):
# # Note onhold wipes out children anyway - here wait is special case
# return False
#if ( len(self.translate) > 0 and len(self.parent.translate) == 0 ):
if self.is_translate_header():
# Header of aux list
# Not great returning True here
return True
# Clause for grouped / lists
if ((not self.is_checklist) and (self.has_children())):
return False
# Restricted to next actions, when sequential
return True
def find_all_names(self, todos=None):
if todos == None:
todos = []
if not self.error:
if self.level >= 1:
todos.append(self.name)
for child in self.children:
todos = child.find_all_names(todos)
return todos
def find_all_tasks(self, todos=None):
if todos == None:
todos = []
if not self.error:
if self.level >= 1:
todos.append(self)
for child in self.children:
todos = child.find_all_tasks(todos)
return todos
def find_all_task_occurances(self, task, occurances=None):
if occurances == None:
occurances = 0
if self.is_same_task(task):
occurances +=1
#report(' DUPLICATE CALDAV: ' + str(occurances) + ' ' + task.name)
for child in self.children:
occurances = child.find_all_task_occurances(task, occurances)
return occurances
def find_active(self, active=None):
if active == None:
active = []
if self.is_active():
active.append(self)
self.active = True
is_sequential = self.is_sequential()
for child in self.children:
if child.is_completed:
continue
if not child.is_available():
if is_sequential:
break
continue
active = child.find_active(active)
if is_sequential:
break
return active
def is_valid_task(self):
if self.level <= 0:
return False
if self.is_header:
return False
if self.is_onhold:
return False
if self.error:
return False
return True
def find_next_actions(self, set_updated=True, updated=None):
#if 'Meshing ' in self.name:
# verb=True
#else:
# verb=False
if updated is None:
updated = []
next_action = self.find_next_action()
#if verb: print self.name + ': ['+str(self.next_action)+']', '['+str(next_action)+']'
if self.next_action != next_action:
self.next_action = next_action
if set_updated:
self.set_updated(follow=False)
updated.append(self.name)
#print ' UPDATED', self.name
#print self.name + ': ['+str(self.next_action)+']', '['+str(next_action)+']'
for child in self.children:
child.find_next_actions(set_updated=set_updated, updated=updated)
return updated
def find_next_action(self):
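    # Return the name of the next valid sibling when the parent flow is
    # sequential, recursing up the tree when this task is last in its block.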
next_action = None
if self.level <= 0:
return None
if self.parent.is_sequential():
neighbours = self.parent.children
found = False
for neighbour in neighbours:
if found:
if neighbour.is_valid_task():
next_action = neighbour
break
elif neighbour.name == self.name:
found = True
if next_action is None:
return self.parent.find_next_action()
else:
return next_action.name
# next_actions = []
# if len(self.parents) == 0:
# return next_actions
# p = self.parents[-1]
# if not p.is_sequential():
# return next_actions
def find_error(self, error=None):
if error == None:
error = []
if self.error:
error.append(self)
for child in self.children:
error = child.find_error(error)
return error
def show_error(self, show_notes=False):
errors = self.find_error()
if len(errors) == 0: return
report(colour.redbright + 'ERROR' + colour.end)
for task in errors:
report(task.to_string(indentnone=True, notes=show_notes, show_where=True), forced=True)
def is_important(self):
return (self.priority is not None)
def is_due_on_day(self, day):
if self.due is None:
return False
if self.due.year != day.year:
return False
if self.due.month != day.month:
return False
if self.due.day != day.day:
return False
return True
def is_overdue(self):
if self.due is None:
return False
return universe.now > self.due
def is_due_today(self):
return self.is_due_on_day(universe.now)
def is_due_tomorrow(self):
return self.is_due_on_day(universe.now + timedelta(days=1))
def is_overdue_yesterday_or_past(self):
return (self.is_overdue() and (not self.is_due_today()))
def is_overdue_today_tomorrow_important(self):
return (self.is_overdue() or self.is_due_today() or self.is_due_tomorrow() or self.is_important())
def make_due_today(self, displacement=0, avoid_weekends=False):
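    # Move the task's due date to today (plus `displacement` days), keeping the
    # original time of day, and shift any relative start and the alarm with it.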
new_due = None
new_start = None
new_alarm = None
detail = ''
# shift from now time to due time, all today
#shift = ((self.due.hour - universe.now.hour) * 60 + (self.due.minute - universe.now.minute)) * 60 + self.due.second - universe.now.second
#new_due = universe.now + timedelta(seconds=shift)
if self.repeat == 'random':
new_due = universe.now.replace(second=0, microsecond=0) + calculate_delta('random')
else:
new_due = universe.now.replace(hour=self.due.hour, minute=self.due.minute, second=0, microsecond=0)
# Apply displacement days
new_due = new_due + timedelta(days=displacement)
new_due = do_avoid_weekend(new_due, avoid_weekends=avoid_weekends)
# Update start time
if (self.starttext is not None and len(self.starttext) > 0):
string = self.starttext
if is_relative_date(string):
d = calculate_delta(string)
if d is not None:
new_start = new_due - d
# Update alarm
if self.alarm is not None:
if self.alarmtext is not None:
self.alarm, allday = parsedate(self.alarmtext, reference=new_due, alarm=True, allday=self.allday())
elif self.allday():
# Warning for day events 1800 - 1000 = 8 hours
new_alarm = new_due + universe.defaulttime.alldaydiff
else:
# Default warning of an hour
new_alarm = new_due + universe.defaulttime.diff
detail = detail + ' due: %(old)s -> %(new)s' % {
'old': '[empty]' if self.due is None else self.due.strftime('%y%m%d%H%M%z'),
'new': '[empty]' if new_due is None else new_due.strftime('%y%m%d%H%M%z')
}
self.due = new_due
if new_start is not None:
detail = detail + ' start: %(old)s -> %(new)s' % {
'old': '[empty]' if self.start is None else self.start.strftime('%y%m%d%H%M%z'),
'new': '[empty]' if new_start is None else new_start.strftime('%y%m%d%H%M%z')
}
self.start = new_start
if new_alarm is not None:
detail = detail + ' alarm: %(old)s -> %(new)s' % {
'old': '[empty]' if self.alarm is None else self.alarm.strftime('%y%m%d%H%M%z'),
'new': '[empty]' if new_alarm is None else new_alarm.strftime('%y%m%d%H%M%z')
}
self.alarm = new_alarm
report(colour.yellow + 'Update due to today for important task in' + colour.end + ' ' + colour.yellowbright + '|'.join(self.parents) + colour.yellow + ':' + colour.end + ' ' + self.name + colour.grey + detail + colour.end)
self.make_modified_parents()
return
def prioritycurrent(self, caldav=False):
# Make tasks with a priority that have a due time in the previous days or past,
# due today at the same time
# Only applied to current active list?
#print self.name
if ((self.is_important() or self.current) and self.is_overdue_yesterday_or_past()):
#print 'HERE', self.name
try:
# Check here if in skipweekendlists
avoid_weekends = ((self.group(masked=False) in universe.skipweekendlists) or self.avoidweekends)
# self.make_due_next_work_day()
self.make_due_today(avoid_weekends=avoid_weekends)
# state.aiyo.make_modified(self)
if caldav:
from CaldavClient import ical_event_update
ical_event_update(self, due=True)
else:
self.set_modified()
except Exception, e:
out = os.linesep + ' Task: ' + self.name + ' ' + self.due.strftime('%y%m%d%H%M')
error('Error in making a priority task current, exception: ' + str(e) + out)
pass
def to_string(self, reformat=False, indentfull=False, indentnone=False, notes=True, show_where=False, show_next_action=False, show_translate_inheritance=False):
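    # Render the task as a single aiyo-format text line: name, start/due/alarm,
    # priority, repeat and status flags, plus optional note, location and next
    # action, indented according to its level.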
iro = generate_mono(reformat)
contentlist = []
if self.lines is not None:
for i in range(len(self.lines)):
contentlist.append('%(num)6d %(indent)2d %(content)s' % { 'num':i, 'indent':indentation(self.lines[i]), 'content':self.lines[i] })
content = os.linesep.join(contentlist)
if not notes:
note = ''
elif self.note is None:
note = ''
elif len(self.note) == 0:
note = ''
else:
note = os.linesep + os.linesep.join([ ' ' * 4 + notelines for notelines in self.note.splitlines() ])
note = iro.grey + note + iro.end
out_due = ''
out_due_date = None
if self.due is not None:
out_due_date = self.due
elif self.alarm is not None:
out_due_date = self.alarm
else:
out_due = ''
if out_due_date is not None:
if self.allday():
out_due = out_due_date.strftime('%y%m%d')
else:
out_due = out_due_date.strftime('%y%m%d%H%M')
# Work out diff
if self.alarm is not None:
out_alarm = self.alarm.strftime('%y%m%d%H%M')
if self.due is not None:
d = self.alarm - self.due
if (self.allday() and d == universe.defaulttime.alldaydiff):
out_alarm = ''
elif (not self.allday() and d == universe.defaulttime.diff):
out_alarm = ''
else:
dh = timedelta_to_human(d)
if dh is not None:
out_alarm = dh
else:
out_alarm = ''
if len(out_alarm) > 0:
out_alarm = ' !' + out_alarm
out_priority = prioritystring(self.priority, spacer=True)
translate = ''
if self.translate is not None:
if self.is_translate():
if (self.parent is None or show_translate_inheritance):
translate = ' =' + self.translate
else:
if not self.parent.is_translate():
translate = ' =' + self.translate
#print self.name, self.translate, translate, self.parent
if show_where:
parents = ' ' + (iro.grey+':'+iro.end).join([ iro.grey + x + iro.end for x in self.parents ])
else:
parents = ''
if show_next_action and (self.next_action is not None) and (len(str(self.next_action)) > 0):
next_action = ' ' + iro.green + universe.next_char + str(self.next_action) + iro.end
else:
next_action = ''
if self.is_overdue():
highlight_name = iro.redbright
elif self.is_due_today():
highlight_name = iro.red
elif self.is_due_tomorrow():
highlight_name = iro.yellow
elif self.priority is not None:
highlight_name = iro.yellow
else:
highlight_name = ''
options = '''\
%(spacer)s%(start)s%(divider)s%(due)s%(expire)s%(alarm)s%(priority)s%(repeat)s%(translate)s%(checklist)s%(flow)s%(header)s%(waitonrepeat)s%(permanent)s%(current)s%(avoidweekends)s%(wait)s%(paused)s%(completed)s%(parents)s%(next)s%(error)s''' \
% {
'start': '' if (self.starttext is None or len(self.starttext) == 0) else iro.cyan + self.starttext + iro.end,
'due': iro.blue + out_due + iro.blue,
'alarm': iro.red + out_alarm + iro.end,
'priority': iro.redbright + out_priority + iro.end,
'divider': '' if (self.starttext is None or len(self.starttext) == 0 ) else iro.grey + ':' + iro.end,
'repeat': '' if (self.repeat is None or len(self.repeat) == 0) else ' ' + iro.magenta + self.repeat + iro.end,
'expire': '' if (self.expiretext is None or len(self.expiretext) == 0) else ' ' + iro.magenta + self.expiretext + iro.end,
'spacer': '' if ((self.starttext is None or len(self.starttext) == 0) and (len(out_due) == 0)) else ' ',
'translate': iro.yellow + translate + iro.end,
'checklist': iro.yellow+' checklist'+iro.end if self.is_checklist else '',
'header': iro.yellow+' header'+iro.end if self.is_header else '',
'completed': iro.green+' completed'+iro.end if self.is_completed else '',
'paused': iro.blue+' hold'+iro.end if self.is_onhold else '',
'permanent': iro.magenta+' permanent'+iro.end if self.is_permanent else '',
'current': iro.magenta+' current'+iro.end if self.current else '',
'avoidweekends': iro.magenta+' avoidweekends'+iro.end if self.avoidweekends else '',
'wait': ' ' + iro.blue+self.wait+iro.end if self.is_wait() else '',
'waitonrepeat': iro.blue+' waitonrepeat'+iro.end if self.waitonrepeat else '',
'error': iro.redbright+' ERROR'+iro.end if self.error else '',
'flow': iro.magenta+' ' + self.flowtext+iro.end if self.flowtext is not None else '',
'parents': parents,
'next': next_action,
}
text = '''%(name)s%(spacer)s%(options)s%(note)s''' \
% {
'name': highlight_name + self.name + iro.end,
'spacer': '' if len(options) == 0 else ' ',
'options': options,
'note': note,
}
if indentnone:
indent = 2
else:
indentmod = 0
if indentfull:
indentmod = 2
if reformat:
indentmod = -1
indent = (self.level + indentmod) * 2
text = os.linesep.join([ ' ' * indent + notelines for notelines in text.splitlines() ])
return text
def __str__(self):
return self.to_string()
def find_children(self):
for i in range(len(self.childblocks)):
block = self.childblocks[i]
parents = []
for p in self.parents + [self.name]:
parents.append(p)
child = FileTodos(self.lines[block[0]:block[1]], parents = parents, number=i+1, parent=self, translate=self.translate)
self.add_child(child)
def find_note(self):
if self.lines is None: return ''
if len(self.lines) == 0: return ''
if self.level == 0:
if indentation(self.lines[0]) < self.level + 1: return ''
else:
if len(self.lines) == 1: return ''
if indentation(self.lines[1]) < self.level + 1: return ''
note = []
for i in range(len(self.lines)):
if ((self.level > 0) and (i == 0)): continue
if indentation(self.lines[i]) < self.level + 1: break
note.append(re.sub('^'+ ' ' * (self.level + 1) * 2, '', self.lines[i]))
if len(note) == 0:
return ''
return os.linesep.join(note)
def set_note(self, obj):
self.note = obj
def add_child(self, obj):
obj.parent = self
self.children.append(obj)
def set_block(self, obj):
self.block = obj
def set_childblocks(self, obj):
self.childblocks = obj
def show_tree(self, indentfull=True, notes=True, activeonly=False, availableonly=False):
if ((activeonly or availableonly) and not self.is_available()): return
if (activeonly and not self.is_active()): return
report(self.to_string(indentfull=indentfull, notes=notes), forced=True)
for child in self.children:
child.show_tree(indentfull=indentfull, notes=notes, activeonly=activeonly, availableonly=availableonly)
def reformat(self):
output = ''
if self.level > 0:
output = self.to_string(reformat=True) + os.linesep
for child in self.children:
output = output + child.reformat()
if (self.level == 0 and self.filenotes is not None):
output = output + os.linesep.join(['',''] + self.filenotes)
return output
def write(self, name=None, category=None):
if not self.modified: return False
if name is None:
name = self.name
if len(self.parents) > 0:
category = self.parents[0]
if category is None:
filename = universe.dataroot + name
else:
filename = universe.dataroot + category + '/'
if not os.path.exists(filename):
# Could be case here where file exists in place of foldername, this will cause trouble!
os.mkdir(filename)
filename = filename + name
repo_in = os.path.exists(filename)
report(colour.grey + 'Writing ' + colour.blue + category + colour.grey + '/' + colour.yellowbright + name + colour.end + ' ' + colour.grey + '(to' + colour.grey + ' ' + filename + colour.grey + ')' + colour.end)
if not universe.dry:
f = open(filename, 'w')
f.write(self.reformat().encode('utf-8'))
f.close()
if not repo_in:
repo_add(filename)
if self.is_empty():
report(' ' + colour.grey + 'Removing ' + colour.blue + category + colour.grey + '/' + colour.yellowbright + name + colour.end + ' ' + colour.grey + '(' + colour.grey + filename + colour.grey + ')' + colour.end)
if not universe.dry:
try:
if os.path.exists(filename):
os.remove(filename)
repo_remove(filename)
except:
pass
return True
def identify_blocks(self, start=None, end=None):
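    # Scan self.lines for child blocks at this indentation level, returning a
    # list of [start, end) index pairs; two or more blank lines at the current
    # level terminate the scan (anything after is treated as file notes).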
lines_to_excluded_section = 2
debug = False
#debug = (self.name == 'finance')
if self.lines is None:
return []
def add_block(r):
blocks.append(r)
if debug: print ' ', r
blocks = []
if start is None:
start = 0
if end is None:
end = len(self.lines)
if len(self.lines) <= 1: return blocks
r = [ -1, -1 ]
blanks = 0
for i in range(start, end):
line = self.lines[i]
indent = indentation(line)
if debug: print i, blanks, r, indent, line
if len(line) == 0:
blanks += 1
continue
# Indent is of current level
if indent == self.level:
# Existing block
if (r[0] > -1 and r[1] == -1):
if debug: print 'complete', blanks, blanks >= 2
r[1] = i
add_block(r)
r = [ -1, -1 ]
if r[0] == -1:
if debug: print 'new'
# If 2 or more previous blanks AND now indent = level
if blanks >= lines_to_excluded_section: break
# Start new block
if len(line.strip()) > 0:
r[0] = i
blanks = 0
# Add concluding block, if one has begun
if ((r[0] > -1) and (r[1] == -1)):
r[1] = i + 1
add_block(r)
if debug: print self.name, blocks
if debug:
report('XXXX'+ self.name)
print blocks
if len(blocks) > 0: print os.linesep.join(self.lines[blocks[-1][0]:blocks[-1][1]])
sys.exit(1)
return blocks
def interpret_task(self, title):
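    # Split the title line into the task name and keyword options (dates,
    # priority markers, repeat/expire/flow/status flags, =translate and
    # @sublist), populating the corresponding fields.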
sections = title.split(' ', 1)
if len(sections) == 2:
# Check if len(sections[1]) > 0?
self.name = sections[0]
title = sections[1]
else:
self.name = title
title = ''
words = title.split(' ')
titlelist = []
for word in words:
# NLP not working here, as cannot apply set_modified at this early point of parsing,
# would need to mark to update aiyo at a later stage, once the FileTodo object
# has been set up.
if re.match('^today$', word):
self.duetext = universe.now.strftime('%y%m%d')
self.set_modified()
elif re.match('^tomorrow$', word):
self.duetext = (universe.now + timedelta(days=1)).strftime('%y%m%d')
self.set_modified()
elif word in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday'] \
+ ['mon', 'tues', 'tue', 'wed', 'thurs', 'thu', 'thur', 'fri', 'sat', 'sun']:
self.duetext = next_weekday(word)
self.set_modified()
elif re.match('^\d*(day|week|month|year)s*$', word):
self.duetext = next_increment(word)
self.set_modified()
elif re.match('^\w+:today$', word):
self.starttext, self.duetext = word.rsplit(':', 1)
self.duetext = universe.now.strftime('%y%m%d')
self.set_modified()
elif re.match('^\w+:tomorrow$', word):
self.starttext, self.duetext = word.rsplit(':', 1)
self.duetext = (universe.now + timedelta(days=1)).strftime('%y%m%d')
self.set_modified()
elif re.match('^\w+:(monday|tuesday|wednesday|thursday|friday|saturday|sunday|mon|tues|tue|wed|thurs|thu|thur|fri|sat|sun)$', word):
self.starttext, self.duetext = word.rsplit(':', 1)
self.duetext = next_weekday(self.duetext)
self.set_modified()
elif re.match('^\w+:\d*(day|week|month|year)s*$', word):
self.starttext, self.duetext = word.rsplit(':', 1)
self.duetext = next_increment(self.duetext)
self.set_modified()
elif re.match('^\d{6}$', word):
self.duetext = word
elif re.match('^\d{10}$', word):
self.duetext = word
elif re.match('^\d{6}:$', word):
self.starttext = word[:-1]
elif re.match('^\d{10}:$', word):
self.starttext = word[:-1]
elif re.match('^\w+:\d{6}$', word):
self.starttext, self.duetext = word.rsplit(':', 1)
elif re.match('^\w+:\d{10}$', word):
self.starttext, self.duetext = word.rsplit(':', 1)
elif re.match('^\w+:$', word):
self.starttext = word[:-1]
elif re.match('^!\d{6}$', word):
self.alarmtext = word[1:]
elif re.match('^!\d{10}$', word):
self.alarmtext = word[1:]
elif (re.match('^!\w+$', word) and is_relative_date(word)):
self.alarmtext = word[1:]
elif re.match('^!$', word):
self.priority = 9
elif re.match('^!!$', word):
self.priority = 5
elif re.match('^!!!$', word):
self.priority = 1
elif re.match('^every\w+$', word):
self.repeat = word
elif re.match('^after\w+$', word):
self.repeat = word
elif re.match('^random$', word):
self.repeat = word
elif word in ['decennially', 'biennially', 'annually', 'monthly', 'fortnightly', 'weekly', 'daily']:
self.repeat = word
elif re.match('^expire\w+$', word):
self.expiretext = word
elif re.match('^checklist$', word):
self.is_checklist = True
elif re.match('^sequential$', word):
self.flowtext = 'sequential'
elif re.match('^parallel$', word):
self.flowtext = 'parallel'
elif re.match('^header$', word):
self.is_header = True
elif re.match('^completed$', word):
self.is_completed = True
elif re.match('^paused$', word):
self.is_onhold = True
elif re.match('^onhold$', word):
self.is_onhold = True
elif re.match('^hold$', word):
self.is_onhold = True
elif re.match('^permanent$', word):
self.is_permanent = True
elif re.match('^avoidweekends$', word):
self.avoidweekends = True
elif re.match('^current$', word):
self.current = True
#elif re.match('^everpresent$', word):
# self.is_everpresent = True
elif re.match('^waitonrepeat$', word):
self.waitonrepeat = True
#self.wait = 'wait'
elif re.match('^wait$', word):
self.wait = word
elif re.match('^ERROR$', word):
self.error = True
# asc
elif re.match('^=\w+$', word):
self.translate = word[1:]
elif re.match('^@\w+$', word):
self.sublist = word[1:]
else:
titlelist.append(word)
if self.flowtext is not None:
self.flow = self.flowtext
|
gpl-3.0
| -4,814,139,854,392,383,000 | 34.845088 | 271 | 0.593531 | false |
npinto/pytest
|
testing/test_helpconfig.py
|
1
|
2191
|
import py, pytest,os
from _pytest.helpconfig import collectattr
def test_version(testdir, pytestconfig):
result = testdir.runpytest("--version")
assert result.ret == 0
#p = py.path.local(py.__file__).dirpath()
result.stderr.fnmatch_lines([
'*py.test*%s*imported from*' % (pytest.__version__, )
])
if pytestconfig.pluginmanager._plugin_distinfo:
result.stderr.fnmatch_lines([
"*setuptools registered plugins:",
"*at*",
])
def test_help(testdir):
result = testdir.runpytest("--help")
assert result.ret == 0
result.stdout.fnmatch_lines([
"*-v*verbose*",
"*setup.cfg*",
"*minversion*",
])
def test_collectattr():
class A:
def pytest_hello(self):
pass
class B(A):
def pytest_world(self):
pass
methods = py.builtin.sorted(collectattr(B))
assert list(methods) == ['pytest_hello', 'pytest_world']
methods = py.builtin.sorted(collectattr(B()))
assert list(methods) == ['pytest_hello', 'pytest_world']
def test_hookvalidation_unknown(testdir):
testdir.makeconftest("""
def pytest_hello(xyz):
pass
""")
result = testdir.runpytest()
assert result.ret != 0
result.stderr.fnmatch_lines([
'*unknown hook*pytest_hello*'
])
def test_hookvalidation_optional(testdir):
testdir.makeconftest("""
import pytest
@pytest.mark.optionalhook
def pytest_hello(xyz):
pass
""")
result = testdir.runpytest()
assert result.ret == 0
def test_traceconfig(testdir):
result = testdir.runpytest("--traceconfig")
result.stdout.fnmatch_lines([
"*using*pytest*py*",
"*active plugins*",
])
def test_debug(testdir, monkeypatch):
result = testdir.runpytest("--debug")
assert result.ret == 0
p = testdir.tmpdir.join("pytestdebug.log")
assert "pytest_sessionstart" in p.read()
def test_PYTEST_DEBUG(testdir, monkeypatch):
monkeypatch.setenv("PYTEST_DEBUG", "1")
result = testdir.runpytest()
assert result.ret == 0
result.stderr.fnmatch_lines([
"*registered*PluginManager*"
])
|
mit
| -6,169,271,513,890,304,000 | 27.089744 | 61 | 0.61068 | false |
IdeaSolutionsOnline/ERP4R
|
core/objs/mapa_gestao.py
|
1
|
1819
|
# !/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
ERP+
"""
__author__ = ['António Anacleto', 'Jair Medina', 'Abner Oliveira']
__credits__ = []
__version__ = "1.0"
__maintainer__ = ['António Anacleto', 'Jair Medina', 'Jair Medina', 'Abner Oliveira']
__status__ = "Development"
__model_name__= 'mapa_gestao.MapaGestao'
#import base_models#auth,
from orm import *
from form import *
class MapaGestao(Model, View):
def __init__(self, **kargs):
Model.__init__(self, **kargs)
self.__name__ = 'mapa_gestao'
self.__title__= 'Mapa Gestão'
self.__model_name__ = __model_name__
self.__list_edit_mode__ = 'edit'
self.__workflow__ = (
'estado', {'Rascunho':['Imprimir', 'Valores0', 'Imp.Excel', 'Imp.Primavera', 'Imp.PHC'], 'Confirmado':['Imprimir']}
)
self.__workflow_auth__ = {
'Imprimir':['All'],
'Valores0':['All'],
'Imp.Excel':['All'],
'Imp.PHC':['All'],
'Imp.Primavera':['All'],
'Rascunho':['Gestor'],
}
self.nif = string_field(view_order=1, name='Nif', size=80)
self.ano_fiscal = string_field(view_order=2, name='Ano Fiscal', size=5)
self.estado = info_field(view_order=3, name ='Estado', default='Rascunho', onlist=False, hidden=True, nolabel=True,)
self.mapa = combo_field(view_order=4, name ='Mapa', options=[('balancete','Balancete'), ('balanco','Balanço'), ('demonst_resul','Demonstração de Resultado'), ('fluxoCaixa','Fluxo de Caixa')], onlist=False, default='Praia')
self.linha_mapa_gestao = list_field(view_order = 5, name = 'Valores', model_name = 'linha_mapa_gestao.LinhaMapaGestao', condition = "factura_cli='{id}'", list_edit_mode = 'inline', onlist = False)
|
mit
| 6,957,941,945,899,023,000 | 30.258621 | 230 | 0.56481 | false |
ani2404/ee6761cloud
|
inference.py
|
1
|
1332
|
# Build the model, restore the variables and run the inference
# Need to use SavedModel builder and loader instead - future work
import sys
sys.path.append('/home/ani2404/Desktop/ee6761cloud/')
import numpy as np
#Need to replace with the actual model
from code_ref.model import Model
class infer(object):
def __init__(self,session,checkpoint_dir,image_size_x,image_size_y,resolution_factor=4,batch_size=1):
#Build the model based on resolution factor
self.session = session
self.model = Model(session, checkpoint_dir=checkpoint_dir,batch_size=batch_size,
image_size_x=image_size_x,image_size_y=image_size_y,resolution_factor=resolution_factor)
self.resolution_factor = resolution_factor
# Restores the variables from the checkpoint dir
if self.model.load(checkpoint_dir):
print(" [*] Load SUCCESS")
else:
print(" [*] Load Failed")
def super_resolute(self,input_image):
# Super resolutes the input image
output_images,up_input = self.session.run([self.model.ESCNN,self.model.interpolation],
feed_dict={self.model.inputs:input_image})
output_images = np.array(output_images).astype(np.float32)
return output_images,up_input
|
mit
| 3,867,639,695,548,613,600 | 27.340426 | 115 | 0.660661 | false |
acutesoftware/worldbuild
|
scripts/minecraft/go_minecraft.py
|
1
|
2260
|
# go_minecraft.py
import sys
import time
import aikif.toolbox.interface_windows_tools as mod_tool
players = ['DynamiteBuilder', 'craftandstore']
#server = '1.9'
#server = '1.10'
server = '1.11.2'
seed = 0
if server == '1.11.2':
seed = -7560993781265470572
locations = [
{'name':'home', 'loc':'61 64 239'},
{'name':'Woodlands Mansion', 'loc':'4473 66 5773'},
{'name':'Stronghold', 'loc':'-184 67 1736'},
{'name':'Village', 'loc':'-710 87 548'},
]
elif server == '1.10':
seed = 8239770600742919613
locations = [
{'name':'home', 'loc':'248 66 -61'},
{'name':'farm', 'loc':'960 77 -260' },
{'name':'floating-garden', 'loc':'685 107 -588' },
{'name':'floating-castle', 'loc':'-202 105 -655' },
{'name':'stronghold', 'loc':'415 72 -2198' },
{'name':'village', 'loc':'121 77 -2019' },
{'name':'overhang-lookout/evil storm and zoo / garage', 'loc':'-449 110 -1830' },
{'name':'rock-island / harbour', 'loc':'154 98 384' },
{'name':'enchanted-village','loc':'1082 87 -1297' },
{'name':'flower-garden','loc':'1254 105 -1807' },
]
else:
seed = 2677023417700615710
locations = [
{'name':'v1-home', 'loc':'151 103 736'},
{'name':'v1-treehouse', 'loc':'120 72 662' },
{'name':'v1-castle', 'loc':'-132 68 388' },
{'name':'v1-village', 'loc':'-298 82 946' },
{'name':'v1-stables', 'loc':'-602 82 951' },
{'name':'v1-desert', 'loc':'-1524 97 1580' },
]
print('Minecraft Teleport Service for players ' + str(players))
print('(server version=' + server + ', seed = ' + str(seed) + ' )')
for num, l in enumerate(locations):
print(str(num+1) + ' = ' + l['name'])
loc = locations[int(input('Enter Location ')) - 1]
mod_tool.app_activate('Minecraft server')
for p in players:
print('Teleporting ' + p + ' to ' + loc['name'] + ' (' + loc['loc'] + ')')
mod_tool.send_keys('/tp ' + p + ' ' + loc['loc'])
mod_tool.send_keys("{ENTER}") # needs Enter key
time.sleep(0.1)
|
gpl-2.0
| -5,863,558,261,222,107,000 | 31.285714 | 89 | 0.494248 | false |
ds-hwang/chromium-crosswalk
|
mojo/public/tools/manifest/manifest_collator.py
|
2
|
1537
|
#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" A collator for Mojo Application Manifests """
import argparse
import json
import shutil
import sys
import urlparse
def ParseJSONFile(filename):
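  # Load a JSON manifest, returning None (and printing a message) when the
  # file is not valid JSON.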
with open(filename) as json_file:
try:
return json.load(json_file)
except ValueError:
print "%s is not a valid JSON document" % filename
return None
def main():
parser = argparse.ArgumentParser(
description="Collate Mojo application manifests.")
parser.add_argument("--parent")
parser.add_argument("--output")
parser.add_argument("--application-name")
args, children = parser.parse_known_args()
parent = ParseJSONFile(args.parent)
if parent == None:
return 1
parsed = urlparse.urlparse(parent['url'])
if args.application_name != parsed.hostname:
raise ValueError("Application name '%s' specified in build file does not " \
"match application name '%s' specified in manifest." %
(args.application_name, parsed.hostname))
applications = []
for child in children:
application = ParseJSONFile(child)
if application == None:
return 1
applications.append(application)
if len(applications) > 0:
parent['applications'] = applications
with open(args.output, 'w') as output_file:
json.dump(parent, output_file)
return 0
if __name__ == "__main__":
sys.exit(main())
|
bsd-3-clause
| 8,324,561,153,217,035,000 | 26.446429 | 80 | 0.681848 | false |
ada-x/respect_mah_authoritay
|
movies_project.py
|
1
|
2198
|
import movies # my file with the class definition
import fresh_tomatoes # renders site
pi_movie = movies.Movie('Pi',
'https://www.youtube.com/watch?v=jo18VIoR2xU',
'a mathematician makes an incredible discovery',
'http://images.moviepostershop.com/pi-movie-poster-1998-1020474533.jpg')
big_fish = movies.Movie('Big Fish',
'https://www.youtube.com/watch?v=M3YVTgTl-F0',
'a story about the stories between a father and son',
'http://www.gstatic.com/tv/thumb/movieposters/32942/p32942_p_v8_aa.jpg')
gone_in_60_seconds = movies.Movie('Gone In 60 Seconds',
'https://www.youtube.com/watch?v=o6AyAM1buQ8',
'A reformed car thief is given three days to steal 50 pristine autos',
'http://www.gstatic.com/tv/thumb/movieposters/25612/p25612_p_v8_aa.jpg')
lauberge_espagnole = movies.Movie('L\'auberge Espagnole',
'https://www.youtube.com/watch?v=CCs6AzLeNQI',
'a student\'s adventures living in Barcelona',
'http://www.gstatic.com/tv/thumb/dvdboxart/30919/p30919_d_v8_aa.jpg')
lilo_and_stitch = movies.Movie('Lilo and Stitch',
'https://www.youtube.com/watch?v=hu9bERy7XGY',
'a lonely little girl gets an extra-terrestrial friend',
'http://img.lum.dolimg.com/v1/images/open-uri20150422-12561-1dajwj_23920e88.jpeg?region=0%2C0%2C1000%2C1409')
idiocracy = movies.Movie('Idiocracy',
'https://www.youtube.com/watch?v=BBvIweCIgwk',
'an average american wakes up in the future',
'http://www.gstatic.com/tv/thumb/dvdboxart/159395/p159395_d_v8_aa.jpg')
movies_list = [pi_movie, lilo_and_stitch, lauberge_espagnole,
gone_in_60_seconds, big_fish, idiocracy]
# print(movies_list)
# pi_movie.show_trailer()
# opens and renders display
fresh_tomatoes.open_movies_page(movies_list)
|
unlicense
| 8,596,849,178,169,684,000 | 51.333333 | 140 | 0.572793 | false |
default1406/PhyLab
|
PythonExperimentDataHandle/phylab.py
|
1
|
4356
|
# -*- coding: utf-8 -*-
from math import sqrt
# Keep b decimal places for every value in the 2D list x (with rounding).
def RoundTwo(x,b):
for i in range(len(x)):
for j in range(len(x[i])):
x[i][j] = round(x[i][j],b)
if b == 0:
x[i][j] = ("%d" %x[i][j])
elif b == 1:
x[i][j] = ("%.1f" %x[i][j])
elif b == 2:
x[i][j] = ("%.2f" %x[i][j])
elif b == 3:
x[i][j] = ("%.3f" %x[i][j])
elif b == 4:
x[i][j] = ("%.4f" %x[i][j])
elif b == 5:
x[i][j] = ("%.5f" %x[i][j])
elif b == 6:
x[i][j] = ("%.6f" %x[i][j])
# Keep b decimal places for every value in the 1D list x (with rounding).
def RoundOne(x,b):
for i in range(len(x)):
x[i] = round(x[i],b)
if b == 0:
x[i] = ("%d" %x[i])
elif b == 1:
x[i] = ("%.1f" %x[i])
elif b == 2:
x[i] = ("%.2f" %x[i])
elif b == 3:
x[i] = ("%.3f" %x[i])
elif b == 4:
x[i] = ("%.4f" %x[i])
elif b == 5:
x[i] = ("%.5f" %x[i])
elif b == 6:
x[i] = ("%.6f" %x[i])
# Compute the type-A uncertainty: x is a list, aver is the mean of x, k is the
# number of data points (not necessarily len(x), since the mean of x may have
# been appended to the end of x).
def Ua(x, aver, k) :
sumx = 0
for i in range(k):
sumx += (x[i] - aver)**2
return sqrt(sumx/(k*(k-1)))
# Format the final result as (f ± u_f): given the computed result and its
# uncertainty, return the result in its final presentation form.
def BitAdapt(x,u_x) :
ten = 0
ften = 0
if (u_x >= 10):
temp = x
while(temp >= 10):
temp = temp/10
ten += 1
x = float(x)/10**ten
u_x = float(u_x)/10**ten
elif (x < 0.001):
temp = x
ften = 0
while(temp < 1):
temp = temp*10
ften += 1
x = float(x) * 10**ften
u_x = float(u_x) * 10**ften
Tempbit = 0
bit = 0
while (1):
i = 0
while(1):
temp = float(u_x)*(10**i)
if(temp >= 1):
bit = i
break
else :
i+=1
u_x = round(float(u_x),bit)
x = round(float(x),bit)
u_x = ("%.*f"%(bit, u_x))
x = ("%.*f"%(bit, x))
# if bit == 0:
# u_x = ("%d" % u_x)
# x = ("%d" % x)
# elif bit == 1:
# u_x = ("%.1f" % u_x)
# x = ("%.1f" % x)
# elif bit == 2:
# u_x = ("%.2f" % u_x)
# x = ("%.2f" % x)
# elif bit == 3:
# u_x = ("%.3f" % u_x)
# x = ("%.3f" % x)
# elif bit == 4:
# u_x = ("%.4f" % u_x)
# x = ("%.4f" % x)
# elif bit == 5:
# u_x = ("%.5f" % u_x)
# x = ("%.5f" % x)
# elif bit == 6:
# u_x = ("%.6f" % u_x)
# x = ("%.6f" % x)
# elif bit == 7:
# u_x = ("%.7f" % u_x)
# x = ("%.7f" % x)
# elif bit == 8:
# u_x = ("%.8f" % u_x)
# x = ("%.8f" % x)
i = 0
while(1):
temp = float(u_x)*(10**i)
if(temp >= 1):
Tempbit = i
break
else :
i+=1
if Tempbit == bit:
break
if ten > 0:
x = "(" + str(x) + "\\pm"
u_x = str(u_x) + "){\\times}10^{" + str(ten) + "}"
elif ften > 0:
x = "(" + str(x) + "\\pm"
u_x = str(u_x) + "){\\times}10^{-" + str(ften) + "}"
else:
x = "(" + str(x) + "\\pm"
u_x = str(u_x) + ")"
return x + u_x
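# Example sketch (made-up numbers) of the formatting performed above:
#   BitAdapt(3.14159, 0.042)   # -> '(3.14\pm0.04)'
#   BitAdapt(31415.9, 42.0)    # -> '(3.142\pm0.004){\times}10^{4}'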
# Convert a number to a scientific-notation (LaTeX) representation
def ToScience(number):
Tempstr = format(number,'.4g')
    # If Tempstr contains 'e', the number was formatted in scientific notation
if 'e' in Tempstr:
index_str = Tempstr.split('e')
if index_str[0] == '1':
return '10^{'+str(int(index_str[1]))+'}'
else:
return index_str[0]+'{\\times}10^{'+str(int(index_str[1]))+'}'
else:
return Tempstr
# Simple linear regression y = a + bx over the 1-D lists x and y
# Returns the list [b, r] (slope and correlation coefficient)
def ULR(x,y):
size = len(x)-1
x_2 = []
y_2 = []
xy = []
for i in range(size):
x_2.append(x[i]**2)
y_2.append(y[i]**2)
xy.append(x[i] * y[i])
x_2.append(sum(x_2)/size)
y_2.append(sum(y_2)/size)
xy.append(sum(xy)/size)
b = (x[size]*y[size]-xy[size])/(pow(x[size],2)-x_2[size])
r = (xy[size] - x[size]*y[size]) / sqrt((x_2[size] - pow(x[size],2))*(y_2[size]-pow(y[size],2)))
res = [b,r]
return res
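# Example sketch (made-up data): note that ULR expects the mean of each list to be
# appended as its last element before the call,
#   x = [1.0, 2.0, 3.0, 4.0]; y = [2.1, 3.9, 6.1, 8.0]
#   x.append(sum(x) / len(x)); y.append(sum(y) / len(y))
#   b, r = ULR(x, y)   # slope b is roughly 2 here, r close to 1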
# Compute the instrument error limit for a resistance reading R
def DELTA_R(R):
res = 0.02 + R%1*5/100.0
R = R - R%1
res = res + R%10*5/1000.0
R = R - R%10
res = res + R%100*2/1000.0
R = R - R%100
res = res + R/1000.0
return res
# Compute differences by the successive difference method
def DWM(x):
res = []
size = len(x)/2
for i in range(size):
temp = abs(x[i]-x[i+size])
res.append(temp)
return res
# Error requirement used in tests: return 1 if y is within the allowed error range of x, otherwise 0
def Mistake(x,y):
x = abs(x)
y = abs(y)
r1 = x+x/100
r2 = x-x/100
if (y > r1) | (y <r2):
return 0
else:
return 1
|
gpl-2.0
| -669,399,115,657,459,200 | 18.959391 | 97 | 0.453204 | false |
impakho/PyProxy
|
server.py
|
1
|
7402
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import socket
import threading
def isset(obj):
if obj == 0: return 0
return 1
def getMethod(header):
if 'CONNECT' in header[0:10]: return 1
return 0
def getHost(header):
rn = '\r\n'
kw = 'Host: '
if not kw in header: return ''
hostSplit = header.split(kw)[1]
if not rn in hostSplit: return ''
return hostSplit.split(rn)[0]
def getHost_IP(header):
ip = getHost(header)
kw = ':'
if not len(ip): return ''
if kw in ip: ip = ip.split(kw)[0]
try:
ip = socket.gethostbyname(ip)
if '127.0.0.' in ip: ip = ''
return ip
except:
return ''
def getHost_Port(header):
port = getHost(header)
kw = ':'
if not len(port): return 0
if kw in port:
return int(port.split(kw)[1])
else:
return 80
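# Example sketch (request text is illustrative only): for a header such as
#   req = 'GET http://example.com/ HTTP/1.1\r\nHost: example.com:8080\r\n\r\n'
# getMethod(req) returns 0, getHost(req) returns 'example.com:8080',
# getHost_Port(req) returns 8080, and getHost_IP(req) resolves the name via DNS.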
def clientIn(client, address):
succ = 1
header = ''
headerOver = 0
method = -1
ip = ''
port = 0
length = -1
lengthRecv = -1
while True:
try:
client.settimeout(30)
            recv = client.recv(10)
except:
print('Client Timeout')
succ = 0
break
if len(recv):
if headerOver == 0:
header += recv
if '\r\n\r\n' in header: headerOver = 1
if '\r\n' in header and method == -1:
method = getMethod(header)
if ' http://' in header:
headerReStr = 'http://' + header.split('http://')[1].split('/')[0]
header = header.replace(headerReStr, '')
if ip == '': ip = getHost_IP(header)
if port == 0 and method == 0: port = getHost_Port(header)
if port == 0 and method == 1: port = 443
if '\r\nProxy-Connection: ' in header:
headerReStr = '\r\nProxy-Connection: '
header = header.replace(headerReStr, '\r\nConnection: ')
if '\r\nContent-Length: ' in header and length == -1:
lengthSplit = header.split('\r\nContent-Length: ')[1]
if '\r\n' in lengthSplit: length = int(lengthSplit.split('\r\n')[0])
if headerOver == 1:
if lengthRecv == -1:
lengthRecv = len(header.split('\r\n\r\n')[1])
sockr = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sockr.connect((ip, port))
except:
print('ConnectERR: ' + ip + ':' + str(port))
succ = 0
break
else:
header = recv
lengthRecv = len(header)
if length != -1 and length != 0:
if length > lengthRecv:
length -= lengthRecv
else:
recv = recv[:length]
length = 0
if method == 1: break
try:
sockr.send(header)
except:
print('Socket Closed')
succ = 0
break
if length == -1 or length == 0:
break
else:
break
if succ == 1:
if method == 0:
sockrRecv(client, sockr)
if method == 1:
client.send('HTTP/1.1 200 Connection Established\r\n\r\n')
threadRecvSSL = threading.Thread(target=sockrRecvSSL, args=(client, sockr))
threadRecvSSL.start()
threadSendSSL = threading.Thread(target=sockrSendSSL, args=(client, sockr))
threadSendSSL.start()
else:
client.close()
def sockrRecv(client, sockr):
SSL = 0
header = ''
headerOver = 0
status = 1
length = -1
lengthRecv = -1
gzip = 0
while True:
try:
sockr.settimeout(30)
recv = sockr.recv(256)
except:
print('Socket Timeout')
break
if len(recv):
if headerOver == 0:
header += recv
if '\r\n\r\n' in header: headerOver = 1
if '\r\n' in header and status == 1:
statusSplit = header.split('\r\n')[0]
if ' 1' in statusSplit: status = 0
if '204' in statusSplit: status = 0
if '304' in statusSplit: status = 0
if '\r\nContent-Length: ' in header and length == -1:
lengthSplit = header.split('\r\nContent-Length: ')[1]
if '\r\n' in lengthSplit: length = int(lengthSplit.split('\r\n')[0])
if '\r\nTransfer-Encoding: chunked\r\n' in header and gzip == 0: gzip = 1
if headerOver == 1:
if lengthRecv == -1:
lengthRecv = len(header.split('\r\n\r\n')[1])
else:
lengthRecv = len(recv)
if status == 0:
recv = recv.split('\r\n\r\n')[0] + '\r\n\r\n'
elif length != -1:
if length > lengthRecv:
length -= lengthRecv
else:
recv = recv[:length]
length = 0
elif gzip == 1:
if '\r\n0\r\n\r\n' in recv:
recv = recv.split('\r\n0\r\n\r\n')[0] + '\r\n0\r\n\r\n'
gzip = -1
if header == 'HTTP/1.1 200 Connection Established\r\n\r\n':
threadRecvSSL = threading.Thread(target=sockrRecvSSL, args=(client, sockr))
threadRecvSSL.start()
threadSendSSL = threading.Thread(target=sockrSendSSL, args=(client, sockr))
threadSendSSL.start()
SSL = 1
length = 0
try:
client.send(recv)
except:
print('Client Closed')
break
if headerOver == 1:
if status == 0 or length == 0 or gzip == -1:
break
else:
break
if SSL == 0:
sockr.close()
client.close()
def sockrRecvSSL(client, sockr):
while True:
try:
sockr.settimeout(60)
recv = sockr.recv(256)
except:
break
if len(recv):
try:
client.send(recv)
except:
break
else:
break
sockr.close()
client.close()
def sockrSendSSL(client, sockr):
while True:
try:
client.settimeout(60)
recv = client.recv(10)
except:
break
if len(recv):
try:
sockr.send(recv)
except:
break
else:
break
sockr.close()
client.close()
def main():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('0.0.0.0', 5200))
sock.listen(4096)
while True:
client, address = sock.accept()
thread = threading.Thread(target=clientIn, args=(client, address))
thread.start()
if __name__ == '__main__':
main()
|
mit
| 6,333,505,266,663,583,000 | 29.460905 | 91 | 0.450014 | false |
alisheykhi/SocialPDA
|
graph_util.py
|
1
|
8934
|
import re,collections,operator
import networkx as nx
from privacy_level import privacy_level_generator
from numpy.random import zipf
from math import ceil
class ReadGraph():
extension = []
G = nx.Graph()
properties = {}
nodes = []
edges = []
privacy_level = []
sorted_degree_sequence = []
def __init__(self, file_name,level):
print "-----------------------------------------------------------"
print "___________________Developed for___________________________"
print "-----------------------------------------------------------"
print "title: SocialPDA: A Structure-Aware Approach for Personalized Degree Anonymity in Social Network Graphs"
print "Author: Ali Sheykhi and Mahdi Abadi"
print "Faculty of Electrical and Computer Engineering, Tarbiat Modares University, Tehran, Iran"
print "{ali.sheykhi, abadi}@modares.ac.ir"
print "-----------------------------------------------------------"
print "___________________Initial Setup___________________________"
print "-----------------------------------------------------------"
self.file_name = file_name
print "file name : ",self.file_name
ReadGraph.extension = ["csv", "txt", "gml", "net"]
self.converter(level)
def converter(self,level):
'''
chose correct converter
:return:
'''
file_type = self.file_type()
if file_type == "gml":
print "Convert gml file ... "
self.gml_to_graph(level)
if file_type == "txt":
print "Convert txt file ... "
self.txt_to_graph(level)
def file_type(self):
'''
return dataSet file type
:return: file name
'''
if self.is_valid():
return self.file_name.split(".")[-1]
def is_valid(self):
'''
check for valid graph type
:return:
'''
file_extension = self.file_name.split(".")[-1]
if (file_extension):
if (file_extension.lower() in ReadGraph.extension):
return True
else:
print "Unknown file extension \"",file_extension,"\", use:",ReadGraph.extension
return False
else:
print "file does not have an extension!"
return False
def gml_to_graph(self,level):
'''
convert gml graph to TUNGraph
:return:
'''
# try:
# file_path = "DataSet/"+self.file_name
# except:
# print "can't open "+self.file_name
# else:
# print "reading gml file ... "
# M = nx.MultiGraph(nx.read_gml('DataSet/polblogs.gml'))
# for u,v,data in M.edges_iter(data=True):
# if ReadGraph.G.has_edge(u,v):
# pass
# else:
# ReadGraph.G.add_edge(u, v)
# ReadGraph.properties ['edge_count'] = len(ReadGraph.edges)
# print len(ReadGraph.G.node)
# self.degree_sequence()
try:
file_path = "DataSet/"+self.file_name
ifile = open(file_path,'r')
except:
print "can't open "+self.file_name
else:
text = ifile.read()
ifile.close()
if text:
print "reading gml file ... "
pattern_meas = re.compile(r"source\s(\d+)\s+target\s(\d+)", re.VERBOSE | re.MULTILINE)
pattern_id = re.compile(r"id\s(\d+)", re.VERBOSE | re.MULTILINE)
for match in pattern_meas.finditer(text):
ReadGraph.edges.append("%s,%s" % (match.group(1), match.group(2)))
for match in pattern_id.finditer(text):
ReadGraph.nodes.append("%s" % match.group(1))
node_count = 0
for node in ReadGraph.nodes:
ReadGraph.G.add_node(int(node))
node_count += 1
for edge in ReadGraph.edges:
ReadGraph.G.add_edge(int(edge.split(",")[0]) ,int( edge.split(",")[1]))
sum = 0
count = 0
for NI in ReadGraph.G.degree().values():
#print "node: %d, out-degree %d, in-degree %d" % ( NI.GetId(), NI.GetOutDeg(), NI.GetInDeg())
sum += NI
count+=1
ReadGraph.properties ['edge_count'] = sum/2
self.degree_sequence(level)
def txt_to_graph(self,level):
"""
        convert txt graph to TUNGraph
:return:
"""
try:
file_path = "DataSet/"+self.file_name
ifile = open(file_path ,'r')
except:
print "can't open "+self.file_name
else:
text = ifile.read()
ifile.close()
if text:
print "reading txt file ... "
nodes_list = []
if self.file_name.split(".")[0] == 'caida':
pattern_meas = re.compile(r"^(\d+)\s+(\d+)\s+([-]?\d+)$", re.VERBOSE | re.MULTILINE)
if self.file_name.split(".")[0] == 'caida_test':
pattern_meas = re.compile(r"^(\d+)\s+(\d+)\s+([-]?\d+)$", re.VERBOSE | re.MULTILINE)
if self.file_name.split(".")[0] == 'amazon':
pattern_meas = re.compile(r"^(\d+)\s+(\d+)", re.VERBOSE | re.MULTILINE)
for match in pattern_meas.finditer(text):
# nodes_list.append("%s" % int(match.group(1)))
# nodes_list.append("%s" % int(match.group(2)))
ReadGraph.G.add_edge(int(match.group(1)),int( match.group(2)))
# ReadGraph.nodes = list(set(nodes_list))
# for node in ReadGraph.nodes:
# ReadGraph.G.add_node(int(node))
# for edge in ReadGraph.edges:
# ReadGraph.G.add_edge(int(edge.split(",")[0]) ,int( edge.split(",")[1]))
sum = 0
count = 0
for NI in ReadGraph.G.degree().values():
#print "node: %d, out-degree %d, in-degree %d" % ( NI.GetId(), NI.GetOutDeg(), NI.GetInDeg())
sum += NI
count+=1
ReadGraph.properties ['edge_count'] = sum/2
self.degree_sequence(level)
def degree_sequence(self,level):
print nx.info(ReadGraph.G)
result_in_degree = ReadGraph.G.degree().values()
privacy_file_name = self.file_name.split(".")[0]+"_privacy.txt"
privacy_level = privacy_level_generator(file_name=privacy_file_name,lvl =level)
# departed = []
for node in ReadGraph.G.nodes():
if ReadGraph.G.degree(node):
current_node = dict(degree = ReadGraph.G.degree(node), id=node)
ReadGraph.sorted_degree_sequence.append(current_node)
# if ReadGraph.G.degree(node) == 1:
# departed.append(list(ReadGraph.G.edges_iter(node))[0])
# for item in departed:
# for item2 in departed:
# if item[1] == item2[0]:
# print item, item2
ReadGraph.sorted_degree_sequence.sort(key=lambda x:(x['degree']), reverse=True)
# for i in range (0,5):
# print ReadGraph.sorted_degree_sequence[i]
for i in range(0, len(ReadGraph.sorted_degree_sequence)):
if ReadGraph.sorted_degree_sequence[i]:
ReadGraph.sorted_degree_sequence[i]['privacy_level'] = int(privacy_level[i])
#ReadGraph.sorted_degree_sequence.sort(key=lambda x:(x['privacy_level'],x['degree']), reverse=True)
ReadGraph.properties['node_count'] = len(ReadGraph.sorted_degree_sequence)
max_degree = None
max_degree_id = None
for node in ReadGraph.sorted_degree_sequence:
if node['degree'] > max_degree:
max_degree = node['degree']
max_degree_id = node['id']
ReadGraph.properties ['max_degree_id'] = max_degree_id
ReadGraph.properties ['max_privacy'] = ReadGraph.sorted_degree_sequence[0]['privacy_level']
ReadGraph.properties ['max_privacy_id'] = ReadGraph.sorted_degree_sequence[0]['id']
ReadGraph.properties ['max_degree_size'] = max_degree
ReadGraph.properties ['avg_degree'] = 2 * (float (ReadGraph.properties ['edge_count'])/float (ReadGraph.properties ['node_count']))
node_occur = collections.Counter (result_in_degree)
sorted_node_oc = sorted(node_occur.items(), key=operator.itemgetter(1))
ReadGraph.properties ['k'] = sorted_node_oc[0][1]
print ReadGraph.properties
print "for example, the first node in sorted degree sequence is :" + str(ReadGraph.sorted_degree_sequence[0])
|
apache-2.0
| -4,987,266,179,027,643,000 | 39.243243 | 143 | 0.504589 | false |
EMSTrack/WebServerAndClient
|
equipment/serializers.py
|
1
|
1031
|
from rest_framework import serializers
from .models import EquipmentItem, Equipment
class EquipmentItemSerializer(serializers.ModelSerializer):
equipment_name = serializers.CharField(source='equipment.name')
equipment_type = serializers.CharField(source='equipment.type')
class Meta:
model = EquipmentItem
fields = ('equipmentholder_id',
'equipment_id', 'equipment_name', 'equipment_type',
'value', 'comment',
'updated_by', 'updated_on')
read_only_fields = ('equipmentholder_id',
'equipment_id', 'equipment_name', 'equipment_type',
'updated_by',)
# def validate(self, data):
# # call super
# validated_data = super().validate(data)
#
# # TODO: validate equipment value using equipment_type
# return validated_data
class EquipmentSerializer(serializers.ModelSerializer):
class Meta:
model = Equipment
fields = '__all__'
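# Usage sketch (assumes an existing EquipmentItem instance named item):
#   data = EquipmentItemSerializer(item).data
#   # data includes equipment_name and equipment_type pulled from the related Equipment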
|
bsd-3-clause
| -6,224,577,214,652,226,000 | 32.290323 | 79 | 0.608147 | false |
interlegis/sapl
|
sapl/sessao/migrations/0046_auto_20191001_1115.py
|
1
|
2629
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-10-01 14:15
from __future__ import unicode_literals
from django.db import migrations, models
import sapl.sessao.models
import sapl.utils
class Migration(migrations.Migration):
dependencies = [
('sessao', '0045_auto_20190816_1337'),
]
operations = [
migrations.AlterField(
model_name='justificativaausencia',
name='upload_anexo',
field=models.FileField(blank=True, max_length=200, null=True, storage=sapl.utils.OverwriteStorage(), upload_to=sapl.sessao.models.anexo_upload_path, verbose_name='Anexo de Justificativa'),
),
migrations.AlterField(
model_name='orador',
name='upload_anexo',
field=models.FileField(blank=True, max_length=200, null=True, storage=sapl.utils.OverwriteStorage(), upload_to=sapl.sessao.models.anexo_upload_path, verbose_name='Anexo do Orador'),
),
migrations.AlterField(
model_name='oradorexpediente',
name='upload_anexo',
field=models.FileField(blank=True, max_length=200, null=True, storage=sapl.utils.OverwriteStorage(), upload_to=sapl.sessao.models.anexo_upload_path, verbose_name='Anexo do Orador'),
),
migrations.AlterField(
model_name='oradorordemdia',
name='upload_anexo',
field=models.FileField(blank=True, max_length=200, null=True, storage=sapl.utils.OverwriteStorage(), upload_to=sapl.sessao.models.anexo_upload_path, verbose_name='Anexo do Orador'),
),
migrations.AlterField(
model_name='sessaoplenaria',
name='upload_anexo',
field=models.FileField(blank=True, max_length=200, null=True, storage=sapl.utils.OverwriteStorage(), upload_to=sapl.sessao.models.anexo_upload_path, verbose_name='Anexo da Sessão'),
),
migrations.AlterField(
model_name='sessaoplenaria',
name='upload_ata',
field=models.FileField(blank=True, max_length=200, null=True, storage=sapl.utils.OverwriteStorage(), upload_to=sapl.sessao.models.ata_upload_path, validators=[sapl.utils.restringe_tipos_de_arquivo_txt], verbose_name='Ata da Sessão'),
),
migrations.AlterField(
model_name='sessaoplenaria',
name='upload_pauta',
field=models.FileField(blank=True, max_length=200, null=True, storage=sapl.utils.OverwriteStorage(), upload_to=sapl.sessao.models.pauta_upload_path, validators=[sapl.utils.restringe_tipos_de_arquivo_txt], verbose_name='Pauta da Sessão'),
),
]
|
gpl-3.0
| 8,936,751,130,462,044,000 | 49.5 | 249 | 0.660701 | false |
hpcloud-mon/monasca-events-api
|
monasca_events_api/common/repositories/mysql/streams_repository.py
|
1
|
12836
|
# Copyright 2015 Hewlett-Packard
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import MySQLdb
from oslo_log import log
from oslo_utils import timeutils
from monasca_events_api.common.repositories import constants
from monasca_events_api.common.repositories import exceptions
from monasca_events_api.common.repositories.mysql import mysql_repository
from monasca_events_api.common.repositories import streams_repository as sdr
LOG = log.getLogger(__name__)
class StreamsRepository(mysql_repository.MySQLRepository,
sdr.StreamsRepository):
base_query = """
select sd.id, sd.tenant_id, sd.name, sd.description,
sd.select_by, sd.group_by, sd.fire_criteria, sd.expiration,
sd.actions_enabled, sd.created_at,
sd.updated_at, sd.deleted_at,
saf.fire_actions, sae.expire_actions
from stream_definition as sd
left join (select stream_definition_id,
group_concat(action_id) as fire_actions
from stream_actions
where action_type = 'FIRE'
group by stream_definition_id) as saf
on saf.stream_definition_id = sd.id
left join (select stream_definition_id,
group_concat(action_id) as expire_actions
from stream_actions
where action_type = 'EXPIRE'
group by stream_definition_id) as sae
on sae.stream_definition_id = sd.id
"""
def __init__(self):
super(StreamsRepository, self).__init__()
@mysql_repository.mysql_try_catch_block
def get_stream_definition(self, tenant_id, stream_definition_id):
parms = [tenant_id, stream_definition_id]
where_clause = """ where sd.tenant_id = %s
and sd.id = %s
and deleted_at is NULL """
query = StreamsRepository.base_query + where_clause
rows = self._execute_query(query, parms)
if rows:
return rows[0]
else:
raise exceptions.DoesNotExistException
@mysql_repository.mysql_try_catch_block
def get_stream_definitions(self, tenant_id, name, offset=None, limit=None):
parms = [tenant_id]
select_clause = StreamsRepository.base_query
where_clause = " where sd.tenant_id = %s and deleted_at is NULL "
if name:
where_clause += " and sd.name = %s "
parms.append(name.encode('utf8'))
if offset is not None:
order_by_clause = " order by sd.id, sd.created_at "
where_clause += " and sd.id > %s "
parms.append(offset.encode('utf8'))
limit_clause = " limit %s "
parms.append(constants.PAGE_LIMIT)
else:
order_by_clause = " order by sd.created_at "
limit_clause = ""
if limit:
limit_clause = " limit %s"
parms.append(int(limit))
query = select_clause + where_clause + order_by_clause + limit_clause
return self._execute_query(query, parms)
@mysql_repository.mysql_try_catch_block
def get_all_stream_definitions(self, offset=None, limit=None):
parms = []
select_clause = StreamsRepository.base_query
where_clause = " where deleted_at is NULL "
if offset is not None:
order_by_clause = " order by sd.id, sd.created_at "
where_clause += " and sd.id > %s "
parms.append(offset.encode('utf8'))
limit_clause = " limit %s "
if limit is not None:
parms.append(limit)
else:
parms.append(constants.PAGE_LIMIT)
else:
order_by_clause = " order by sd.created_at "
limit_clause = ""
query = select_clause + where_clause + order_by_clause + limit_clause
return self._execute_query(query, parms)
@mysql_repository.mysql_try_catch_block
def delete_stream_definition(self, tenant_id, stream_definition_id):
"""Delete the stream definition.
:param tenant_id:
:param stream_definition_id:
:returns True: -- if stream definition exists and was deleted.
        :returns False: -- if the stream definition does not exist.
:raises RepositoryException:
"""
cnxn, cursor = self._get_cnxn_cursor_tuple()
with cnxn:
cursor.execute("""delete from stream_definition
where tenant_id = %s and id = %s""",
[tenant_id, stream_definition_id])
if cursor.rowcount < 1:
return False
return True
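    # Usage sketch (identifiers are placeholders):
    #   repo = StreamsRepository()
    #   deleted = repo.delete_stream_definition('some-tenant-id', 'some-stream-id')
    #   # deleted is False when no matching definition exists for that tenant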
@mysql_repository.mysql_try_catch_block
def create_stream_definition(self,
tenant_id,
name,
description,
select,
group_by,
fire_criteria,
expiration,
fire_actions,
expire_actions):
cnxn, cursor = self._get_cnxn_cursor_tuple()
with cnxn:
now = timeutils.utcnow()
stream_definition_id = str(uuid.uuid1())
try:
cursor.execute("""insert into stream_definition(
id,
tenant_id,
name,
description,
select_by,
group_by,
fire_criteria,
expiration,
created_at,
updated_at)
values (%s, %s, %s, %s, %s, %s, %s, %s, %s,
%s)""", (
stream_definition_id, tenant_id, name.encode('utf8'),
description.encode('utf8'), select.encode('utf8'),
group_by.encode('utf8'), fire_criteria.encode('utf8'),
expiration, now, now))
except MySQLdb.IntegrityError as e:
code, msg = e
if code == 1062:
raise exceptions.AlreadyExistsException(
'Stream Definition already '
'exists for tenant_id: {0} name: {1}'.format(
tenant_id, name.encode('utf8')))
else:
raise e
self._insert_into_stream_actions(cursor, stream_definition_id,
fire_actions, u"FIRE")
self._insert_into_stream_actions(cursor, stream_definition_id,
expire_actions,
u"EXPIRE")
return stream_definition_id
@mysql_repository.mysql_try_catch_block
def patch_stream_definition(self, tenant_id, stream_definition_id, name, description, select, group_by,
fire_criteria, expiration, fire_actions, expire_actions):
cnxn, cursor = self._get_cnxn_cursor_tuple()
with cnxn:
            # Get the original stream definition from the DB
parms = [tenant_id, stream_definition_id]
where_clause = """ where sd.tenant_id = %s
and sd.id = %s"""
query = StreamsRepository.base_query + where_clause
cursor.execute(query, parms)
if cursor.rowcount < 1:
raise exceptions.DoesNotExistException
original_definition = cursor.fetchall()[0]
# Update that stream definition in the database
patch_query = """
update stream_definition
set name = %s,
description = %s,
select_by = %s,
group_by = %s,
fire_criteria = %s,
expiration = %s,
updated_at = %s
where tenant_id = %s and id = %s"""
if name is None:
name = original_definition['name']
if description is None:
description = original_definition['description']
if select is None:
select = original_definition['select_by']
if select != original_definition['select_by']:
msg = "select_by must not change".encode('utf8')
raise exceptions.InvalidUpdateException(msg)
if group_by is None:
group_by = original_definition['group_by']
if group_by != original_definition['group_by']:
msg = "group_by must not change".encode('utf8')
raise exceptions.InvalidUpdateException(msg)
if fire_criteria is None:
fire_criteria = original_definition['fire_criteria']
if expiration is None:
expiration = original_definition['expiration']
now = timeutils.utcnow()
update_parms = [
name,
description,
select,
group_by,
fire_criteria,
expiration,
now,
tenant_id,
stream_definition_id]
cursor.execute(patch_query, update_parms)
# Update the fire and expire actions in the database if defined
if fire_actions is not None:
self._delete_stream_actions(cursor, stream_definition_id,
u'FIRE')
if expire_actions is not None:
self._delete_stream_actions(cursor, stream_definition_id,
u'EXPIRE')
self._insert_into_stream_actions(cursor, stream_definition_id,
fire_actions,
u"FIRE")
self._insert_into_stream_actions(cursor, stream_definition_id,
expire_actions,
u"EXPIRE")
# Get updated entry from mysql
cursor.execute(query, parms)
return cursor.fetchall()[0]
def _delete_stream_actions(self, cursor, stream_definition_id, action_type):
query = """
delete
from stream_actions
where stream_definition_id = %s and action_type = %s
"""
parms = [stream_definition_id, action_type.encode('utf8')]
cursor.execute(query, parms)
def _insert_into_stream_actions(self, cursor, stream_definition_id,
actions, action_type):
if actions is None:
return
for action in actions:
cursor.execute(
"select id,type from notification_method where id = %s",
(action.encode('utf8'),))
row = cursor.fetchone()
if not row:
raise exceptions.InvalidUpdateException(
"Non-existent notification id {} submitted for {} "
"notification action".format(action.encode('utf8'),
action_type.encode('utf8')))
else:
if row['type'] == 'PAGERDUTY':
raise exceptions.InvalidUpdateException(
"PAGERDUTY action not supported for "
"notification id {} submitted for {} "
"notification action".format(
action.encode('utf8'),
action_type.encode('utf8')))
cursor.execute("""insert into stream_actions(
stream_definition_id,
action_type,
action_id)
values(%s,%s,%s)""", (
stream_definition_id, action_type.encode('utf8'),
action.encode('utf8')))
|
apache-2.0
| 7,464,351,899,179,540,000 | 36.205797 | 107 | 0.507557 | false |
vhb/dotfiles
|
vim/ycm_extra_conf.py
|
1
|
2505
|
import os
import ycm_core
from clang_helpers import PrepareClangFlags
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
# These are the compilation flags that will be used in case there's no
# compilation database set.
flags = [
'-Wall',
'-W',
'-Wextra',
'-std=c++11',
'-stdlib=libc++',
'-x',
'c++',
'-I',
'.',
'-I',
'/usr/include/c++/4.2.1/'
]
if compilation_database_folder:
database = ycm_core.CompilationDatabase(compilation_database_folder)
else:
database = None
def DirectoryOfThisScript():
return os.path.dirname(os.path.abspath(__file__))
def MakeRelativePathsInFlagsAbsolute(flags, working_directory):
if not working_directory:
return flags
new_flags = []
make_next_absolute = False
path_flags = ['-isystem', '-I', '-iquote', '--sysroot=']
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith('/'):
new_flag = os.path.join(working_directory, flag)
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith(path_flag):
path = flag[len(path_flag):]
new_flag = path_flag + os.path.join(working_directory, path)
break
if new_flag:
new_flags.append(new_flag)
return new_flags
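# Example sketch (paths are illustrative):
#   MakeRelativePathsInFlagsAbsolute(['-I', '.', '-Iinclude'], '/home/user/proj')
#   # -> ['-I', '/home/user/proj/.', '-I/home/user/proj/include']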
def FlagsForFile(filename):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = database.GetCompilationInfoForFile(filename)
final_flags = PrepareClangFlags(
MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_),
filename)
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute(flags, relative_to)
return {
'flags': final_flags,
'do_cache': True}
|
mit
| -1,765,271,193,283,489,800 | 28.821429 | 79 | 0.625948 | false |
barneygale/mcocr
|
app/server.py
|
1
|
2546
|
import StringIO
import asyncore
import socket
import urlparse
import re
import settings as settings_herp
import os
import mimetypes
import time
import traceback
import docs
import http
mimetypes.init()
response_reasons = {
200: 'OK',
304: 'Not Modified',
404: 'Not Found',
500: 'Internal Server Error',
501: 'Not Implemented'}
handlers = {}
for name in dir(docs):
if name.endswith('Doc'):
handlers[re.compile(getattr(docs, name).expression)] = getattr(docs, name)
class Server:
def __init__(self):
#Settings handler
self.settings = settings_herp.Settings()
try:
self.settings.load()
except:
self.settings.create()
def serve_forever(self):
self.client_dispatcher = self.ConnectionDispatcher(self.settings)
asyncore.loop(use_poll = False)
#######
#######
#Dispatches incoming connections to a new handler.
class ConnectionDispatcher(asyncore.dispatcher):
id = 0
current_id = 1
def __init__(self, settings):
asyncore.dispatcher.__init__(self)
self.settings = settings
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind((settings['server_host'], settings['server_port']))
self.listen(settings['server_limit'])
def handle_accept(self):
pair = self.accept()
if pair is None:
pass
else:
sock, addr = pair
handler = Server.ConnectionHandler(sock)
handler.settings = self.settings
handler.id = self.current_id
self.current_id += 1
class ConnectionHandler(asyncore.dispatcher):
rbuff = ""
wbuff = ""
def handle_read(self):
self.rbuff += self.recv(self.settings['server_buffersize'])
try:
request = http.HTTPRequest()
request.decode(self.rbuff)
self.rbuff = ""
for i in handlers.iteritems():
m = i[0].match(request._data['path_path'])
if m:
i[1].handle_request(self, request, m.groupdict())
return
#Error state: no handlers recognise the URL!
err = http.HTTPResponse(responsecode=501)
print err.encode()
self.do_write(err.encode())
except http.BufferUnderflowException:
print "Waiting for more data..."
def do_write(self, data):
self.wbuff += data
def handle_write(self):
if self.wbuff:
sent = self.send(self.wbuff)
print "Wrote %d bytes" % sent
self.wbuff = self.wbuff[sent:]
if len(self.wbuff) == 0:
self.close()
def writable(self):
return len(self.wbuff) > 0
def handle_error(self):
err = http.HTTPResponse(responsecode=500, response=traceback.format_exc())
self.do_write(err.encode())
|
bsd-3-clause
| -3,733,676,247,853,530,600 | 23.018868 | 77 | 0.677141 | false |
lisael/pg-django
|
django/db/models/fields/__init__.py
|
1
|
53910
|
import copy
import datetime
import decimal
import math
import warnings
from itertools import tee
from django.db import connection
from django.db.models.query_utils import QueryWrapper
from django.conf import settings
from django import forms
from django.core import exceptions, validators
from django.utils.datastructures import DictWrapper
from django.utils.dateparse import parse_date, parse_datetime, parse_time
from django.utils.functional import curry
from django.utils.text import capfirst
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode, force_unicode, smart_str
from django.utils.ipv6 import clean_ipv6_address
from django.db.models.fields.subclassing import SubfieldBase
class NOT_PROVIDED:
pass
# The values to use for "blank" in SelectFields. Will be appended to the start
# of most "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]
BLANK_CHOICE_NONE = [("", "None")]
class FieldDoesNotExist(Exception):
pass
# A guide to Field parameters:
#
# * name: The name of the field specifed in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
class Field(object):
"""Base class for all field types"""
# Designates whether empty strings fundamentally are allowed at the
# database level.
empty_strings_allowed = True
# These track each time a Field instance is created. Used to retain order.
# The auto_creation_counter is used for fields that Django implicitly
# creates, creation_counter is used for all user-specified fields.
creation_counter = 0
auto_creation_counter = -1
default_validators = [] # Default set of validators
default_error_messages = {
'invalid_choice': _(u'Value %r is not a valid choice.'),
'null': _(u'This field cannot be null.'),
'blank': _(u'This field cannot be blank.'),
'unique': _(u'%(model_name)s with this %(field_label)s '
u'already exists.'),
}
# Generic field type description, usually overriden by subclasses
def _description(self):
return _(u'Field of type: %(field_type)s') % {
'field_type': self.__class__.__name__
}
description = property(_description)
def __init__(self, verbose_name=None, name=None, primary_key=False,
max_length=None, unique=False, blank=False, null=False,
db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
serialize=True, unique_for_date=None, unique_for_month=None,
unique_for_year=None, choices=None, help_text='', db_column=None,
db_tablespace=None, auto_created=False, validators=[],
error_messages=None):
self.name = name
self.verbose_name = verbose_name
self.primary_key = primary_key
self.max_length, self._unique = max_length, unique
self.blank, self.null = blank, null
# Oracle treats the empty string ('') as null, so coerce the null
# option whenever '' is a possible value.
if (self.empty_strings_allowed and
connection.features.interprets_empty_strings_as_nulls):
self.null = True
self.rel = rel
self.default = default
self.editable = editable
self.serialize = serialize
self.unique_for_date, self.unique_for_month = (unique_for_date,
unique_for_month)
self.unique_for_year = unique_for_year
self._choices = choices or []
self.help_text = help_text
self.db_column = db_column
self.db_tablespace = db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
self.auto_created = auto_created
# Set db_index to True if the field has a relationship and doesn't
# explicitly set db_index.
self.db_index = db_index
# Adjust the appropriate creation counter, and save our local copy.
if auto_created:
self.creation_counter = Field.auto_creation_counter
Field.auto_creation_counter -= 1
else:
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
self.validators = self.default_validators + validators
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self.error_messages = messages
def __cmp__(self, other):
# This is needed because bisect does not take a comparison function.
return cmp(self.creation_counter, other.creation_counter)
def __deepcopy__(self, memodict):
# We don't have to deepcopy very much here, since most things are not
# intended to be altered after initial creation.
obj = copy.copy(self)
if self.rel:
obj.rel = copy.copy(self.rel)
memodict[id(self)] = obj
return obj
def to_python(self, value):
"""
Converts the input value into the expected Python data type, raising
django.core.exceptions.ValidationError if the data can't be converted.
Returns the converted value. Subclasses should override this.
"""
return value
def run_validators(self, value):
if value in validators.EMPTY_VALUES:
return
errors = []
for v in self.validators:
try:
v(value)
except exceptions.ValidationError, e:
if hasattr(e, 'code') and e.code in self.error_messages:
message = self.error_messages[e.code]
if e.params:
message = message % e.params
errors.append(message)
else:
errors.extend(e.messages)
if errors:
raise exceptions.ValidationError(errors)
def validate(self, value, model_instance):
"""
Validates value and throws ValidationError. Subclasses should override
this to provide validation logic.
"""
if not self.editable:
# Skip validation for non-editable fields.
return
if self._choices and value:
for option_key, option_value in self.choices:
if isinstance(option_value, (list, tuple)):
# This is an optgroup, so look inside the group for
# options.
for optgroup_key, optgroup_value in option_value:
if value == optgroup_key:
return
elif value == option_key:
return
msg = self.error_messages['invalid_choice'] % value
raise exceptions.ValidationError(msg)
if value is None and not self.null:
raise exceptions.ValidationError(self.error_messages['null'])
if not self.blank and value in validators.EMPTY_VALUES:
raise exceptions.ValidationError(self.error_messages['blank'])
def clean(self, value, model_instance):
"""
Convert the value's type and run validation. Validation errors
from to_python and validate are propagated. The correct value is
returned if no error is raised.
"""
value = self.to_python(value)
self.validate(value, model_instance)
self.run_validators(value)
return value
def db_type(self, connection):
"""
Returns the database column data type for this field, for the provided
connection.
"""
# The default implementation of this method looks at the
# backend-specific DATA_TYPES dictionary, looking up the field by its
# "internal type".
#
# A Field class can implement the get_internal_type() method to specify
# which *preexisting* Django Field class it's most similar to -- i.e.,
# a custom field might be represented by a TEXT column type, which is
# the same as the TextField Django field type, which means the custom
# field's get_internal_type() returns 'TextField'.
#
# But the limitation of the get_internal_type() / data_types approach
# is that it cannot handle database column types that aren't already
# mapped to one of the built-in Django field types. In this case, you
# can implement db_type() instead of get_internal_type() to specify
# exactly which wacky database column type you want to use.
data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
try:
return (connection.creation.data_types[self.get_internal_type()]
% data)
except KeyError:
return None
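    # Sketch of the two approaches described above (field names are illustrative):
    #   class HandField(Field):          # map onto an existing Django column type
    #       def get_internal_type(self):
    #           return 'TextField'
    #
    #   class MyPointField(Field):       # emit a backend-specific column type directly
    #       def db_type(self, connection):
    #           return 'point'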
@property
def unique(self):
return self._unique or self.primary_key
def set_attributes_from_name(self, name):
if not self.name:
self.name = name
self.attname, self.column = self.get_attname_column()
if self.verbose_name is None and self.name:
self.verbose_name = self.name.replace('_', ' ')
def contribute_to_class(self, cls, name):
self.set_attributes_from_name(name)
self.model = cls
cls._meta.add_field(self)
if self.choices:
setattr(cls, 'get_%s_display' % self.name,
curry(cls._get_FIELD_display, field=self))
def get_attname(self):
return self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def get_cache_name(self):
return '_%s_cache' % self.name
def get_internal_type(self):
return self.__class__.__name__
def pre_save(self, model_instance, add):
"""
Returns field's value just before saving.
"""
return getattr(model_instance, self.attname)
def get_prep_value(self, value):
"""
Perform preliminary non-db specific value checks and conversions.
"""
return value
def get_db_prep_value(self, value, connection, prepared=False):
"""Returns field's value prepared for interacting with the database
backend.
        Used by the default implementations of ``get_db_prep_save`` and
        ``get_db_prep_lookup``.
"""
if not prepared:
value = self.get_prep_value(value)
return value
def get_db_prep_save(self, value, connection):
"""
Returns field's value prepared for saving into a database.
"""
return self.get_db_prep_value(value, connection=connection,
prepared=False)
def get_prep_lookup(self, lookup_type, value):
"""
Perform preliminary non-db specific lookup checks and conversions.
        Modified in pg-django to accept the has, has_one and has_all array
        lookup types.
"""
if hasattr(value, 'prepare'):
return value.prepare()
if hasattr(value, '_prepare'):
return value._prepare()
if lookup_type in (
'regex', 'iregex', 'month', 'day', 'week_day', 'search',
'contains', 'icontains', 'iexact', 'startswith', 'istartswith',
'endswith', 'iendswith', 'isnull', 'has'
):
return value
elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte', 'has_all',
'has_one'):
return self.get_prep_value(value)
elif lookup_type in ('range', 'in'):
return [self.get_prep_value(v) for v in value]
elif lookup_type == 'year':
try:
return int(value)
except ValueError:
raise ValueError("The __year lookup type requires an integer "
"argument")
raise TypeError("Field has invalid lookup: %s" % lookup_type)
def get_db_prep_lookup(self, lookup_type, value, connection,
prepared=False):
"""
Returns field's value prepared for database lookup.
        Modified in pg-django to accept the has, has_one and has_all array
        lookup types.
"""
if not prepared:
value = self.get_prep_lookup(lookup_type, value)
if hasattr(value, 'get_compiler'):
value = value.get_compiler(connection=connection)
if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'):
# If the value has a relabel_aliases method, it will need to
# be invoked before the final SQL is evaluated
if hasattr(value, 'relabel_aliases'):
return value
if hasattr(value, 'as_sql'):
sql, params = value.as_sql()
else:
sql, params = value._as_sql(connection=connection)
return QueryWrapper(('(%s)' % sql), params)
if lookup_type in ('regex', 'iregex', 'month', 'day', 'week_day',
'search', 'has'):
return [value]
elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
return [self.get_db_prep_value(value, connection=connection,
prepared=prepared)]
elif lookup_type in ('range', 'in', 'has_one', 'has_all'):
return [self.get_db_prep_value(v, connection=connection,
prepared=prepared) for v in value]
elif lookup_type in ('contains', 'icontains'):
return ["%%%s%%" % connection.ops.prep_for_like_query(value)]
elif lookup_type == 'iexact':
return [connection.ops.prep_for_iexact_query(value)]
elif lookup_type in ('startswith', 'istartswith'):
return ["%s%%" % connection.ops.prep_for_like_query(value)]
elif lookup_type in ('endswith', 'iendswith'):
return ["%%%s" % connection.ops.prep_for_like_query(value)]
elif lookup_type == 'isnull':
return []
elif lookup_type == 'year':
if self.get_internal_type() == 'DateField':
return connection.ops.year_lookup_bounds_for_date_field(value)
else:
return connection.ops.year_lookup_bounds(value)
def has_default(self):
"""
Returns a boolean of whether this field has a default value.
"""
return self.default is not NOT_PROVIDED
def get_default(self):
"""
Returns the default value for this field.
"""
if self.has_default():
if callable(self.default):
return self.default()
return force_unicode(self.default, strings_only=True)
if (not self.empty_strings_allowed or (self.null and
not connection.features.interprets_empty_strings_as_nulls)):
return None
return ""
def get_validator_unique_lookup_type(self):
return '%s__exact' % self.name
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH):
"""Returns choices with a default blank choices included, for use
as SelectField choices for this field."""
first_choice = include_blank and blank_choice or []
if self.choices:
return first_choice + list(self.choices)
rel_model = self.rel.to
if hasattr(self.rel, 'get_related_field'):
lst = [(getattr(x, self.rel.get_related_field().attname),
smart_unicode(x))
for x in rel_model._default_manager.complex_filter(
self.rel.limit_choices_to)]
else:
lst = [(x._get_pk_val(), smart_unicode(x))
for x in rel_model._default_manager.complex_filter(
self.rel.limit_choices_to)]
return first_choice + lst
def get_choices_default(self):
return self.get_choices()
def get_flatchoices(self, include_blank=True,
blank_choice=BLANK_CHOICE_DASH):
"""
Returns flattened choices with a default blank choice included.
"""
first_choice = include_blank and blank_choice or []
return first_choice + list(self.flatchoices)
def _get_val_from_obj(self, obj):
if obj is not None:
return getattr(obj, self.attname)
else:
return self.get_default()
def value_to_string(self, obj):
"""
Returns a string value of this field from the passed obj.
This is used by the serialization framework.
"""
return smart_unicode(self._get_val_from_obj(obj))
def bind(self, fieldmapping, original, bound_field_class):
return bound_field_class(self, fieldmapping, original)
def _get_choices(self):
if hasattr(self._choices, 'next'):
choices, self._choices = tee(self._choices)
return choices
else:
return self._choices
choices = property(_get_choices)
def _get_flatchoices(self):
"""Flattened version of choices tuple."""
flat = []
for choice, value in self.choices:
if isinstance(value, (list, tuple)):
flat.extend(value)
else:
flat.append((choice,value))
return flat
flatchoices = property(_get_flatchoices)
def save_form_data(self, instance, data):
setattr(instance, self.name, data)
def formfield(self, form_class=forms.CharField, **kwargs):
"""
Returns a django.forms.Field instance for this database Field.
"""
defaults = {'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
if self.has_default():
if callable(self.default):
defaults['initial'] = self.default
defaults['show_hidden_initial'] = True
else:
defaults['initial'] = self.get_default()
if self.choices:
# Fields with choices get special treatment.
include_blank = (self.blank or
not (self.has_default() or 'initial' in kwargs))
defaults['choices'] = self.get_choices(include_blank=include_blank)
defaults['coerce'] = self.to_python
if self.null:
defaults['empty_value'] = None
form_class = forms.TypedChoiceField
# Many of the subclass-specific formfield arguments (min_value,
# max_value) don't apply for choice fields, so be sure to only pass
# the values that TypedChoiceField will understand.
for k in kwargs.keys():
if k not in ('coerce', 'empty_value', 'choices', 'required',
'widget', 'label', 'initial', 'help_text',
'error_messages', 'show_hidden_initial'):
del kwargs[k]
defaults.update(kwargs)
return form_class(**defaults)
def value_from_object(self, obj):
"""
Returns the value of this field in the given model instance.
"""
return getattr(obj, self.attname)
def __repr__(self):
"""
Displays the module, class and name of the field.
"""
path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
name = getattr(self, 'name', None)
if name is not None:
return '<%s: %s>' % (path, name)
return '<%s>' % path
class AutoField(Field):
description = _("Integer")
empty_strings_allowed = False
default_error_messages = {
'invalid': _(u"'%s' value must be an integer."),
}
def __init__(self, *args, **kwargs):
assert kwargs.get('primary_key', False) is True, \
"%ss must have primary_key=True." % self.__class__.__name__
kwargs['blank'] = True
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "AutoField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
msg = self.error_messages['invalid'] % str(value)
raise exceptions.ValidationError(msg)
def validate(self, value, model_instance):
pass
def get_prep_value(self, value):
if value is None:
return None
return int(value)
def contribute_to_class(self, cls, name):
assert not cls._meta.has_auto_field, \
"A model can't have more than one AutoField."
super(AutoField, self).contribute_to_class(cls, name)
cls._meta.has_auto_field = True
cls._meta.auto_field = self
def formfield(self, **kwargs):
return None
class SharedAutoField(AutoField):
"""A sequence shared amongst several tables"""
# a cache of sequences already existing in the db
_created = {}
def exists_for_connection(self,connection):
uid = connection.introspection.get_unique_name()
if not (uid, self.sequence) in SharedAutoField._created:
if connection.introspection.sequence_exists(self.sequence):
SharedAutoField._created[(uid, self.sequence)] = True
else:
SharedAutoField._created[(uid, self.sequence)] = False
return SharedAutoField._created[(uid, self.sequence)]
def set_exists_for_connection(self,connection):
"""set the field's db sequence as existing *without check*
Use with caution"""
uid = connection.introspection.get_unique_name()
SharedAutoField._created[(uid, self.sequence)] = True
def __init__(self, *args, **kwargs):
self.sequence = kwargs.pop('sequence', None)
super(SharedAutoField, self).__init__(*args, **kwargs)
def db_type(self, connection):
"""
Returns the database column data type for this field, for the provided
connection.
"""
if not getattr(connection.features, 'support_shared_sequence',False):
raise exceptions.FieldError(
"%s is not implemented for current database backend."
% self.__class__.__name__)
return connection.ops.get_sql_for_shared_sequence(self)
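# Usage sketch (model and sequence names are illustrative): two models can draw their
# primary keys from one shared database sequence, e.g.
#   class Invoice(models.Model):
#       id = SharedAutoField(primary_key=True, sequence='billing_id_seq')
#   class CreditNote(models.Model):
#       id = SharedAutoField(primary_key=True, sequence='billing_id_seq')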
class BooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _(u"'%s' value must be either True or False."),
}
description = _("Boolean (Either True or False)")
def __init__(self, *args, **kwargs):
kwargs['blank'] = True
if 'default' not in kwargs and not kwargs.get('null'):
kwargs['default'] = False
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "BooleanField"
def to_python(self, value):
if value in (True, False):
# if value is 1 or 0 than it's equal to True or False, but we want
# to return a true bool for semantic reasons.
return bool(value)
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
msg = self.error_messages['invalid'] % str(value)
raise exceptions.ValidationError(msg)
def get_prep_lookup(self, lookup_type, value):
# Special-case handling for filters coming from a Web request (e.g. the
# admin interface). Only works for scalar values (not lists). If you're
# passing in a list, you might as well make things the right type when
# constructing the list.
if value in ('1', '0'):
value = bool(int(value))
return super(BooleanField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
if value is None:
return None
return bool(value)
def formfield(self, **kwargs):
# Unlike most fields, BooleanField figures out include_blank from
# self.null instead of self.blank.
if self.choices:
include_blank = (self.null or
not (self.has_default() or 'initial' in kwargs))
defaults = {'choices': self.get_choices(
include_blank=include_blank)}
else:
defaults = {'form_class': forms.BooleanField}
defaults.update(kwargs)
return super(BooleanField, self).formfield(**defaults)
class CharField(Field):
description = _("String (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
super(CharField, self).__init__(*args, **kwargs)
self.validators.append(validators.MaxLengthValidator(self.max_length))
def get_internal_type(self):
return "CharField"
def to_python(self, value):
if isinstance(value, basestring) or value is None:
return value
return smart_unicode(value)
def get_prep_value(self, value):
return self.to_python(value)
def formfield(self, **kwargs):
# Passing max_length to forms.CharField means that the value's length
# will be validated twice. This is considered acceptable since we want
# the value in the form field (to pass into widget for example).
defaults = {'max_length': self.max_length}
defaults.update(kwargs)
return super(CharField, self).formfield(**defaults)
# TODO: Maybe move this into contrib, because it's specialized.
class CommaSeparatedIntegerField(CharField):
default_validators = [validators.validate_comma_separated_integer_list]
description = _("Comma-separated integers")
def formfield(self, **kwargs):
defaults = {
'error_messages': {
'invalid': _(u'Enter only digits separated by commas.'),
}
}
defaults.update(kwargs)
return super(CommaSeparatedIntegerField, self).formfield(**defaults)
class DateField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _(u"'%s' value has an invalid date format. It must be "
u"in YYYY-MM-DD format."),
'invalid_date': _(u"'%s' value has the correct format (YYYY-MM-DD) "
u"but it is an invalid date."),
}
description = _("Date (without time)")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "DateField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value.date()
if isinstance(value, datetime.date):
return value
value = smart_str(value)
try:
parsed = parse_date(value)
if parsed is not None:
return parsed
except ValueError:
msg = self.error_messages['invalid_date'] % value
raise exceptions.ValidationError(msg)
msg = self.error_messages['invalid'] % value
raise exceptions.ValidationError(msg)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.date.today()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateField, self).pre_save(model_instance, add)
def contribute_to_class(self, cls, name):
super(DateField,self).contribute_to_class(cls, name)
if not self.null:
setattr(cls, 'get_next_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self,
is_next=True))
setattr(cls, 'get_previous_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self,
is_next=False))
def get_prep_lookup(self, lookup_type, value):
# For "__month", "__day", and "__week_day" lookups, convert the value
# to an int so the database backend always sees a consistent type.
if lookup_type in ('month', 'day', 'week_day'):
return int(value)
return super(DateField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts dates into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_date(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateField}
defaults.update(kwargs)
return super(DateField, self).formfield(**defaults)
class DateTimeField(DateField):
empty_strings_allowed = False
default_error_messages = {
'invalid': _(u"'%s' value has an invalid format. It must be in "
u"YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format."),
'invalid_date': _(u"'%s' value has the correct format "
u"(YYYY-MM-DD) but it is an invalid date."),
'invalid_datetime': _(u"'%s' value has the correct format "
u"(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) "
u"but it is an invalid date/time."),
}
description = _("Date (with time)")
# __init__ is inherited from DateField
def get_internal_type(self):
return "DateTimeField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value
if isinstance(value, datetime.date):
value = datetime.datetime(value.year, value.month, value.day)
if settings.USE_TZ:
# For backwards compatibility, interpret naive datetimes in
# local time. This won't work during DST change, but we can't
# do much about it, so we let the exceptions percolate up the
# call stack.
warnings.warn(u"DateTimeField received a naive datetime (%s)"
u" while time zone support is active." % value,
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
value = smart_str(value)
try:
parsed = parse_datetime(value)
if parsed is not None:
return parsed
except ValueError:
msg = self.error_messages['invalid_datetime'] % value
raise exceptions.ValidationError(msg)
try:
parsed = parse_date(value)
if parsed is not None:
return datetime.datetime(parsed.year, parsed.month, parsed.day)
except ValueError:
msg = self.error_messages['invalid_date'] % value
raise exceptions.ValidationError(msg)
msg = self.error_messages['invalid'] % value
raise exceptions.ValidationError(msg)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = timezone.now()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateTimeField, self).pre_save(model_instance, add)
# contribute_to_class is inherited from DateField, it registers
# get_next_by_FOO and get_prev_by_FOO
# get_prep_lookup is inherited from DateField
def get_prep_value(self, value):
value = self.to_python(value)
if value is not None and settings.USE_TZ and timezone.is_naive(value):
# For backwards compatibility, interpret naive datetimes in local
# time. This won't work during DST change, but we can't do much
# about it, so we let the exceptions percolate up the call stack.
warnings.warn(u"DateTimeField received a naive datetime (%s)"
u" while time zone support is active." % value,
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
def get_db_prep_value(self, value, connection, prepared=False):
# Casts datetimes into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_datetime(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateTimeField}
defaults.update(kwargs)
return super(DateTimeField, self).formfield(**defaults)
class DecimalField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _(u"'%s' value must be a decimal number."),
}
description = _("Decimal number")
def __init__(self, verbose_name=None, name=None, max_digits=None,
decimal_places=None, **kwargs):
self.max_digits, self.decimal_places = max_digits, decimal_places
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "DecimalField"
def to_python(self, value):
if value is None:
return value
try:
return decimal.Decimal(value)
except decimal.InvalidOperation:
msg = self.error_messages['invalid'] % str(value)
raise exceptions.ValidationError(msg)
def _format(self, value):
if isinstance(value, basestring) or value is None:
return value
else:
return self.format_number(value)
def format_number(self, value):
"""
Formats a number into a string with the requisite number of digits and
decimal places.
"""
# Method moved to django.db.backends.util.
#
# It is preserved because it is used by the oracle backend
# (django.db.backends.oracle.query), and also for
# backwards-compatibility with any external code which may have used
# this method.
from django.db.backends import util
return util.format_number(value, self.max_digits, self.decimal_places)
def get_db_prep_save(self, value, connection):
return connection.ops.value_to_db_decimal(self.to_python(value),
self.max_digits, self.decimal_places)
def get_prep_value(self, value):
return self.to_python(value)
def formfield(self, **kwargs):
defaults = {
'max_digits': self.max_digits,
'decimal_places': self.decimal_places,
'form_class': forms.DecimalField,
}
defaults.update(kwargs)
return super(DecimalField, self).formfield(**defaults)
class EmailField(CharField):
default_validators = [validators.validate_email]
description = _("E-mail address")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 75)
CharField.__init__(self, *args, **kwargs)
def formfield(self, **kwargs):
# As with CharField, this will cause email validation to be performed
# twice.
defaults = {
'form_class': forms.EmailField,
}
defaults.update(kwargs)
return super(EmailField, self).formfield(**defaults)
class FilePathField(Field):
description = _("File path")
def __init__(self, verbose_name=None, name=None, path='', match=None,
recursive=False, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
kwargs['max_length'] = kwargs.get('max_length', 100)
Field.__init__(self, verbose_name, name, **kwargs)
def formfield(self, **kwargs):
defaults = {
'path': self.path,
'match': self.match,
'recursive': self.recursive,
'form_class': forms.FilePathField,
}
defaults.update(kwargs)
return super(FilePathField, self).formfield(**defaults)
def get_internal_type(self):
return "FilePathField"
class FloatField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%s' value must be a float."),
}
description = _("Floating point number")
def get_prep_value(self, value):
if value is None:
return None
return float(value)
def get_internal_type(self):
return "FloatField"
def to_python(self, value):
if value is None:
return value
try:
return float(value)
except (TypeError, ValueError):
msg = self.error_messages['invalid'] % str(value)
raise exceptions.ValidationError(msg)
def formfield(self, **kwargs):
defaults = {'form_class': forms.FloatField}
defaults.update(kwargs)
return super(FloatField, self).formfield(**defaults)
class IntegerField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%s' value must be an integer."),
}
description = _("Integer")
def get_prep_value(self, value):
if value is None:
return None
return int(value)
def get_prep_lookup(self, lookup_type, value):
if ((lookup_type == 'gte' or lookup_type == 'lt')
and isinstance(value, float)):
value = math.ceil(value)
return super(IntegerField, self).get_prep_lookup(lookup_type, value)
def get_internal_type(self):
return "IntegerField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
msg = self.error_messages['invalid'] % str(value)
raise exceptions.ValidationError(msg)
def formfield(self, **kwargs):
defaults = {'form_class': forms.IntegerField}
defaults.update(kwargs)
return super(IntegerField, self).formfield(**defaults)
class BigIntegerField(IntegerField):
empty_strings_allowed = False
description = _("Big (8 byte) integer")
MAX_BIGINT = 9223372036854775807
def get_internal_type(self):
return "BigIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': -BigIntegerField.MAX_BIGINT - 1,
'max_value': BigIntegerField.MAX_BIGINT}
defaults.update(kwargs)
return super(BigIntegerField, self).formfield(**defaults)
class IPAddressField(Field):
empty_strings_allowed = False
description = _("IPv4 address")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 15
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "IPAddressField"
def formfield(self, **kwargs):
defaults = {'form_class': forms.IPAddressField}
defaults.update(kwargs)
return super(IPAddressField, self).formfield(**defaults)
class GenericIPAddressField(Field):
empty_strings_allowed = True
description = _("IP address")
default_error_messages = {}
def __init__(self, protocol='both', unpack_ipv4=False, *args, **kwargs):
self.unpack_ipv4 = unpack_ipv4
self.default_validators, invalid_error_message = \
validators.ip_address_validators(protocol, unpack_ipv4)
self.default_error_messages['invalid'] = invalid_error_message
kwargs['max_length'] = 39
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "GenericIPAddressField"
def to_python(self, value):
if value and ':' in value:
return clean_ipv6_address(value,
self.unpack_ipv4, self.error_messages['invalid'])
return value
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
return value or None
def get_prep_value(self, value):
if value and ':' in value:
try:
return clean_ipv6_address(value, self.unpack_ipv4)
except exceptions.ValidationError:
pass
return value
def formfield(self, **kwargs):
defaults = {'form_class': forms.GenericIPAddressField}
defaults.update(kwargs)
return super(GenericIPAddressField, self).formfield(**defaults)
class NullBooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%s' value must be either None, True or False."),
}
description = _("Boolean (Either True, False or None)")
def __init__(self, *args, **kwargs):
kwargs['null'] = True
kwargs['blank'] = True
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "NullBooleanField"
def to_python(self, value):
if value is None:
return None
if value in (True, False):
return bool(value)
if value in ('None',):
return None
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
msg = self.error_messages['invalid'] % str(value)
raise exceptions.ValidationError(msg)
def get_prep_lookup(self, lookup_type, value):
# Special-case handling for filters coming from a Web request (e.g. the
# admin interface). Only works for scalar values (not lists). If you're
# passing in a list, you might as well make things the right type when
# constructing the list.
if value in ('1', '0'):
value = bool(int(value))
return super(NullBooleanField, self).get_prep_lookup(lookup_type,
value)
def get_prep_value(self, value):
if value is None:
return None
return bool(value)
def formfield(self, **kwargs):
defaults = {
'form_class': forms.NullBooleanField,
'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
defaults.update(kwargs)
return super(NullBooleanField, self).formfield(**defaults)
class PositiveIntegerField(IntegerField):
description = _("Positive integer")
def get_internal_type(self):
return "PositiveIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveIntegerField, self).formfield(**defaults)
class PositiveSmallIntegerField(IntegerField):
description = _("Positive small integer")
def get_internal_type(self):
return "PositiveSmallIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveSmallIntegerField, self).formfield(**defaults)
class SlugField(CharField):
description = _("Slug (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 50)
# Set db_index=True unless it's been set manually.
if 'db_index' not in kwargs:
kwargs['db_index'] = True
super(SlugField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return "SlugField"
def formfield(self, **kwargs):
defaults = {'form_class': forms.SlugField}
defaults.update(kwargs)
return super(SlugField, self).formfield(**defaults)
class SmallIntegerField(IntegerField):
description = _("Small integer")
def get_internal_type(self):
return "SmallIntegerField"
class TextField(Field):
description = _("Text")
def get_internal_type(self):
return "TextField"
def get_prep_value(self, value):
if isinstance(value, basestring) or value is None:
return value
return smart_unicode(value)
def formfield(self, **kwargs):
defaults = {'widget': forms.Textarea}
defaults.update(kwargs)
return super(TextField, self).formfield(**defaults)
class TimeField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _(u"'%s' value has an invalid format. It must be in "
u"HH:MM[:ss[.uuuuuu]] format."),
'invalid_time': _(u"'%s' value has the correct format "
u"(HH:MM[:ss[.uuuuuu]]) but it is an invalid time."),
}
description = _("Time")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "TimeField"
def to_python(self, value):
if value is None:
return None
if isinstance(value, datetime.time):
return value
if isinstance(value, datetime.datetime):
# Not usually a good idea to pass in a datetime here (it loses
# information), but this can be a side-effect of interacting with a
# database backend (e.g. Oracle), so we'll be accommodating.
return value.time()
value = smart_str(value)
try:
parsed = parse_time(value)
if parsed is not None:
return parsed
except ValueError:
msg = self.error_messages['invalid_time'] % value
raise exceptions.ValidationError(msg)
msg = self.error_messages['invalid'] % value
raise exceptions.ValidationError(msg)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.datetime.now().time()
setattr(model_instance, self.attname, value)
return value
else:
return super(TimeField, self).pre_save(model_instance, add)
def get_prep_value(self, value):
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts times into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_time(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.TimeField}
defaults.update(kwargs)
return super(TimeField, self).formfield(**defaults)
class URLField(CharField):
description = _("URL")
def __init__(self, verbose_name=None, name=None, verify_exists=False,
**kwargs):
kwargs['max_length'] = kwargs.get('max_length', 200)
CharField.__init__(self, verbose_name, name, **kwargs)
self.validators.append(
validators.URLValidator(verify_exists=verify_exists))
def formfield(self, **kwargs):
# As with CharField, this will cause URL validation to be performed
# twice.
defaults = {
'form_class': forms.URLField,
}
defaults.update(kwargs)
return super(URLField, self).formfield(**defaults)
class ArrayFieldBase(object):
"""Django field type for an array of values. Supported only on PostgreSQL.
This class is not meant to be instantiated directly; instead, field classes
should inherit from this class and from an appropriate Django model class.
``itertype`` is the python iterable type of field
"""
_south_introspects = True
def __init__(self, *args, **kwargs):
self.itertype = kwargs.pop('itertype', list)
super(ArrayFieldBase, self).__init__(*args, **kwargs)
@property
def subinstance(self):
if not hasattr(self, "_subinstance"):
self._subinstance = copy.deepcopy(self)
self._subinstance.__class__ = self.fieldtype
return self._subinstance
def db_type(self, connection):
        if not getattr(connection.features, 'support_arrays', False):
raise exceptions.FieldError(
"%s is not implemented for current database backend"
% self.__class__.__name__)
return super(ArrayFieldBase, self).db_type(connection=connection) + '[]'
def to_python(self, value):
# psycopg2 already supports array types, so we don't actually need to serialize
# or deserialize
if value is None:
return None
if not isinstance(value, (list, tuple, set, self.itertype)):
try:
iter(value)
except TypeError:
raise exceptions.ValidationError(
"An ArrayField value must be None or an iterable.")
return self.itertype([self.fieldtype.to_python(self.subinstance, x) for x in value])
def get_prep_value(self, value):
if value is None:
return None
return [self.fieldtype.get_prep_value(self.subinstance, v) for v in value]
#def get_db_prep_save(self,value,connection):
#if isinstance(value, (list, tuple, set, self.itertype)):
#return 'ARRAY[%s]::%s' % (', '.join(self.fieldtype.get_db_prep_value(
#self.subinstance, v, connection) for v in value),
#self.db_type(connection))
#return self.fieldtype.get_db_prep_save(self.subinstance, value, connection)
    def get_db_prep_save(self, value, connection):
if isinstance(value, (list, tuple, set, self.itertype)):
return [self.fieldtype.get_db_prep_save(self.subinstance, v,
connection) for v in value]
return self.fieldtype.get_db_prep_save(self.subinstance, value, connection)
def get_default(self):
return self.itertype()
def run_validators(self, value):
if value is None:
super(ArrayFieldBase, self).run_validators(value)
else:
for v in value:
super(ArrayFieldBase, self).run_validators(v)
class ArrayFieldMetaclass(SubfieldBase):
pass
def _array_field_factory(name, fieldtype, module=ArrayFieldBase.__module__):
return ArrayFieldMetaclass(name, (ArrayFieldBase, fieldtype),
{'__module__': module,
'description': "An array, where each element is of the same type "\
"as %s." % fieldtype.__name__,
'fieldtype': fieldtype})
# If you want to make an array version of a field not covered below, this is
# the easiest way:
#
# class FooArrayField(dbarray.ArrayFieldBase, FooField):
# __metaclass__ = dbarray.ArrayFieldMetaclass
BooleanArrayField = _array_field_factory('BooleanArrayField', BooleanField)
CharArrayField = _array_field_factory('CharArrayField', CharField)
#DateArrayField = _array_field_factory('DateArrayField', DateField)
#DateTimeArrayField = _array_field_factory('DateTimeArrayField', DateTimeField)
#DecimalArrayField = _array_field_factory('DecimalArrayField', DecimalField)
EmailArrayField = _array_field_factory('EmailArrayField', EmailField)
FilePathArrayField = _array_field_factory('FilePathArrayField', FilePathField)
FloatArrayField = _array_field_factory('FloatArrayField', FloatField)
IntegerArrayField = _array_field_factory('IntegerArrayField', IntegerField)
BigIntegerArrayField = _array_field_factory('BigIntegerArrayField', BigIntegerField)
#IPAddressArrayField = _array_field_factory('IPAddressArrayField', IPAddressField)
#GenericIPAddressArrayField = _array_field_factory('GenericIPAddressArrayField', GenericIPAddressField)
NullBooleanArrayField = _array_field_factory('NullBooleanArrayField', NullBooleanField)
#PositiveIntegerArrayField = _array_field_factory('PositiveIntegerArrayField', PositiveIntegerField)
#PositiveSmallIntegerArrayField = _array_field_factory('PositiveSmallIntegerArrayField', PositiveSmallIntegerField)
SlugArrayField = _array_field_factory('SlugArrayField', SlugField)
SmallIntegerArrayField = _array_field_factory('SmallIntegerArrayField', SmallIntegerField)
TextArrayField = _array_field_factory('TextArrayField', TextField)
#TimeArrayField = _array_field_factory('TimeArrayField', TimeField)
URLArrayField = _array_field_factory('URLArrayField', URLField)
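# Example (hypothetical): a minimal sketch of how one of the generated array
# fields might be declared, assuming a PostgreSQL backend; the model and field
# names below are illustrative, not taken from any real project.
#
#     from django.db import models
#
#     class Article(models.Model):
#         title = models.CharField(max_length=200)
#         # Stored as a varchar(50)[] column; each element is coerced and
#         # validated like a single CharField value.
#         tags = CharArrayField(max_length=50, null=True, blank=True)
#
#     # Article.objects.create(title="Arrays", tags=["django", "postgres"])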
|
bsd-3-clause
| 2,700,387,876,959,823,000 | 36.307958 | 115 | 0.601503 | false |
ionomy/ion
|
test/functional/token_test-pt1.py
|
1
|
8466
|
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The Ion Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the functionality of all CLI commands.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from time import sleep
from decimal import Decimal
import re
import sys
import os
import subprocess
ION_TX_FEE = 0.001
ION_AUTH_ADDR = "gAQQQjA4DCT2EZDVK6Jae4mFfB217V43Nt"
class TokenTest (BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
#self.extra_args = [["-debug"],["-debug"]]
def run_test(self):
connect_nodes_bi(self.nodes, 0, 1)
tmpdir=self.options.tmpdir
self.log.info("Generating Tokens...")
self.nodes[0].generate(100)
self.nodes[0].importprivkey("cUnScAFQYLW8J8V9bWr57yj2AopudqTd266s6QuWGMMfMix3Hff4")
self.nodes[0].generate(100)
self.nodes[0].generate(100)
self.nodes[0].sendtoaddress(ION_AUTH_ADDR, 10)
self.nodes[0].generate(1)
MagicTok=self.nodes[0].configuremanagementtoken("MAGIC", "MagicToken", "4", "https://github.com/ioncoincore/ATP-descriptions/blob/master/ION-testnet-MAGIC.json", "4f92d91db24bb0b8ca24a2ec86c4b012ccdc4b2e9d659c2079f5cc358413a765", "true")
self.nodes[0].generate(1)
MagicGroup_ID=MagicTok['groupID']
mintaddr=self.nodes[0].getnewaddress()
self.nodes[0].minttoken(MagicGroup_ID, mintaddr, 500)
self.nodes[0].generate(1)
XDMTok=self.nodes[0].configuremanagementtoken("XDM", "DarkMatter", "13", "https://github.com/ioncoincore/ATP-descriptions/blob/master/ION-testnet-XDM.json", "f5125a90bde180ef073ce1109376d977f5cbddb5582643c81424cc6cc842babd", "true")
XDMGroup_ID=XDMTok['groupID']
AtomTok=self.nodes[0].configuremanagementtoken("ATOM", "Atom", "0", "https://github.com/ioncoincore/ATP-descriptions/blob/master/ION-testnet-ATOM.json", "b0425ee4ba234099970c53c28288da749e2a1afc0f49856f4cab82b37f72f6a5", "true")
AtomGroup_ID=AtomTok['groupID']
ELECTok=self.nodes[0].configuremanagementtoken("ELEC", "Electron", "13", "https://github.com/ioncoincore/ATP-descriptions/blob/master/ION-testnet-ELEC.json", "6de2409add060ec4ef03d61c0966dc46508ed3498e202e9459e492a372ddccf5", "true")
ELECGroup_ID=ELECTok['groupID']
self.nodes[0].generate(1)
self.log.info("Token Info %s" % json.dumps(self.nodes[0].tokeninfo("all"), indent=4))
MagicAddr=self.nodes[0].getnewaddress()
XDMAddr=self.nodes[0].getnewaddress()
AtomAddr=self.nodes[0].getnewaddress()
ELECAddr=self.nodes[0].getnewaddress()
HulkAddr=self.nodes[0].getnewaddress()
self.nodes[0].minttoken(MagicGroup_ID, MagicAddr, '4975')
self.nodes[0].generate(1)
self.nodes[0].minttoken(XDMGroup_ID, XDMAddr, '71')
self.nodes[0].generate(1)
self.nodes[0].minttoken(AtomGroup_ID, AtomAddr, '100')
self.nodes[0].generate(1)
self.nodes[0].minttoken(ELECGroup_ID, ELECAddr, '1')
self.nodes[0].generate(1)
HULKTok=self.nodes[0].configuretoken("HULK", "HulkToken", "10", "https://raw.githubusercontent.com/CeForce/hulktoken/master/hulk.json", "367750e31cb276f5218c013473449c9e6a4019fed603d045b51e25f5db29283a", "true")
HulkGroup_ID=HULKTok['groupID']
self.nodes[0].generate(1)
self.nodes[0].minttoken(HulkGroup_ID, HulkAddr, '15')
self.nodes[0].generate(1)
tokenBalance=self.nodes[0].gettokenbalance()
for balance in tokenBalance:
self.log.info("Token Name %s" % balance['name'])
self.log.info("Token Balance %s" % balance['balance'])
self.log.info("XDM Ticker %s" % json.dumps(self.nodes[0].tokeninfo('ticker', 'XDM'), indent=4))
self.log.info("XDM Scan Tokens %s" % self.nodes[0].scantokens('start', XDMGroup_ID))
tokenAuth=self.nodes[0].listtokenauthorities()
for authority in tokenAuth:
self.log.info("Ticker %s" % authority['ticker'])
self.log.info("Authority address %s\n" % authority['address'])
self.log.info("Token Authorities %s" % authority['tokenAuthorities'])
self.log.info("Drop Mint Authoritiy for XDM")
XDMDrop=self.nodes[0].listtokenauthorities(XDMGroup_ID)
self.nodes[0].droptokenauthorities(XDMGroup_ID, XDMDrop[0]['txid'], str(XDMDrop[0]['vout']), 'configure')
self.nodes[0].generate(1)
tokenAuthority=(self.nodes[0].listtokenauthorities(XDMGroup_ID))
tokenXDMAddr=tokenAuthority[0]['address']
self.log.info("Token authorities XDM %s\n" % tokenXDMAddr)
try:
self.log.info("Try minting XDM tokens with mint flag removed")
self.nodes[0].minttoken(XDMGroup_ID, XDMAddr, '100')
except Exception as e:
self.log.info(e)
#self.log.info("Re-Enable mint XDM")
#time.sleep(3600)
#self.nodes[0].createtokenauthorities(XDMGroup_ID, tokenXDMAddr, 'configure')
self.log.info("XDM Scan Tokens %s" % self.nodes[0].scantokens('start', XDMGroup_ID))
tokenBalance=self.nodes[0].gettokenbalance()
for balance in tokenBalance:
self.log.info("Token Name %s" % balance['name'])
self.log.info("Token Balance %s" % balance['balance'])
AtomBalance=self.nodes[0].gettokenbalance(AtomGroup_ID)
self.log.info("Atom Balance %s" % AtomBalance['balance'])
self.log.info("Melt 10 tokens from ATOM Group")
self.nodes[0].melttoken(AtomGroup_ID, '10')
AtomBalance=self.nodes[0].gettokenbalance(AtomGroup_ID)
self.log.info("Atom Balance %s\n" % AtomBalance['balance'])
self.log.info("Token info all (from node1)\n%s\n" % json.dumps(self.nodes[1].tokeninfo('all'), indent=4))
self.log.info("Token info ticker XDM\n%s\n" % json.dumps(self.nodes[0].tokeninfo('ticker', 'XDM'), indent=4))
self.log.info("Token info name DarkMatter\n%s\n" % json.dumps(self.nodes[0].tokeninfo('name', 'darkmatter'), indent=4))
self.log.info("Token info groupid %s\n%s\n" % (XDMGroup_ID, json.dumps(self.nodes[0].tokeninfo('groupid', XDMGroup_ID), indent=4)))
ELEC_Trans=self.nodes[0].listtokentransactions(ELECGroup_ID)
self.log.info("Token Transactions Electron Token\n%s\n" % ELEC_Trans)
ElecTrans=ELEC_Trans[0]['txid']
ELEC_BlockHash=self.nodes[0].getblockhash(200)
self.log.info("Electron Transaction\n%s" % self.nodes[0].gettokentransaction(ElecTrans))
self.log.info("Blockhash block 200 %s" % ELEC_BlockHash)
self.log.info("\nTransaction ID %s" % ElecTrans)
self.log.info("Transaction Details %s" % self.nodes[0].gettokentransaction(ElecTrans, ELEC_BlockHash))
self.log.info("\nList tokens since block 200 Hulk\n%s" % self.nodes[0].listtokenssinceblock(ELECGroup_ID, ELEC_BlockHash))
tokenHulkUnspent=self.nodes[0].listunspenttokens(HulkGroup_ID)
newHulk=self.nodes[0].getnewaddress()
self.log.info("Send tokens to new address %s" % self.nodes[0].sendtoken(HulkGroup_ID, newHulk, 2))
self.nodes[0].generate(1)
self.log.info(self.nodes[1].getaddressbalance)
subgroupID=self.nodes[0].getsubgroupid(HulkGroup_ID,"Bruce_Banner")
self.log.info("Subgroup Info %s " % self.nodes[0].tokeninfo('groupid',subgroupID))
self.log.info("\nUnspent Tokens Hulk Token\n%s\n" % tokenHulkUnspent)
tokenReceiveAddr=self.nodes[1].getnewaddress()
rawTxid=tokenHulkUnspent[0]['txid']
rawVout=tokenHulkUnspent[0]['vout']
rawAddr=tokenReceiveAddr
rawAmount=0.01
self.log.info("txid %s" % rawTxid)
self.log.info("vout %s" % rawVout)
self.log.info("recaddr %s" % rawAddr)
self.log.info("amount %s" % rawAmount )
inputs=[{ "txid" : rawTxid, "vout" : rawVout }]
outputs={ rawAddr : rawAmount }
token={ rawAddr : { "amount" : 0.001, "groupid" : HulkGroup_ID, "token_amount" : 0.1 }}
self.log.info(str(inputs))
self.log.info(outputs)
self.log.info(token)
# ICC 86
#rawtx=self.nodes[0].createrawtokentransaction(inputs, outputs, token)
#self.log.info(rawtx)
#time.sleep(3600)
if __name__ == '__main__':
TokenTest().main()
|
mit
| 4,159,290,901,435,272,700 | 55.818792 | 245 | 0.66773 | false |
USGSDenverPychron/pychron
|
pychron/updater/packager.py
|
1
|
4769
|
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
import os
import shutil
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.globals import globalv
def copy_resources(root, dest, app_name):
icon_name = 'py{}_icon.icns'.format(app_name)
icon_file = os.path.join(root, 'resources', 'apps', icon_name)
if os.path.isfile(icon_file):
shutil.copyfile(icon_file,
os.path.join(dest, 'Resources', icon_name))
# copy icons
iroot = os.path.join(root, 'resources', 'icons')
rdest = os.path.join(dest, 'Resources')
idest = os.path.join(rdest, 'icons')
if not os.path.isdir(idest):
os.mkdir(idest)
includes = []
icon_req = os.path.join(root, 'resources','icon_req.txt')
if os.path.isfile(icon_req):
with open(icon_req, 'r') as rfile:
includes = [ri.strip() for ri in rfile.read().split('\n')]
for di in os.listdir(iroot):
head,_ = os.path.splitext(di)
if includes and head not in includes:
continue
copy_resource(idest, os.path.join(iroot, di))
# copy splashes and abouts
for ni, nd in (('splash', 'splashes'), ('about', 'abouts')):
sname = '{}_{}.png'.format(ni, app_name)
copy_resource(idest, os.path.join(root, 'resources', nd, sname), name='{}.png'.format(ni))
# copy helper mod
for a in ('helpers.py', 'ENV.txt'):
m = os.path.join(root, 'launchers', a)
copy_resource(rdest, m)
# copy qt_menu.nib
p = '/anaconda/python.app/Contents/Resources/qt_menu.nib'
if not os.path.isdir(p):
p = '{}/{}'.format(os.path.expanduser('~'),
'anaconda/python.app/Contents/Resources/qt_menu.nib')
copy_resource_dir(rdest, p)
def make_egg(root, dest, pkg_name, version):
from setuptools import setup, find_packages
pkgs = find_packages(root,
exclude=('app_utils', 'docs', 'launchers',
'migration', 'test', 'test.*', 'qtegra',
'sandbox', 'zobs'))
os.chdir(root)
try:
setup(name=pkg_name,
script_args=('bdist_egg',),
packages=pkgs)
except BaseException, e:
import traceback
traceback.print_exc()
eggname = '{}-0.0.0-py2.7.egg'.format(pkg_name)
# make the .pth file
if dest.endswith('Contents'):
rdest = os.path.join(dest, 'Resources')
with open(os.path.join(rdest,
'{}.pth'.format(pkg_name)), 'w') as wfile:
if not globalv.debug:
wfile.write('{}\n'.format(eggname))
if not globalv.debug:
egg_root = os.path.join(root, 'dist', eggname)
copy_resource(rdest, egg_root)
if not globalv.debug:
# remove build dir
for di in ('build', 'dist','pychron.egg-info'):
p = os.path.join(root, di)
print 'removing entire {} dir {}'.format(di, p)
if os.path.isdir(p):
shutil.rmtree(p)
else:
print 'not a directory {}'.format(p)
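# Example (hypothetical) of how the two helpers above might be combined by a
# build script; the paths, app name and version below are made up for
# illustration only:
#
#     root = '/Users/me/pychron'
#     dest = '/Users/me/build/Pychron.app/Contents'
#     copy_resources(root, dest, 'view')
#     make_egg(root, dest, 'pychron', '1.0')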
def resource_path(dest, name):
return os.path.join(dest, name)
def copy_resource_dir(dest, src, name=None):
if os.path.exists(src):
if name is None:
name = os.path.basename(src)
rd = resource_path(dest, name)
if not os.path.exists(rd):
shutil.copytree(src, rd)
else:
print '++++++++++++++++++++++ Not a valid Resource {} +++++++++++++++++++++++'.format(src)
def copy_resource(dest, src, name=None):
if os.path.isfile(src):
if name is None:
name = os.path.basename(src)
shutil.copyfile(src, resource_path(dest, name))
else:
print '++++++++++++++++++++++ Not a valid Resource {} +++++++++++++++++++++++'.format(src)
# ============= EOF =============================================
|
apache-2.0
| -5,193,884,487,105,296,000 | 33.557971 | 98 | 0.533235 | false |
Connexions/cnx-epub
|
cnxepub/tests/test_formatters.py
|
1
|
37587
|
# -*- coding: utf-8 -*-
# ###
# Copyright (c) 2016, Rice University
# This software is subject to the provisions of the GNU Affero General
# Public License version 3 (AGPLv3).
# See LICENCE.txt for details.
# ###
import codecs
import io
import json
import mimetypes
import os
import subprocess
import sys
import unittest
try:
from unittest import mock
except ImportError:
import mock
from lxml import etree
from ..testing import (TEST_DATA_DIR, unescape,
_get_memcache_client, IS_MEMCACHE_ENABLED)
from ..formatters import exercise_callback_factory
here = os.path.abspath(os.path.dirname(__file__))
IS_PY3 = sys.version_info.major == 3
XMLPP_DIR = os.path.join(here, 'utils')
def xmlpp(input_):
"""Pretty Print XML"""
proc = subprocess.Popen(['./xmlpp.pl', '-sSten'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=XMLPP_DIR)
output, _ = proc.communicate(input_)
return output
def _c14n(val):
ov = io.BytesIO()
ET = etree.fromstring(str(val)).getroottree()
ET.write_c14n(ov)
return ov.getvalue().decode('utf-8')
def last_extension(*args, **kwargs):
# Always return the last value of sorted mimetypes.guess_all_extensions
exts = mimetypes.guess_all_extensions(*args, **kwargs)
return sorted(exts)[-1]
EXERCISE_JSON_HTML = {
"items": [
{
"uid": "93@3",
"group_uuid": "e071207a-9d26-4cff-bbe9-9060d3d13ca6",
"copyright_holders": [
{
"user_id": 2,
"name": "Rice University"
}
],
"uuid": "8fa80526-0720-4a98-99c8-5d6113482424",
"authors": [
{
"user_id": 1,
"name": "OpenStax"
}
],
"published_at": "2016-09-16T17:40:20.497Z",
"number": 93,
"editors": [],
"is_vocab": False,
"stimulus_html": "<p>Please answer the following question:</p>",
"questions": [
{
"stimulus_html": "",
"formats": [
"free-response",
"multiple-choice"
],
"hints": [],
"id": 63062,
"is_answer_order_important": True,
"answers": [
{
"id": 259956,
"content_html": "monomers",
"correctness": "0.0"
},
{
"content_html": "polymers (<span data-math='retry' />)",
"id": 259957,
"correctness": "1.0"
},
{
"id": 259958,
"content_html": "carbohydrates only (<span data-math='' />)",
"correctness": "0.0"
},
{
"content_html": "water only (<span data-math='\\text{H}_2\\text{O}'>\\text{H}_2\\text{O}</span>)",
"id": 259959,
"correctness": "0.0"
},
{
"content_html": "polymer and water (<div data-math='\\text{H}_2\\text{O}'>\\text{H}_2\\text{O}</div>)",
"id": 259959,
"correctness": "1.0"
}
],
"combo_choices": [],
"stem_html": "Dehydration <img href='none'> synthesis leads to the formation of what?"
}
],
"tags": [
"apbio",
"inbook-yes",
"ost-chapter-review",
"review",
"apbio-ch03",
"apbio-ch03-s01",
"apbio-ch03-s01-lo01",
"apbio-ch03-ex002",
"dok:1",
"blooms:1",
"time:short",
"book:stax-bio",
"context-cnxmod:ea44b8fa-e7a2-4360-ad34-ac081bcf104f",
"exid:apbio-ch03-ex002",
"context-cnxmod:85d6c500-9860-42e8-853a-e6940a50224f",
"book:stax-apbio",
"filter-type:import:hs",
"type:conceptual-or-recall"
],
"derived_from": [],
"version": 3
}
],
"total_count": 1
}
EXERCISE_JSON = {
"items": [
{
"uid": "93@3",
"group_uuid": "e071207a-9d26-4cff-bbe9-9060d3d13ca6",
"copyright_holders": [
{
"user_id": 2,
"name": "Rice University"
}
],
"uuid": "8fa80526-0720-4a98-99c8-5d6113482424",
"authors": [
{
"user_id": 1,
"name": "OpenStax"
}
],
"published_at": "2016-09-16T17:40:20.497Z",
"number": 93,
"editors": [],
"is_vocab": False,
"stimulus_html": "",
"questions": [
{
"stimulus_html": "",
"formats": [
"free-response",
"multiple-choice"
],
"hints": [],
"id": 63062,
"is_answer_order_important": True,
"answers": [
{
"id": 259956,
"content_html": "monomers"
},
{
"content_html": "polymers",
"id": 259957
},
{
"id": 259958,
"content_html": "carbohydrates only"
},
{
"content_html": "water only",
"id": 259959
}
],
"combo_choices": [],
"stem_html": "Dehydration <img href='none'/> synthesis leads to the formation of what?"
}
],
"tags": [
"apbio",
"inbook-yes",
"ost-chapter-review",
"review",
"apbio-ch03",
"apbio-ch03-s01",
"apbio-ch03-s01-lo01",
"apbio-ch03-ex002",
"dok:1",
"blooms:1",
"time:short",
"book:stax-bio",
"context-cnxmod:ea44b8fa-e7a2-4360-ad34-ac081bcf104f",
"exid:apbio-ch03-ex002",
"context-cnxmod:85d6c500-9860-42e8-853a-e6940a50224f",
"book:stax-apbio",
"filter-type:import:hs",
"type:conceptual-or-recall"
],
"derived_from": [],
"version": 3
}
],
"total_count": 1
}
BAD_EQUATION_JSON = {
"error": "E_VALIDATION",
"status": 400,
"summary": "1 attribute is invalid",
"model": "Equation",
"invalidAttributes": {
"math": [{"rule": "required",
"message": "\"required\" validation rule failed for input: ''\nSpecifically, it threw an error. Details:\n undefined"}]
}
}
EQUATION_JSON = {
"updatedAt": "2016-10-31T16:06:44.413Z",
"cloudUrl": "https://mathmlcloud.cnx.org:1337/equation/58176c14d08360010084f48c",
"mathType": "TeX",
"math": "\\text{H}_2\\text{O}",
"components": [
{
"format": "mml",
"equation": "58176c14d08360010084f48c",
"source": '<math xmlns="http://www.w3.org/1998/Math/MathML" display="block">\n <msub>\n <mtext>H</mtext>\n <mn>2</mn>\n </msub>\n <mtext>O</mtext>\n</math>',
"updatedAt": "2016-10-31T16:06:44.477Z",
"id": "58176c14d08360010084f48d",
"createdAt": "2016-10-31T16:06:44.477Z"
}
],
"submittedBy": None,
"ip_address": "::ffff:10.64.71.226",
"id": "58176c14d08360010084f48c",
"createdAt": "2016-10-31T16:06:44.413Z"
}
class MockResponse:
def __init__(self, json_data, status_code):
self.json_data = json_data
self.text = json.dumps(json_data)
self.status_code = status_code
def json(self):
return self.json_data
def mocked_requests_get(*args, **kwargs):
# Replace requests.get with this mock
# modified from http://stackoverflow.com/a/28507806/5430
if args[0] == 'https://exercises.openstax.org/api/exercises?q=tag:apbio-ch03-ex002':
if 'headers' in kwargs:
assert kwargs['headers'] == {'Authorization': 'Bearer somesortoftoken'}
return MockResponse(EXERCISE_JSON_HTML, 200)
return MockResponse(EXERCISE_JSON, 200)
else:
return MockResponse({"total_count": 0, "items": []}, 200)
def mocked_requests_post(*args, **kwargs):
if args[0].startswith('http://mathmlcloud.cnx.org/equation'):
if args[1]['math'] == b'\\text{H}_2\\text{O}':
return MockResponse(EQUATION_JSON, 200)
elif args[1]['math'] == b'retry':
return MockResponse('{}', 200)
elif args[1]['math'] == b'':
return MockResponse(BAD_EQUATION_JSON, 400)
else:
return MockResponse('', 500)
return MockResponse({}, 404)
class DocumentContentFormatterTestCase(unittest.TestCase):
def test_document(self):
from ..models import Document
from ..formatters import DocumentContentFormatter
base_metadata = {
'publishers': [],
'created': '2013/03/19 15:01:16 -0500',
'revised': '2013/06/18 15:22:55 -0500',
'authors': [
{'type': 'cnx-id',
'name': 'Sponge Bob',
'id': 'sbob'}],
'editors': [],
'copyright_holders': [],
'illustrators': [],
'subjects': ['Science and Mathematics'],
'translators': [],
'keywords': ['Bob', 'Sponge', 'Rock'],
'title': "Goofy Goober Rock",
'license_text': 'CC-By 4.0',
'license_url': 'http://creativecommons.org/licenses/by/4.0/',
'summary': "<p>summary</p>",
'version': 'draft',
'language': 'en'
}
# Build test document.
metadata = base_metadata.copy()
document = Document('title',
io.BytesIO(u'<body><p>コンテンツ...</p></body>'.encode('utf-8')),
metadata=metadata)
html = str(DocumentContentFormatter(document))
expected_html = u"""\
<html xmlns="http://www.w3.org/1999/xhtml">
<body><p>コンテンツ...</p></body>
</html>
"""
self.assertEqual(expected_html, unescape(html))
def test_document_mathjax(self):
from ..models import Document
from ..formatters import DocumentContentFormatter
base_metadata = {
'publishers': [],
'created': '2013/03/19 15:01:16 -0500',
'revised': '2013/06/18 15:22:55 -0500',
'authors': [
{'type': 'cnx-id',
'name': 'Sponge Bob',
'id': 'sbob'}],
'editors': [],
'copyright_holders': [],
'illustrators': [],
'subjects': ['Science and Mathematics'],
'translators': [],
'keywords': ['Bob', 'Sponge', 'Rock'],
'title': "Goofy Goober Rock",
'license_text': 'CC-By 4.0',
'license_url': 'http://creativecommons.org/licenses/by/4.0/',
'summary': "<p>summary</p>",
'version': 'draft',
'language': 'en'
}
# Build test document.
metadata = base_metadata.copy()
document = Document('title',
io.BytesIO(u'<body><p><m:math xmlns:m="http://www.w3.org/1998/Math/MathML"/></p></body>'.encode('utf-8')),
metadata=metadata)
html = str(DocumentContentFormatter(document))
expected_html = u"""\
<html
xmlns='http://www.w3.org/1999/xhtml'
xmlns:m='http://www.w3.org/1998/Math/MathML'
>
<body>
<p>
<math></math>
</p>
</body>
</html>
"""
self.assertMultiLineEqual(
expected_html,
xmlpp(unescape(html).encode('utf-8')).decode('utf-8'))
# Second variation. Hoisted namespace declaration
document = Document('title',
io.BytesIO(u'<body><p xmlns:m="http://www.w3.org/1998/Math/MathML"><m:math/></p></body>'.encode('utf-8')),
metadata=metadata)
html = str(DocumentContentFormatter(document))
self.assertMultiLineEqual(
expected_html,
xmlpp(unescape(html).encode('utf-8')).decode('utf-8'))
class DocumentSummaryFormatterTestCase(unittest.TestCase):
def test_summary_w_one_tag(self):
from ..formatters import DocumentSummaryFormatter
from ..models import Document
document = Document('title', io.BytesIO(b'<body><p>contents</p></body>'),
metadata={'summary': '<p>résumé</p>'})
html = str(DocumentSummaryFormatter(document))
self.assertEqual('<p>résumé</p>', html)
def test_summary_w_just_text(self):
from ..formatters import DocumentSummaryFormatter
from ..models import Document
document = Document('title', io.BytesIO(b'<body><p>contents</p></body>'),
metadata={'summary': 'résumé'})
html = str(DocumentSummaryFormatter(document))
expected = """\
<div class="description" data-type="description"\
xmlns="http://www.w3.org/1999/xhtml">
résumé
</div>"""
self.assertEqual(expected, html)
def test_summary_w_text_and_tags(self):
from ..formatters import DocumentSummaryFormatter
from ..models import Document
document = Document('title', io.BytesIO(b'<body><p>contents</p></body>'),
metadata={'summary': 'résumé<p>etc</p><p>...</p>'})
html = str(DocumentSummaryFormatter(document))
expected = """\
<div class="description" data-type="description"\
xmlns="http://www.w3.org/1999/xhtml">
résumé<p>etc</p><p>...</p>
</div>"""
self.assertEqual(expected, html)
@mock.patch('mimetypes.guess_extension', last_extension)
class HTMLFormatterTestCase(unittest.TestCase):
base_metadata = {
'publishers': [],
'created': '2013/03/19 15:01:16 -0500',
'revised': '2013/06/18 15:22:55 -0500',
'authors': [
{'type': 'cnx-id',
'name': 'Sponge Bob',
'id': 'sbob'}],
'editors': [],
'copyright_holders': [],
'illustrators': [],
'subjects': ['Science and Mathematics'],
'translators': [],
'keywords': ['Bob', 'Sponge', 'Rock'],
'title': 'タイトル',
'license_text': 'CC-By 4.0',
'license_url': 'http://creativecommons.org/licenses/by/4.0/',
'summary': "<p>summary</p>",
'version': 'draft',
'language': 'en'
}
maxDiff = None
def xpath(self, path):
from ..html_parsers import HTML_DOCUMENT_NAMESPACES
return self.root.xpath(path, namespaces=HTML_DOCUMENT_NAMESPACES)
def test_document(self):
from ..models import Document
from ..formatters import HTMLFormatter
# Build test document.
metadata = self.base_metadata.copy()
metadata['canonical_book_uuid'] = 'ea4244ce-dd9c-4166-9c97-acae5faf0ba1'
document = Document(
metadata['title'],
io.BytesIO(u'<body><p>コンテンツ...</p></body>'.encode('utf-8')),
metadata=metadata)
html = str(HTMLFormatter(document))
html = unescape(html)
self.root = etree.fromstring(html.encode('utf-8'))
self.assertIn(u'<title>タイトル</title>', html)
self.assertIn(u'<p>コンテンツ...</p>', html)
self.assertEqual(
u'タイトル',
self.xpath('//*[@data-type="document-title"]/text()')[0])
self.assertEqual(
'summary',
self.xpath('//*[@class="description"]/xhtml:p/text()')[0])
self.assertEqual(
metadata['created'],
self.xpath('//xhtml:meta[@itemprop="dateCreated"]/@content')[0])
self.assertEqual(
metadata['revised'],
self.xpath('//xhtml:meta[@itemprop="dateModified"]/@content')[0])
self.assertEqual(
metadata['revised'],
self.xpath('.//xhtml:*[@data-type="revised"]/@data-value')[0])
self.assertEqual(
metadata['canonical_book_uuid'],
self.xpath('.//xhtml:*[@data-type="canonical-book-uuid"]/@data-value')[0]
)
self.assertEqual(
metadata['language'],
self.xpath('//xhtml:html/@lang')[0]
)
self.assertEqual(
metadata['language'],
self.xpath('//xhtml:meta[@itemprop="inLanguage"]/@content')[0]
)
def test_document_nolang(self):
from ..models import Document
from ..formatters import HTMLFormatter
# Build test document.
metadata = self.base_metadata.copy()
metadata['language'] = None
document = Document(
metadata['title'],
io.BytesIO(b'<body><p>Hello.</p></body>'),
metadata=metadata)
html = str(HTMLFormatter(document))
html = unescape(html)
self.root = etree.fromstring(html.encode('utf-8'))
self.assertEqual(
0,
len(self.xpath('//xhtml:html/@lang'))
)
self.assertEqual(
0,
len(self.xpath('//xhtml:meta[@itemprop="inLanguage"]/@content'))
)
def test_document_nocreated(self):
from ..models import Document
from ..formatters import HTMLFormatter
# Build test document.
metadata = self.base_metadata.copy()
metadata['created'] = None
document = Document(
metadata['title'],
io.BytesIO(b'<body><p>Hello.</p></body>'),
metadata=metadata)
html = str(HTMLFormatter(document))
html = unescape(html)
self.root = etree.fromstring(html.encode('utf-8'))
self.assertEqual(
0,
len(self.xpath('//xhtml:meta[@itemprop="dateCreated"]/@content'))
)
def test_document_pointer(self):
from ..models import DocumentPointer
from ..formatters import HTMLFormatter
# Build test document pointer.
pointer = DocumentPointer('pointer@1', {
'title': self.base_metadata['title'],
'cnx-archive-uri': 'pointer@1',
'url': 'https://cnx.org/contents/pointer@1',
})
html = str(HTMLFormatter(pointer))
html = unescape(html)
self.root = etree.fromstring(html.encode('utf-8'))
self.assertIn(u'<title>タイトル</title>', html)
self.assertIn(
u'<a href="https://cnx.org/contents/pointer@1">', html)
self.assertEqual(
u'タイトル',
self.xpath('//*[@data-type="document-title"]/text()')[0])
self.assertEqual(
'pointer@1',
self.xpath('//*[@data-type="cnx-archive-uri"]/@data-value')[0])
def test_binder(self):
from ..models import (Binder, TranslucentBinder, Document,
DocumentPointer)
from ..formatters import HTMLFormatter
# Build test binder.
binder = Binder(self.base_metadata['title'], metadata={
'title': self.base_metadata['title'],
'license_url': self.base_metadata['license_url'],
'license_text': self.base_metadata['license_text'],
'language': self.base_metadata['language']
})
metadata = self.base_metadata.copy()
metadata.update({
'title': "entrée",
'derived_from_uri': 'http://cnx.org/contents/'
'dd68a67a-11f4-4140-a49f-b78e856e2262@1',
'derived_from_title': "Taking Customers' Orders",
})
binder.append(Document('ingress', io.BytesIO(b'<body><p>Hello.</p></body>'),
metadata=metadata))
translucent_binder = TranslucentBinder(metadata={'title': 'Kranken'})
binder.append(translucent_binder)
metadata = self.base_metadata.copy()
metadata.update({
'title': "egress",
'cnx-archive-uri': 'e78d4f90-e078-49d2-beac-e95e8be70667'})
translucent_binder.append(
Document('egress', io.BytesIO(u'<body><p>hüvasti.</p></body>'.encode('utf-8')),
metadata=metadata))
binder.append(DocumentPointer('pointer@1', {
'title': 'Pointer',
'cnx-archive-uri': 'pointer@1',
'url': 'http://cnx.org/contents/pointer@1'}))
html = str(HTMLFormatter(binder))
html = unescape(html)
self.root = etree.fromstring(html.encode('utf-8'))
self.assertIn(u'<title>タイトル</title>', html)
lis = self.xpath('//xhtml:nav/xhtml:ol/xhtml:li')
self.assertEqual(3, len(lis))
self.assertEqual('[email protected]', lis[0][0].attrib['href'])
self.assertEqual(u'entrée', lis[0][0].text)
self.assertEqual('Kranken', lis[1][0].text)
self.assertEqual('[email protected]', lis[2][0].attrib['href'])
self.assertEqual('Pointer', lis[2][0].text)
lis = self.xpath('//xhtml:nav/xhtml:ol/xhtml:li[2]/xhtml:ol/xhtml:li')
self.assertEqual(1, len(lis))
self.assertEqual('[email protected]', lis[0][0].attrib['href'])
self.assertEqual('egress', lis[0][0].text)
def test_translucent_binder(self):
from ..models import (TranslucentBinder, Document)
from ..formatters import HTMLFormatter
# Build test translucent binder.
binder = TranslucentBinder(metadata={
'title': self.base_metadata['title'],
})
metadata = self.base_metadata.copy()
metadata.update({
'title': "entrée",
'derived_from_uri': 'http://cnx.org/contents/'
'dd68a67a-11f4-4140-a49f-b78e856e2262@1',
'derived_from_title': "Taking Customers' Orders",
})
binder.append(Document('ingress', io.BytesIO(b'<body><p>Hello.</p></body>'),
metadata=metadata))
html = str(HTMLFormatter(binder))
html = unescape(html)
self.root = etree.fromstring(html.encode('utf-8'))
self.assertIn(u'<title>タイトル</title>', html)
lis = self.xpath('//xhtml:nav/xhtml:ol/xhtml:li')
self.assertEqual(1, len(lis))
self.assertEqual('[email protected]', lis[0][0].attrib['href'])
self.assertEqual(u'entrée', lis[0][0].text)
def test_document_auto_generate_ids(self):
from ..models import Document
from ..formatters import HTMLFormatter
content = """<body>\
<div class="title" id="title">Preface</div>
<p class="para" id="my-id">This thing and <em>that</em> thing.</p>
<p class="para"><a href="#title">Link</a> to title</p></body>"""
page_one_id = 'fa21215a-91b5-424a-9fbd-5c451f309b87'
expected_content = """\
<div class="title" id="auto_{id}_title">Preface</div>
<p class="para" id="auto_{id}_my-id">This thing and <em>that</em> thing.</p>
<p class="para" id="auto_{id}_{n}"><a href="#auto_{id}_title">Link</a> to title</p>\
""".format(id=page_one_id, n=0)
document = Document(page_one_id, content)
formatted = str(HTMLFormatter(document, generate_ids=True))
self.assertIn(expected_content, formatted)
@mock.patch('mimetypes.guess_extension', last_extension)
class SingleHTMLFormatterTestCase(unittest.TestCase):
base_metadata = {
'publishers': [],
'created': '2016/03/04 17:05:20 -0500',
'revised': '2013/03/05 09:35:24 -0500',
'authors': [
{'type': 'cnx-id',
'name': 'Good Food',
'id': 'yum'}],
'editors': [],
'copyright_holders': [],
'illustrators': [],
'subjects': ['Humanities'],
'translators': [],
'keywords': ['Food', 'デザート', 'Pudding'],
'title': 'チョコレート',
'license_text': 'CC-By 4.0',
'license_url': 'http://creativecommons.org/licenses/by/4.0/',
'summary': "<p>summary</p>",
'version': 'draft',
}
maxDiff = None
def setUp(self):
from ..models import (TranslucentBinder, Binder, Document,
Resource, CompositeDocument)
with open(os.path.join(TEST_DATA_DIR, '1x1.jpg'), 'rb') as f:
jpg = Resource('1x1.jpg', io.BytesIO(f.read()), 'image/jpeg',
filename='small.jpg')
metadata = self.base_metadata.copy()
contents = io.BytesIO(u"""\
<body>
<h1>Chocolate Desserts</h1>
<p><a href="#list">List</a> of desserts to try:</p>
<div data-type="list" id="list"><ul><li>Chocolate Orange Tart,</li>
<li>Hot Mocha Puddings,</li>
<li>Chocolate and Banana French Toast,</li>
<li>Chocolate Truffles...</li>
</ul></div><img src="/resources/1x1.jpg" /><p>チョコレートデザート</p>
</body>
""".encode('utf-8'))
self.chocolate = Document('chocolate', contents, metadata=metadata,
resources=[jpg])
metadata = self.base_metadata.copy()
metadata['title'] = 'Apple'
metadata['canonical_book_uuid'] = 'ea4244ce-dd9c-4166-9c97-acae5faf0ba1'
contents = io.BytesIO(b"""\
<body>
<h1>Apple Desserts</h1>
<p><a href="/contents/lemon">Link to lemon</a>. Here are some examples:</p>
<ul><li id="auto_apple_1">Apple Crumble,</li>
<li>Apfelstrudel,</li>
<li id="auto_apple_0">Caramel Apple,</li>
<li>Apple Pie,</li>
<li>Apple sauce...</li>
</ul>
</body>
""")
self.apple = Document('apple', contents, metadata=metadata)
metadata = self.base_metadata.copy()
metadata['title'] = 'Lemon'
contents = io.BytesIO(b"""\
<body class="fruity">
<h1>Lemon Desserts</h1>
<p>Yum! <img src="/resources/1x1.jpg" /></p>
<div data-type="exercise">
<a href="#ost/api/ex/apbio-ch03-ex002">[link]</a>
</div>
<div data-type="exercise">
<p>
<a href="#ost/api/ex/nosuchtag">[link]</a>
</p>
</div>
<ul><li>Lemon & Lime Crush,</li>
<li>Lemon Drizzle Loaf,</li>
<li>Lemon Cheesecake,</li>
<li>Raspberry & Lemon Polenta Cake...</li>
</ul>
</body>
""")
self.lemon = Document('lemon', contents, metadata=metadata,
resources=[jpg])
metadata = self.base_metadata.copy()
metadata['title'] = 'Citrus'
self.citrus = TranslucentBinder([self.lemon], metadata=metadata)
title_overrides = [
self.apple.metadata['title'],
u'<span>1.1</span> <span>|</span> <span>レモン</span>',
'<span>Chapter</span> <span>2</span> <span>citrus</span>']
self.fruity = Binder('ec84e75d-9973-41f1-ab9d-1a3ebaef87e2', [self.apple, self.lemon, self.citrus],
metadata={'title': 'Fruity',
'cnx-archive-uri': 'ec84e75d-9973-41f1-ab9d-1a3ebaef87e2',
'cnx-archive-shortid': 'frt',
'license_text': 'CC-By 4.0',
'license_url': 'http://creativecommons.org/licenses/by/4.0/',
},
title_overrides=title_overrides)
metadata = self.base_metadata.copy()
metadata['title'] = 'Extra Stuff'
contents = io.BytesIO(b"""\
<body>
<h1>Extra Stuff</h1>
<p>This is a composite page.</p>
<p>Here is a <a href="#auto_chocolate_list">link</a> to another document.</p>
</body>
""")
self.extra = CompositeDocument(
'extra', contents, metadata=metadata)
with open(os.path.join(TEST_DATA_DIR, 'cover.png'), 'rb') as f:
cover_png = Resource(
'cover.png', io.BytesIO(f.read()), 'image/png',
filename='cover.png')
self.desserts = Binder(
'Desserts', [self.fruity, self.chocolate, self.extra],
metadata={'title': 'Desserts',
'license_url': 'http://creativecommons.org/licenses/by/4.0/',
'license_text': 'CC-By 4.0',
'cnx-archive-uri': '[email protected]',
'language': 'en',
'slug': 'desserts'},
resources=[cover_png])
def test_binder(self):
from ..formatters import SingleHTMLFormatter
page_path = os.path.join(TEST_DATA_DIR, 'desserts-single-page.xhtml')
if not IS_PY3:
page_path = page_path.replace('.xhtml', '-py2.xhtml')
with open(page_path, 'r') as f:
expected_content = f.read()
actual = str(SingleHTMLFormatter(self.desserts))
out_path = os.path.join(TEST_DATA_DIR,
'desserts-single-page-actual.xhtml')
if not IS_PY3:
out_path = out_path.replace('.xhtml', '-py2.xhtml')
with open(out_path, 'w') as out:
out.write(actual)
self.assertMultiLineEqual(expected_content, actual)
# Placed after the assert, so only called if success:
os.remove(out_path)
def test_str_unicode_bytes(self):
from ..formatters import SingleHTMLFormatter
html = bytes(SingleHTMLFormatter(self.desserts))
if IS_PY3:
self.assertMultiLineEqual(
html.decode('utf-8'), str(SingleHTMLFormatter(self.desserts)))
else:
self.assertMultiLineEqual(
html, str(SingleHTMLFormatter(self.desserts)))
self.assertMultiLineEqual(
html,
unicode(SingleHTMLFormatter(self.desserts)).encode('utf-8'))
@mock.patch('requests.get', mocked_requests_get)
def test_includes_callback(self):
from ..formatters import SingleHTMLFormatter
def _upcase_text(elem):
if elem.text:
elem.text = elem.text.upper()
for child in elem.iterdescendants():
if child.text:
child.text = child.text.upper()
if child.tail:
child.tail = child.tail.upper()
page_path = os.path.join(TEST_DATA_DIR, 'desserts-includes.xhtml')
if not IS_PY3:
page_path = page_path.replace('.xhtml', '-py2.xhtml')
with codecs.open(page_path, 'r', encoding='utf-8') as f:
expected_content = f.read()
exercise_url = \
'https://%s/api/exercises?q=tag:{itemCode}' % ('exercises.openstax.org')
exercise_match = '#ost/api/ex/'
if IS_MEMCACHE_ENABLED:
mc_client = _get_memcache_client()
else:
mc_client = None
includes = [exercise_callback_factory(exercise_match,
exercise_url,
mc_client),
('//xhtml:*[@data-type = "exercise"]', _upcase_text),
('//xhtml:a', _upcase_text)]
actual = SingleHTMLFormatter(self.desserts,
includes=includes)
out_path = os.path.join(TEST_DATA_DIR, 'desserts-includes-actual.xhtml')
if not IS_PY3:
out_path = out_path.replace('.xhtml', '-py2.xhtml')
with open(out_path, 'w') as out:
out.write(xmlpp(unicode(actual).encode('utf-8')))
with codecs.open(out_path, 'r', encoding='utf-8') as f:
actual_content = f.read()
self.assertEqual(xmlpp(expected_content.encode('utf-8')).split(b'\n'),
xmlpp(actual_content.encode('utf-8')).split(b'\n'))
else:
with open(out_path, 'w') as out:
out.write(str(actual))
self.assertMultiLineEqual(expected_content, str(actual))
# After assert, so won't clean up if test fails
os.remove(out_path)
@mock.patch('requests.post', mocked_requests_post)
@mock.patch('requests.get', mocked_requests_get)
def test_includes_token_callback(self):
from ..formatters import SingleHTMLFormatter
def _upcase_text(elem):
if elem.text:
elem.text = elem.text.upper()
for child in elem.iterdescendants():
if child.text:
child.text = child.text.upper()
if child.tail:
child.tail = child.tail.upper()
page_path = os.path.join(TEST_DATA_DIR, 'desserts-includes-token.xhtml')
if not IS_PY3:
page_path = page_path.replace('.xhtml', '-py2.xhtml')
with codecs.open(page_path, 'r', encoding='utf-8') as f:
expected_content = f.read()
exercise_url = \
'https://%s/api/exercises?q=tag:{itemCode}' % ('exercises.openstax.org')
exercise_match = '#ost/api/ex/'
exercise_token = 'somesortoftoken'
mathml_url = 'http://mathmlcloud.cnx.org/equation'
if IS_MEMCACHE_ENABLED:
mc_client = _get_memcache_client()
else:
mc_client = None
includes = [exercise_callback_factory(exercise_match,
exercise_url,
mc_client,
exercise_token,
mathml_url),
('//xhtml:*[@data-type = "exercise"]', _upcase_text),
('//xhtml:a', _upcase_text)]
actual = SingleHTMLFormatter(self.desserts,
includes=includes)
out_path = os.path.join(TEST_DATA_DIR,
'desserts-includes-token-actual.xhtml')
if not IS_PY3:
out_path = out_path.replace('.xhtml', '-py2.xhtml')
with open(out_path, 'w') as out:
out.write(xmlpp(unicode(actual).encode('utf-8')))
with codecs.open(out_path, 'r', encoding='utf-8') as f:
actual_content = f.read()
self.assertEqual(xmlpp(expected_content.encode('utf-8')).split(b'\n'),
xmlpp(actual_content.encode('utf-8')).split(b'\n'))
else:
with open(out_path, 'w') as out:
out.write(str(actual))
self.assertMultiLineEqual(expected_content, str(actual))
# After assert, so won't clean up if test fails
os.remove(out_path)
class FixNamespacesTestCase(unittest.TestCase):
def test(self):
from ..formatters import _fix_namespaces
actual = _fix_namespaces("""\
<html xmlns="http://www.w3.org/1999/xhtml" lang="en">
<body xmlns:bib="http://bibtexml.sf.net/">
<p>Some text<em><!-- no-selfclose --></em>!</p>
<math xmlns="http://www.w3.org/1998/Math/MathML">
<mtext>H</mtext>
</math>
</body>
</html>""")
expected_content = """\
<html
lang='en'
xmlns='http://www.w3.org/1999/xhtml'
xmlns:m='http://www.w3.org/1998/Math/MathML'
>
<body>
<p>Some text
<em><!-- no-selfclose --></em>!
</p>
<m:math>
<m:mtext>H</m:mtext>
</m:math>
</body>
</html>
"""
self.maxDiff = None
self.assertMultiLineEqual(expected_content, xmlpp(actual).decode('utf-8'))
class ExerciseCallbackTestCase(unittest.TestCase):
@mock.patch('cnxepub.formatters.logger')
@mock.patch('cnxepub.formatters.requests.get')
@mock.patch('cnxepub.formatters.requests.post')
def test_xmlsyntaxerror(self, requests_post, requests_get, logger):
from ..formatters import exercise_callback_factory
xpath, cb = exercise_callback_factory(
'#ost/api/ex/',
'https://exercises/{itemCode}',
mml_url='https://mathmlcloud/')
self.assertEqual(xpath, '//xhtml:a[contains(@href, "#ost/api/ex/")]')
node = etree.fromstring("""
<div>
<a href="#ost/api/ex/book-ch01-ex001"></a>
</div>""")
tex_math = r'<span data-math="1\ \text{kcal}"></span>'
get_resp = mock.Mock()
get_resp.json.return_value = {
'total_count': 1,
'items': [{
'questions': [{
'stem_html': tex_math,
}],
}]}
requests_get.return_value = get_resp
mathml = r"""<math xmlns="http://www.w3.org/1998/Math/MathML"
display="block" alttext="1 kcal">
<mn>1</mn>
<mtext> </mtext>
<mtext>kcal</mtext>
</math>
"""
post_resp = mock.Mock()
post_resp.json.return_value = {'components': [
{'format': 'mml',
'source': mathml}]}
requests_post.return_value = post_resp
self.assertRaises(etree.XMLSyntaxError, cb, node.getchildren()[0])
self.assertEqual(logger.error.call_args[0][0].strip(), u"""\
Error converting math in book-ch01-ex001:
math: 1\\ \\text{kcal}
mathml: <math xmlns="http://www.w3.org/1998/Math/MathML"
display="block" alttext="1 kcal">
<mn>1</mn>
<mtext> </mtext>
<mtext>kcal</mtext>
</math>""")
|
agpl-3.0
| 1,293,149,302,066,881,000 | 33.655556 | 178 | 0.523271 | false |
ASoftTech/Scons-Tools-Grbd
|
scons_tools_grbd/Tools/MSBuild/VC/Dll2Lib.py
|
1
|
3979
|
"""
Dll2Lib
This tool will generate a .lib file under windows for a given .dll file
This uses dumpfile to export a list of symbols
dumpbin /exports C:\yourpath\yourlib.dll
The list of symbols is then written to a .def file
The lib command is then used to generate the .lib file from the .def file
lib /def:C:\mypath\mylib.def /OUT:C:\mypath\mylib.lib
    A side effect of this is an .exp file, which also requires cleanup
We can then use the .lib file for linking with the compiler under Windows
"""
import os, sys, os.path as path, subprocess
import SCons.Script
from SCons.Environment import Environment
from SCons.Script import Builder
from SCons.Tool.MSCommon import msvc_exists, msvc_setup_env_once
def exists(env):
return msvc_exists()
def generate(env):
"""Called when the tool is loaded into the environment at startup of script"""
assert(exists(env))
# Set-up ms tools paths
msvc_setup_env_once(env)
env.SetDefault(
# Location of the dumpbin executable
DUMPBIN = 'dumpbin',
)
# Register the builder
bld = Builder(action = __Dll2Lib_func, emitter = __Dll2Lib_emitter)
env.Append(BUILDERS = {'Dll2Lib' : bld})
def __Dll2Lib_emitter(target, source, env):
"""Add the generated .def and .exp files to the list of targerts for cleanup"""
addfiles = []
for item in target:
libfile = item.abspath
deffile = path.splitext(libfile)[0] + '.def'
expfile = path.splitext(libfile)[0] + '.exp'
        addfiles.append(env.File(deffile))
        addfiles.append(env.File(expfile))
target = target + addfiles
return target, source
def __Dll2Lib_func(target, source, env):
"""Actual builder that does the work after the Sconscript file is parsed"""
index = 0
for srcitem in source:
srcfile = str(srcitem)
filename = str(target[index])
libfile = path.splitext(filename)[0] + '.lib'
deffile = path.splitext(filename)[0] + '.def'
if path.splitext(srcfile)[1] != '.dll':
continue
dumpbin_exp = __dumpbin_run_exports(env, srcfile)
exportlist = __dumpbin_parse_exports(dumpbin_exp)
__write_deffile(deffile, exportlist)
__generate_lib(env, deffile, libfile)
index = index + 1
def __dumpbin_run_exports(env, dllfile):
"""Run dumpbin /exports against the input dll"""
cmdopts = [env['DUMPBIN'], '/exports', str(dllfile)]
print("Calling '%s'" % env['DUMPBIN'])
stdout, stderr = __runcmd_mbcs(env, cmdopts)
return stdout
def __dumpbin_parse_exports(input):
"""Parse thr output from dumpbin as a list of symbols"""
ret = []
lines = input.split('\n')
for line in lines:
arr1 = line.split()
if len(arr1) == 4 and arr1[1] != 'number' and arr1[1] != 'hint':
ret.append(arr1[3])
return ret
def __write_deffile(outfile, lines):
"""Write the list of symbols to a .def file"""
with open(outfile, 'w') as f:
f.write('EXPORTS\n')
for line in lines:
f.write(line + '\n')
def __generate_lib(env, deffile, libfile):
"""Generate the .lib file"""
cmdopts = [env['AR'], '/def:' + deffile, '/OUT:' + libfile]
stdout, stderr = __runcmd_mbcs(env, cmdopts)
return stdout
def __runcmd_mbcs(env, cmdopts):
"""Run command while capturing the output"""
popen = SCons.Action._subproc(env, cmdopts, stdin='devnull',
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = popen.stdout.read()
stderr = popen.stderr.read()
if not isinstance(stderr, str):
stderr = stderr.decode("mbcs")
if not isinstance(stdout, str):
stdout = stdout.decode("mbcs")
if stderr:
import sys
sys.stderr.write(stderr)
if popen.wait() != 0:
raise IOError(stderr)
return stdout, stderr
|
mit
| -6,572,437,492,332,772,000 | 30.614754 | 83 | 0.617994 | false |
googleapis/googleapis-gen
|
google/cloud/dialogflow/v2beta1/dialogflow-v2beta1-py/google/cloud/dialogflow_v2beta1/services/fulfillments/transports/grpc_asyncio.py
|
1
|
13042
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.dialogflow_v2beta1.types import fulfillment
from google.cloud.dialogflow_v2beta1.types import fulfillment as gcd_fulfillment
from .base import FulfillmentsTransport, DEFAULT_CLIENT_INFO
from .grpc import FulfillmentsGrpcTransport
class FulfillmentsGrpcAsyncIOTransport(FulfillmentsTransport):
"""gRPC AsyncIO backend transport for Fulfillments.
Service for managing
[Fulfillments][google.cloud.dialogflow.v2beta1.Fulfillment].
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(cls,
host: str = 'dialogflow.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
def __init__(self, *,
host: str = 'dialogflow.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def get_fulfillment(self) -> Callable[
[fulfillment.GetFulfillmentRequest],
Awaitable[fulfillment.Fulfillment]]:
r"""Return a callable for the get fulfillment method over gRPC.
Retrieves the fulfillment.
Returns:
Callable[[~.GetFulfillmentRequest],
Awaitable[~.Fulfillment]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_fulfillment' not in self._stubs:
self._stubs['get_fulfillment'] = self.grpc_channel.unary_unary(
'/google.cloud.dialogflow.v2beta1.Fulfillments/GetFulfillment',
request_serializer=fulfillment.GetFulfillmentRequest.serialize,
response_deserializer=fulfillment.Fulfillment.deserialize,
)
return self._stubs['get_fulfillment']
@property
def update_fulfillment(self) -> Callable[
[gcd_fulfillment.UpdateFulfillmentRequest],
Awaitable[gcd_fulfillment.Fulfillment]]:
r"""Return a callable for the update fulfillment method over gRPC.
Updates the fulfillment.
Returns:
Callable[[~.UpdateFulfillmentRequest],
Awaitable[~.Fulfillment]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'update_fulfillment' not in self._stubs:
self._stubs['update_fulfillment'] = self.grpc_channel.unary_unary(
'/google.cloud.dialogflow.v2beta1.Fulfillments/UpdateFulfillment',
request_serializer=gcd_fulfillment.UpdateFulfillmentRequest.serialize,
response_deserializer=gcd_fulfillment.Fulfillment.deserialize,
)
return self._stubs['update_fulfillment']
__all__ = (
'FulfillmentsGrpcAsyncIOTransport',
)
|
apache-2.0
| 4,529,365,547,672,977,400 | 44.922535 | 87 | 0.614553 | false |
myfavouritekk/TPN
|
tools/propagate/regression_propagation.py
|
1
|
6216
|
#!/usr/bin/env python
# --------------------------------------------------------
# Test regression propagation on ImageNet VID video
# Modified by Kai KANG ([email protected])
# --------------------------------------------------------
"""Test a Fast R-CNN network on an image database."""
import argparse
import pprint
import time
import os
import os.path as osp
import sys
import cPickle
import numpy as np
this_dir = osp.dirname(__file__)
# add caffe-mpi path
sys.path.insert(0, osp.join(this_dir, '../../external/caffe-mpi/build/install/python'))
import caffe
# add py-faster-rcnn paths
sys.path.insert(0, osp.join(this_dir, '../../external/py-faster-rcnn/lib'))
from fast_rcnn.craft import im_detect
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
# add external libs
sys.path.insert(0, osp.join(this_dir, '../../external'))
from vdetlib.utils.protocol import proto_load, proto_dump
# add src libs
sys.path.insert(0, osp.join(this_dir, '../../src'))
from tpn.propagate import roi_propagation
from tpn.target import add_track_targets
from tpn.data_io import save_track_proto_to_zip
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')
parser.add_argument('vid_file')
parser.add_argument('box_file')
parser.add_argument('save_file', help='Save zip file')
parser.add_argument('--annot_file', default=None,
help='Ground truth annotation file. [None]')
parser.add_argument('--job', dest='job_id', help='Job slot, GPU ID + 1. [1]',
default=1, type=int)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--param', dest='caffemodel',
help='model to test',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--num_dets', dest='max_per_image',
help='max number of detections per image',
default=100, type=int)
parser.add_argument('--num_per_batch', dest='boxes_num_per_batch',
help='split boxes to batches. [32]',
default=32, type=int)
parser.add_argument('--bbox_mean', dest='bbox_mean',
help='the mean of bbox',
default=None, type=str)
parser.add_argument('--bbox_std', dest='bbox_std',
help='the std of bbox',
default=None, type=str)
parser.add_argument('--bbox_pred_layer', dest='bbox_pred_layer',
help='Layer name for bbox regression layer in feature net.',
default='bbox_pred_vid', type=str)
parser.add_argument('--scheme', help='Propagation scheme. [weighted]',
choices=['max', 'mean', 'weighted'], default='weighted')
parser.add_argument('--length', type=int, default=9,
help='Propagation length. [9]')
parser.add_argument('--sample_rate', type=int, default=1,
help='Temporal subsampling rate. [1]')
parser.add_argument('--offset', type=int, default=0,
help='Offset of sampling. [0]')
parser.add_argument('--wait', dest='wait',
help='wait until net file exists',
default=True, type=bool)
parser.add_argument('--gpus', nargs='+', default=None, type=int, help='Available GPUs.')
parser.add_argument('--zip', action='store_true',
help='Save as zip files rather than track protocols')
parser.add_argument('--keep_feat', action='store_true',
help='Keep feature.')
parser.set_defaults(vis=False, zip=False, keep_feat=False)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
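# Example invocation sketch (all file, model and mean/std paths are hypothetical;
# the required arguments follow the parser defined above):
#   ./regression_propagation.py video.vid.json boxes.box.json tracks.zip \
#       --def tpn.prototxt --param tpn.caffemodel \
#       --bbox_mean bbox_means.pkl --bbox_std bbox_stds.pkl --zip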
if __name__ == '__main__':
args = parse_args()
print 'Called with args:'
print args
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.job_id - 1
print 'Using config:'
pprint.pprint(cfg)
while not os.path.exists(args.caffemodel) and args.wait:
print 'Waiting for {} to exist...'.format(args.caffemodel)
time.sleep(10)
caffe.set_mode_gpu()
if args.gpus is None:
caffe.set_device(args.job_id - 1)
else:
assert args.job_id <= len(args.gpus)
caffe.set_device(args.gpus[args.job_id-1])
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
# apply bbox regression normalization on the net weights
with open(args.bbox_mean, 'rb') as f:
bbox_means = cPickle.load(f)
with open(args.bbox_std, 'rb') as f:
bbox_stds = cPickle.load(f)
net.params[args.bbox_pred_layer][0].data[...] = \
net.params[args.bbox_pred_layer][0].data * bbox_stds[:, np.newaxis]
net.params[args.bbox_pred_layer][1].data[...] = \
net.params[args.bbox_pred_layer][1].data * bbox_stds + bbox_means
vid_proto = proto_load(args.vid_file)
box_proto = proto_load(args.box_file)
track_proto = roi_propagation(vid_proto, box_proto, net, im_detect, scheme=args.scheme,
length=args.length, sample_rate=args.sample_rate,
keep_feat=args.keep_feat, batch_size=args.boxes_num_per_batch)
# add ground truth targets if annotation file is given
if args.annot_file is not None:
annot_proto = proto_load(args.annot_file)
add_track_targets(track_proto, annot_proto)
if args.zip:
save_track_proto_to_zip(track_proto, args.save_file)
else:
proto_dump(track_proto, args.save_file)
|
mit
| 8,607,709,763,917,852,000 | 38.341772 | 92 | 0.591055 | false |
bsaleil/lc
|
tools/graphs.py
|
1
|
14708
|
#!/usr/bin/env python3
#---------------------------------------------------------------------------
#
# Copyright (c) 2015, Baptiste Saleil. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
# NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#---------------------------------------------------------------------------
# No font with Ubuntu:
# http://stackoverflow.com/questions/11354149/python-unable-to-render-tex-in-matplotlib
# Execute compiler with stats option for all benchmarks
# Parse output
# Draw graphs
help = """
graphs.py - Generate graphs from compiler output
Use:
graphs.py [OPTION...]
Options:
-h,--help
Print this help.
--drawall
        Draw all graphs. By default the script lets the user choose the information to draw.
--stdexec
        Use standard execution. Same as --exec="Standard;".
--exec="DESCRIPTION;COMPILER_OPTION1 COMPILER_OPTION2 ..."
Add execution with given compiler options. All given executions are drawn
Example:
graphs.py --exec="Standard exec;" --exec="With all tests;--all-tests" --drawall
Draw all graphs for both executions (Standard, and with all-tests option).
graphs.py --stdexec
Let the user interactively choose the information to draw from only standard execution.
"""
import sys
import io
import glob
import os
import subprocess
from pylab import *
from copy import deepcopy
from matplotlib.backends.backend_pdf import PdfPages
# Constants
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__)) + '/' # Current script path
LC_PATH = SCRIPT_PATH + '../' # Compiler path
LC_EXEC = 'lazy-comp' # Compiler exec name
PDF_OUTPUT = SCRIPT_PATH + 'graphs.pdf' # PDF output file
BENCH_PATH = LC_PATH + 'benchmarks/*.scm' # Benchmarks path
BAR_COLORS = ["#222222","#555555","#888888","#AAAAAA","#DDDDDD"] # Bar colors
BAR_COLORS = ["#BBBBBB","#999999","#777777","#555555","#333333"] # Bar colors
#BAR_COLORS = ["#222222", "#666666", "#AAAAAA", "#EEEEEE"] # Paper sw15
FONT_SIZE = 9
# Parser constants, must match compiler --stats output
CSV_INDICATOR = '--'
STAT_SEPARATOR = ':'
CSV_SEPARATOR = ';'
# Options
DRAW_ALL = '--drawall' # Draw all graphs
STD_EXEC = '--stdexec' # Add standard execution to executions list
REF_EXEC = '--refexec' # Set reference execution for scale
SORT_EXEC = '--sortexec' # Sort
OPT_REF = False
OPT_SORT = False
# Globals
execs = {}
lexecs = []
printhelp = False
# Set current working directory to compiler path
os.chdir(LC_PATH)
# Get all benchmarks full path sorted by name
files = sorted(glob.glob(BENCH_PATH))
# Graph config
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
matplotlib.rcParams.update({'font.size': FONT_SIZE})
#-------------------------------------------------------------------------------------
# Utils
def num(s):
try:
return int(s)
except ValueError:
return float(s)
def WARNING(s):
print('WARNING: ' + s)
# Used as matplotlib formatter
def to_percent(y, position):
s = str(int(y))
# The percent symbol needs escaping in latex
if matplotlib.rcParams['text.usetex'] is True:
return s + r'$\%$'
else:
return s + '%'
#-------------------------------------------------------------------------------------
# Main
def setargs():
global printhelp
global OPT_REF
global OPT_SORT
if '-h' in sys.argv or '--help' in sys.argv:
printhelp = True
if STD_EXEC in sys.argv:
execs['Standard'] = ''
if REF_EXEC in sys.argv:
OPT_REF = sys.argv[sys.argv.index(REF_EXEC)+1]
if SORT_EXEC in sys.argv:
OPT_SORT = sys.argv[sys.argv.index(SORT_EXEC)+1]
for arg in sys.argv:
if arg.startswith('--exec='):
pair = arg[7:].split(';')
name = pair[0]
lcargs = pair[1].split()
execs[name] = lcargs
lexecs.append(name)
def go():
if printhelp:
print(help)
else:
# 1 - run benchmarks and parse compiler output
benchs_data = {}
keys = []
for ex in execs:
            ks, data = runparse(execs[ex])  # TODO: pass the arguments explicitly
if keys == []:
keys = ks
else:
if len(ks) != len(keys):
raise Exception("Error")
benchs_data[ex] = data
# 2 - Draw all graphs
drawGraphs(keys,benchs_data)
print('Done!')
# Run compiler with 'opts', parse output and return keys and data
def runparse(opts):
print("Running with options: '" + ' '.join(opts) + "'")
data = {}
# Get keys
first = files[0]
keys = []
for file in files:
file_name = os.path.basename(file)
print(file_name + '...')
options = [LC_PATH + LC_EXEC, file, '--stats']
        options.extend(opts)  # TODO: rename 'options'
output = subprocess.check_output(options).decode("utf-8")
bench_data = parseOutput(output)
data[file_name] = bench_data
# Get keys on first result
if file == first:
for key in bench_data:
keys.append(key)
return keys,data
#-------------------------------------------------------------------------------------
# Parser: Read stats output from compiler and return python table representation
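# Sketch of the expected '--stats' output (inferred from readStat/readCSV below;
# the key and table names are illustrative only):
#   Closures : 42
#   Executed tests : 7
#   --
#   Stubs per version count
#   #versions;#stubs
#   0;12
#   1;3
#   --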
# Read 'KEY:VALUE' stat
def readStat(stream,data,line):
stat = line.split(STAT_SEPARATOR)
key = stat[0].strip()
val = num(stat[1].strip())
# Store key/value in global data
data[key] = val
line = stream.readline()
return line
# Read CSV stat
def readCSV(stream,data):
csv = []
# Consume CSV indicator line
line = stream.readline()
# Read table title
title = line.strip()
line = stream.readline()
# Read table header
header = line.split(CSV_SEPARATOR)
for el in header:
csv.append([el.strip()])
# Read CSV data
line = stream.readline()
while not line.startswith(CSV_INDICATOR):
linecsv = line.split(CSV_SEPARATOR)
for i in range(0,len(linecsv)):
csv[i].extend([num(linecsv[i].strip())]) ## THIS IS NOT EFFICIENT (for large CSV outputs)
line = stream.readline()
# Store key/value (title/csv) in global data
data[title] = csv
# Consume CSV indicator line
line = stream.readline()
return line
# Return python table from compiler 'output'
def parseOutput(output):
# Data for this benchmark
data = {}
# Stream
stream = io.StringIO(output)
# Parse
line = stream.readline()
while line:
# CSV table
if line.startswith(CSV_INDICATOR):
line = readCSV(stream,data)
# Key/Value line
else:
line = readStat(stream,data,line)
return data
#-------------------------------------------------------------------------------------
# Draw
# Draw all graphs associated to keys using benchs_data
# benchs_data contains all information for all benchmarks for all executions
# ex. benchs_data['Standard']['array1.scm']['Closures'] to get the number of
# closures created for benchmark array1.scm using standard exec
def drawGraphs(keys,benchs_data):
# Let user choose the graph to draw (-1 or empty for all graphs)
if not DRAW_ALL in sys.argv:
sortedKeys = sorted(keys)
print('Keys:')
print('-1: ALL')
for i in range(0,len(sortedKeys)):
print(' ' + str(i) + ': ' + sortedKeys[i])
inp = input('Key to draw (all) > ')
if not inp == '':
choice = num(inp)
if choice >= 0:
keys = [sortedKeys[choice]]
firstExec = list(benchs_data.keys())[0]
firstBenchmark = os.path.basename(files[0])
# Gen pdf output file
pdf = PdfPages(PDF_OUTPUT)
# For each key
for key in keys:
# CSV, NYI
if type(benchs_data[firstExec][firstBenchmark][key]) == list:
drawCSV(pdf,key,benchs_data)
# Key/Value, draw graph
else:
print("Drawing '" + key + "'...")
drawKeyValueGraph(pdf,key,benchs_data)
pdf.close()
## This is a specific implementation for #stubs/#versions
## TODO: Do something generic !
def drawCSV(pdf,key,benchs_data):
fig = plt.figure(key)
title = key
res = {}
for execution in benchs_data:
for bench in benchs_data[execution]:
for data in benchs_data[execution][bench][key]:
if data[0] == '#stubs':
for i in range(0,len(data)-1):
index = i+1
numvers = i
if (numvers >= 5):
numvers = -1
if (numvers in res):
res[numvers] += data[index]
else:
res[numvers] = data[index]
xvals = []
yvals = []
labels = []
keys = sorted(res.keys())
for key in keys:
if key != 0 and key != -1:
xvals.append(key)
yvals.append(res[key])
labels.append(key)
xvals.append(len(xvals)+1)
yvals.append(res[-1])
labels.append('>=5')
sum = 0
for val in yvals:
sum += val
for i in range(0,len(yvals)):
p = (yvals[i] * 100) / sum
yvals[i] = p
plt.title(title + ' (total=' + str(sum) + ')')
X = np.array(xvals)
Y = np.array(yvals)
bar(X, +Y, 1, facecolor=BAR_COLORS[0], edgecolor='white', label=key, zorder=10)
axes = gca()
axes.get_xaxis().set_visible(False)
# Draw grid
axes = gca()
axes.grid(True, zorder=1, color="#707070")
axes.set_axisbelow(True) # Keep grid under the axes
for i in range(0,len(labels)):
text(X[i]+0.25, -0.0, labels[i], ha='right', va='top')
# print(xvals)
# print(yvals)
# print(labels)
# print(res)
pdf.savefig(fig)
# Draw graph for given key
# Y: values for this key
# X: benchmarks
def drawKeyValueGraph(pdf,key,benchs_data):
fig = plt.figure(key,figsize=(8,3.4))
#plt.title(key)
exec_ref = ''
# Number of benchmarks
firstExec = list(benchs_data.keys())[0]
n = len(benchs_data[firstExec]) + 1 # +1 for mean
X = np.arange(n) # X set is [0, 1, ..., n-1]
Ys = {}
# For each exec
for d in benchs_data:
Y = []
# For each benchmark
for f in files:
Y.extend([benchs_data[d][os.path.basename(f)][key]])
        # Convert to a numpy array
Y = np.array(Y)
Ys[d] = Y
width = (1 / (len(Ys)+1)) # +1 for mean
#----------
# TODO: move to external fn
# Use a reference execution. All values for this exec are 100%
# Values for others executions are computed from this reference exec
if OPT_REF:
# Add % symbol to y values
formatter = FuncFormatter(to_percent)
plt.gca().yaxis.set_major_formatter(formatter)
exec_ref = OPT_REF # Reference execution (100%)
Y2 = deepcopy(Ys) # Deep copy of Y values
# Set all references to 100
for v in range(0,len(Y2[exec_ref])):
Y2[exec_ref][v] = '100'
# For each exec which is not ref exec
candraw = True # TODO : rename
for ex in Y2:
if ex != exec_ref:
for i in range(0,len(Y2[ex])):
ref = Ys[exec_ref][i]
cur = Ys[ex][i]
# We can't compute %, warning and stop
if ref == 0:
WARNING("Can't draw '" + key + "' using a reference execution.")
return
# Compute % and set
else:
Y2[ex][i] = (cur*100)/ref
# Y2 are the new values to draw
Ys = Y2
#----------
fileList = files
Yvals = Ys
# Sort Y values by a given execution
if OPT_SORT:
fileList,Yvals = sortByExecution(Yvals,OPT_SORT)
# Draw grid
axes = gca()
axes.grid(True, zorder=1, color="#707070")
axes.set_axisbelow(True) # Keep grid under the axes
i = 0
# TODO: add to --help: the script draws the exec bar in order
for key in lexecs:
if key != exec_ref:
Y = Yvals[key]
color = BAR_COLORS[i]
arith_mean = sum(Y) / float(len(Y))
print("MEANS:")
print(key + ": " + str(arith_mean))
Y = np.append(Y,[arith_mean]) # Add mean before drawing bars
bar(X+(i*width)+0.05, +Y, width, facecolor=color, linewidth=0, label=key)
i += 1
# Hide X values
axes.get_xaxis().set_visible(False)
plt.tick_params(axis='both', which='minor')
# # Set Y limit
#l = len(str(max(Y2))) # number of digit of max value
#ylim(0,max(Y2)+pow(10,l-1)) # Y is from 0 to (max + 10^i-1)
# # Draw values for each bar
# for x,y in zip(X,Y1):
# text(x+0.4, y+0.05, '%.2f' % y, ha='center', va= 'bottom')
ylim(0,120)
xlim(0,n)
# Draw benchmark name
names = fileList
names.append("ari-mean.scm") # Add mean name
for i in range(0,len(fileList)):
text(X[i]+0.40, -3, os.path.basename(fileList[i])[:-4], rotation=90, ha='center', va='top')
# Legend:
# Shrink by 10% on the bottom
box = axes.get_position()
axes.set_position([box.x0, box.y0 + box.height * 0.34, box.width, box.height * 0.66])
# Put a legend below axis
ncol = int(len(lexecs)/3);
legend(loc='upper center', bbox_to_anchor=(0., 0., 1., -0.35), prop={'size':FONT_SIZE}, ncol=ncol, mode='expand', borderaxespad=0.)
# Save to pdf
pdf.savefig(fig)
#-------------------------------------------------------------------------------------
# Manage Y values
# Sort Y values by values from a specific execution
def sortByExecution(Ys,execref):
    # Pseudo-decorate: Change data layout to allow the use of sort()
decorated = []
for fileIndex in range(0,len(files)):
r = [] # List of results for current file
for execution in Ys:
r.extend([execution,Ys[execution][fileIndex]])
r.append(files[fileIndex])
decorated.append(r)
# Sort
i = decorated[0].index(execref)
decorated = sorted(decorated,key=lambda el: el[i+1])
# Pseudo-undecorate: Restore previous layout with sorted data
undecorated = {}
ordered_files = []
i = 0;
while not decorated[0][i] in files:
execution = decorated[0][i]
vals = []
# For each data associated to file
for el in decorated:
vals.append(el[i+1])
filepath = el[len(el)-1]
if not filepath in ordered_files:
ordered_files.append(filepath)
undecorated[execution] = np.asarray(vals);
i+=2
return(ordered_files,undecorated)
#-------------------------------------------------------------------------------------
setargs()
go()
|
bsd-3-clause
| 3,920,754,107,737,802,000 | 26.961977 | 132 | 0.628298 | false |
cyphactor/lifecyclemanager
|
extra/plugins/userlog/userlog/userlog.py
|
1
|
7748
|
import posixpath
from trac.core import *
from trac.config import *
from trac.config import BoolOption
from trac.web.chrome import ITemplateProvider, \
add_stylesheet
from trac.web.main import IRequestHandler
from trac.wiki import wiki_to_html, wiki_to_oneliner
from trac.mimeview import Mimeview, is_binary
from trac.util import escape, Markup
from trac.util.html import html  # needed for html.PRE below
from trac.util.datefmt import format_datetime, pretty_timedelta
from trac.util.text import unicode_urlencode, shorten_line, CRLF
from trac.versioncontrol.diff import get_diff_options, unified_diff
from trac.versioncontrol import Node, Changeset
import re
class UserLogModule(Component):
implements(IRequestHandler, ITemplateProvider)
wiki_format_messages = BoolOption('changeset', 'wiki_format_messages',
'true',
"""Whether wiki formatting should be applied to changeset messages.
If this option is disabled, changeset messages will be rendered as
pre-formatted text.""")
# IRequestHandler methods
def match_request(self, req):
match = re.match(r'/userlog(?:/(\w+).*|$)', req.path_info)
if match:
req.args['user'] = match.group(1) or '/'
return True
def process_request(self, req):
user = req.args.get('user')
sort = req.args.get('sort', 'ASC')
db = self.env.get_db_cnx()
changesets = self._get_userlog(req, db, user, sort)
toc_links = []
for rev, _, _, _ in changesets:
toc_links.append({'anchor': rev,
'title': 'Revision %s' % rev})
changeset_ranges = self._get_changeset_ranges(changesets)
changeset_links = []
for start, end in changeset_ranges:
if start != end:
title = 'Changeset [%s:%s]' % (start, end)
else:
title = 'Changeset [%s]' % start
link = req.href.changeset(old=start, old_path='/',
new=end, new_path='/')
changeset_links.append({'href': link,
'title': title})
req.hdf['user'] = user
req.hdf['changesets'] = changesets
req.hdf['toc_links'] = toc_links
req.hdf['changeset_links'] = changeset_links
add_stylesheet(req, 'common/css/wiki.css')
add_stylesheet(req, 'userlog/css/userlog.css')
return 'userlog.cs', None
def _get_userlog(self, req, db, user, sort):
mimeview = Mimeview(self.env)
repos = self.env.get_repository()
diff_options = get_diff_options(req)
cursor = db.cursor()
cursor.execute("SELECT rev, time, message FROM revision "
"WHERE author='%s' ORDER BY time %s" % (user, sort))
# Have to sort by time because rev is a text field
# and sorts lexicographically rather than numerically
changesets = []
for rev, time, message in cursor:
if self.wiki_format_messages:
message = wiki_to_html(message, self.env, req,
escape_newlines=True)
else:
message = html.PRE(message)
prev = repos.get_node('/', rev).get_previous()
if prev:
prev_rev = prev[1]
else:
prev_rev = rev
diffs = []
changes = repos.get_changes(old_path='/', old_rev=prev_rev,
new_path='/', new_rev=rev)
for old_node, new_node, kind, change in changes:
if kind == Node.DIRECTORY:
if change == Changeset.ADD:
diffs.append(('%s added' % new_node.path, ''))
elif change == Changeset.DELETE:
diffs.append(('%s deleted' % old_node.path, ''))
continue
new_content = old_content = ''
new_node_info = old_node_info = ('','')
if old_node:
old_content = old_node.get_content().read()
if is_binary(old_content):
continue
old_node_info = (old_node.path, old_node.rev)
old_content = mimeview.to_unicode(old_content,
old_node.content_type)
if new_node:
new_content = new_node.get_content().read()
if is_binary(new_content):
continue
new_node_info = (new_node.path, new_node.rev)
new_path = new_node.path
new_content = mimeview.to_unicode(new_content,
new_node.content_type)
else:
old_node_path = repos.normalize_path(old_node.path)
diff_old_path = repos.normalize_path('/')
new_path = posixpath.join('/', old_node_path[len(diff_old_path)+1:])
if old_content != new_content:
context = 3
options = diff_options[1]
for option in options:
if option.startswith('-U'):
context = int(option[2:])
break
if not old_node_info[0]:
old_node_info = new_node_info # support for 'A'dd changes
diff = 'Index: ' + new_path + CRLF
diff += '=' * 67 + CRLF
diff += '--- %s (revision %s)' % old_node_info + CRLF
diff += '+++ %s (revision %s)' % new_node_info + CRLF
for line in unified_diff(old_content.splitlines(),
new_content.splitlines(), context,
ignore_blank_lines='-B' in options,
ignore_case='-i' in options,
ignore_space_changes='-b' in options):
diff += line + CRLF
if change == Changeset.ADD:
diffs.append(('%s added' % (new_node.path,), diff))
elif change == Changeset.DELETE:
diffs.append(('%s deleted' % (old_node.path,), diff))
else:
diffs.append(('%s edited' % (new_node.path,), diff))
changesets.append((int(rev), format_datetime(time), message, diffs))
return changesets
def _get_changeset_ranges(self, changesets):
ranges = [] # will be a list of pairs: (start, end)
for rev, _, _, _ in changesets:
# if rev is more than two greater than last max
# or list is empty
if ranges == [] or rev > (ranges[-1][1] + 1):
# create a new tuple
ranges.append((rev, rev))
# else if rev is greater (by one) than last max
elif rev == (ranges[-1][1] + 1):
ranges[-1] = (ranges[-1][0], rev)
return ranges
# ITemplateProvider methods
def get_templates_dirs(self):
"""Return a list of directories containing the provided
ClearSilver templates.
"""
from pkg_resources import resource_filename
return [resource_filename(__name__, 'templates')]
def get_htdocs_dirs(self):
from pkg_resources import resource_filename
return [('userlog', resource_filename(__name__, 'htdocs'))]
|
gpl-3.0
| -8,103,132,112,731,082,000 | 43.786127 | 88 | 0.495225 | false |
bloem-project/bloem-server
|
files/models.py
|
1
|
3838
|
# -*- coding: utf-8 -*-
"""Model definitions for Bloem's files application.
This module defines the various models used as part of Bloem's files
application.
"""
import os
from django.db import models
class Directory(models.Model):
"""Defines the Directory model used in Bloem's files application.
Fields:
path (CharField): Path of the directory.
"""
path = models.CharField(max_length=4096)
class Meta:
verbose_name = "directory"
verbose_name_plural = "directories"
class Namespace(models.Model):
"""Defines the Namespace model used in Bloem's files application.
Fields:
name (CharField): Name of the namespace.
"""
name = models.CharField(unique=True, max_length=64)
class Meta:
verbose_name = "namespace"
verbose_name_plural = "namespaces"
class Tag(models.Model):
"""Defines the Tag model used in Bloem's files application.
Fields:
name (CharField): Name of the tag.
namespace (ForeignKey): Points to the namespace.
"""
name = models.CharField(unique=True, max_length=64)
namespace = models.ForeignKey(Namespace, on_delete=models.CASCADE)
class Meta:
verbose_name = "tag"
verbose_name_plural = "tags"
class File(models.Model):
"""Defines the File model used in Bloem's files application.
Fields:
hash (CharField): SHA256 hash of the file.
file_name (CharField): Name of the file.
path (CharField): Absolute path of the file, excluding the actual
filename.
date_added (DateTimeField): Date and time when the file was added to
the database.
date_modified (DateTimeField): Date and time when the file was modified
in the database.
"""
hash = models.CharField(max_length=64, unique=True)
file_name = models.CharField(max_length=256)
directory = models.ForeignKey(Directory, on_delete=models.CASCADE)
path = models.CharField(max_length=4096)
date_added = models.DateTimeField(auto_now_add=True)
date_modified = models.DateTimeField(auto_now=True)
tags = models.ManyToManyField(Tag, blank=True)
def _get_full_path(self):
return os.path.join(self.path, self.file_name)
full_path = property(_get_full_path)
def __str__(self):
"""Output the file's name."""
return self.file_name
class Meta:
ordering = ["file_name"]
get_latest_by = "date_added"
verbose_name = "file"
verbose_name_plural = "files"
class InboxItem(models.Model):
"""Defines the InboxItem model used in Bloem's files application.
Fields:
file (OneToOneField): Points to the File object.
"""
file = models.OneToOneField(File)
def __str__(self):
"""Output the file's name."""
return self.file.file_name
class Meta:
verbose_name = "inbox item"
verbose_name_plural = "inbox items"
class Person(models.Model):
"""Defines the Person model used in Bloem's files application.
This model is deliberately meant to be as wide as
possible, with all fields being optional to allow
users to choose which field they wish to fill for
each person at their own discretion.
    Fields:
        first_name (CharField): Person's first name.
        last_name (CharField): Person's last name.
        gender (CharField): Person's gender; one of GENDER_CHOICES.
        date_of_birth (DateField): Person's date of birth.
    """
MALE = 'ML'
FEMALE = 'FM'
GENDER_CHOICES = (
(MALE, 'Male'),
(FEMALE, 'Female'),
)
first_name = models.CharField(blank=True, null=True, max_length=64)
last_name = models.CharField(blank=True, null=True, max_length=64)
    gender = models.CharField(max_length=2, choices=GENDER_CHOICES, blank=True, null=True)
date_of_birth = models.DateField(blank=True, null=True)
class Meta:
verbose_name = "person"
verbose_name_plural = "persons"
|
gpl-3.0
| 7,179,614,170,286,121,000 | 27.857143 | 79 | 0.633924 | false |
voytekresearch/fakespikes
|
fakespikes/neurons.py
|
1
|
4820
|
# -*- coding: utf-8 -*-
import numpy as np
from numpy.random import RandomState
from fakespikes.util import to_spikes
from fakespikes.rates import constant
class Spikes(object):
"""Simulates statistical models of neural spiking
Params
------
n : int
Number of neurons
t : float
Simulation time (seconds)
dt : float
Time-step (seconds)
refractory : float
Absolute refractory time
seed : None, int, RandomState
The random seed
private_stdev : float
        Amount of stdev noise to add to each neuron's tuning response
"""
def __init__(self, n, t, dt=0.001, refractory=0.002, seed=None,
private_stdev=0):
# Ensure reproducible randomess
self.seed = seed
if isinstance(seed, RandomState):
self.prng = seed
elif self.seed is not None:
self.prng = np.random.RandomState(seed)
else:
self.prng = np.random.RandomState()
# Init constraints
if n < 2:
raise ValueError("n must be greater than 2")
if dt > 0.001:
raise ValueError("dt must be less than 0.001 seconds (1 ms)")
if not np.allclose(refractory / dt, int(refractory / dt)):
raise ValueError("refractory must be integer multiple of dt")
self.n = n
self.refractory = refractory
# Timing
self.dt = dt
self.t = t
self.n_steps = int(self.t * (1.0 / self.dt))
self.times = np.linspace(0, self.t, self.n_steps)
self.private_stdev = private_stdev
self.refractory = refractory
# Create uniform sampling distributions for each neuron
self.unifs = np.vstack(
[self.prng.uniform(0, 1, self.n_steps) for i in range(self.n)]
).transpose()
def _constraints(self, drive):
if drive.shape != self.times.shape:
raise ValueError("Shape of `drive` didn't match times")
if drive.ndim != 1:
raise ValueError("`drive` must be 1d")
def _refractory(self, spks):
lw = int(self.refractory / self.dt) # len of refractory window
        # If a neuron spiked at time t, delete any spikes it fires
        # within the following lw (refractory) time steps
        for t in range(spks.shape[0]):
            mask = spks[t, :] > 0
            for t_plus in range(t + 1, min(t + 1 + lw, spks.shape[0])):
                spks[t_plus, :][mask] = 0
return spks
def poisson(self, rates):
"""Simulate Poisson firing
Params
------
rates : array-like, 1d, > 0
The firing rate
"""
self._constraints(rates) # does no harm to check twice
# No bias unless private_stdev is specified
biases = np.zeros(self.n)
if self.private_stdev > 0:
biases = self.prng.normal(0, self.private_stdev, size=self.n)
# Poisson method taken from
# http://www.cns.nyu.edu/~david/handouts/poisson.pdf
        spikes = np.zeros_like(self.unifs, int)
for j in range(self.n):
mask = self.unifs[:, j] <= ((rates + biases[j]) * self.dt)
spikes[mask, j] = 1
return self._refractory(spikes)
def sync_bursts(self, a0, f, k, var=1e-3):
"""Create synchronous bursts (1 ms variance) of thalamic-ish spike
Params
------
        a0 : numeric
            Baseline Poisson firing rate (Hz)
        f : numeric
            Oscillation frequency (Hz)
        k : numeric
            Number of neurons to spike at each burst
        var : numeric, optional
            Standard deviation (seconds) of the Gaussian jitter around each burst time
"""
if k > self.n:
raise ValueError("k is larger than N")
if f < 0:
raise ValueError("f must be greater then 0")
if k < 0:
raise ValueError("k must be greater then 0")
# Locate about where the pulses of spikes will go, at f,
wl = 1 / float(f)
n_pulses = int(self.t * f)
pulses = []
t_p = 0
for _ in range(n_pulses):
t_p += wl
            # Guard against negative ts
if t_p > (3 * var):
pulses.append(t_p)
# and fill in the pulses with Gaussin distributed spikes.
Ns = range(self.n)
ts = []
ns = []
for t in pulses:
ts += list(t + self.prng.normal(0, var, k))
# Assign spikes to random neurons, at most
# one spike / neuron
self.prng.shuffle(Ns)
ns += list(Ns)[0:k]
ts = np.array(ts)
ns = np.array(ns)
        # Just in case any negative times slipped through
mask = ts > 0
ts = ts[mask]
ns = ns[mask]
spikes = to_spikes(ns, ts, self.t, self.n, self.dt)
# Create baseline firing
base = self.poisson(constant(self.times, a0))
spikes = base + spikes
spikes[spikes > 1] = 1
return spikes
|
mit
| -3,318,524,617,815,953,400 | 28.390244 | 74 | 0.541079 | false |
Open-I-Beam/swift-storlets
|
Engine/swift/storlet_gateway/storlet_runtime.py
|
1
|
24362
|
'''-------------------------------------------------------------------------
Copyright IBM Corp. 2015, 2015 All Rights Reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
Limitations under the License.
-------------------------------------------------------------------------'''
'''
Created on Feb 10, 2015
@author: eranr
'''
import os
import time
import stat
import select
import commands
import eventlet
from eventlet.timeout import Timeout
import json
import shutil
import sys
from swift.common.constraints import MAX_META_OVERALL_SIZE
from swift.common.swob import HTTPBadRequest, Request,\
HTTPInternalServerError
from SBusPythonFacade.SBus import *
from SBusPythonFacade.SBusDatagram import *
from SBusPythonFacade.SBusStorletCommand import *
from SBusPythonFacade.SBusFileDescription import *
from storlet_middleware.storlet_common import StorletLogger
eventlet.monkey_patch()
'''---------------------------------------------------------------------------
Sandbox API
'''
class RunTimePaths():
'''
    The storlet engine needs to access several kinds of paths:
1. The various communication channels represented as pipes in the filesystem
2. Directories where to place Storlets
3. Directories where to place logs
Communication channels
----------------------
The RunTimeSandbox communicates with the Sandbox via two types of pipes
1. factory pipe - defined per account, used for communication with the sandbox
       e.g. to start or stop a storlet daemon
2. Storlet pipe - defined per account and Storlet, used for communication
with a storlet daemon, e.g. to call the invoke API
Each pipe type has two paths:
1. A path that is inside the sandbox
2. A path that is outside of the sandbox or at the host side. As such
this path is prefixed by 'host_'
Thus, we have the following 4 paths of interest:
1. sandbox_factory_pipe_path
2. host_factory_pipe_path
3. sandbox_storlet_pipe_path
4. host_storlet_pipe_path
Our implementation uses the following path structure for the various pipes:
In the host, all pipes belonging to a given account are prefixed by
<pipes_dir>/<account>, where <pipes_dir> comes from the configuration
Thus:
host_factory_pipe_path is of the form <pipes_dir>/<account>/factory_pipe
host_storlet_pipe_path is of the form <pipes_dir>/<account>/<storlet_id>
In The sandbox side
sandbox_factory_pipe_path is of the form /mnt/channels/factory_pipe
sandbox_storlet_pipe_path is of the form /mnt/channels/<storlet_id>
Storlets Locations
------------------
The Storlet binaries are accessible from the sandbox using a mounted directory.
    This directory is called the storlet directory.
On the host side it is of the form <storlet_dir>/<account>/<storlet_name>
On the sandbox side it is of the form /home/swift/<storlet_name>
<storlet_dir> comes from the configuration
<storlet_name> is the prefix of the jar.
Logs
----
Logs are located in paths of the form:
<log_dir>/<account>/<storlet_name>.log
'''
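    # Concrete example sketch for account 'AUTH_1234567890abc' (the directory
    # roots come from the proxy-server configuration and are placeholders here):
    #   host_factory_pipe():            <pipes_dir>/1234567890abc/factory_pipe
    #   host_storlet('mystorlet'):      <storlets_dir>/1234567890abc/mystorlet
    #   sbox_storlet_pipe('mystorlet'): /mnt/channels/mystorlet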
def __init__(self, account, conf):
self.account = account
self.scope = account[5:18]
self.host_restart_script_dir = conf['script_dir']
self.host_pipe_root = conf['pipes_dir']
self.factory_pipe_suffix = 'factory_pipe'
self.sandbox_pipe_prefix = '/mnt/channels'
self.storlet_pipe_suffix = '_storlet_pipe'
self.sandbox_storlet_dir_prefix = '/home/swift'
self.host_storlet_root = conf['storlets_dir']
self.host_log_path_root = conf['log_dir']
self.host_cache_root = conf['cache_dir']
self.storlet_container = conf['storlet_container']
self.storlet_dependency = conf['storlet_dependency']
def host_pipe_prefix(self):
return os.path.join(self.host_pipe_root, self.scope)
def create_host_pipe_prefix(self):
path = self.host_pipe_prefix()
if not os.path.exists(path):
os.makedirs(path)
# 0777 should be 0700 when we get user namespaces in Docker
os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
def host_factory_pipe(self):
return os.path.join(self.host_pipe_prefix(),
self.factory_pipe_suffix)
def host_storlet_pipe(self, storlet_id):
return os.path.join(self.host_pipe_prefix(),
storlet_id)
def sbox_storlet_pipe(self, storlet_id):
return os.path.join(self.sandbox_pipe_prefix,
storlet_id)
def sbox_storlet_exec(self, storlet_id):
return os.path.join(self.sandbox_storlet_dir_prefix, storlet_id)
def host_storlet_prefix(self):
return os.path.join(self.host_storlet_root, self.scope)
def host_storlet(self, storlet_id):
return os.path.join(self.host_storlet_prefix(), storlet_id)
def slog_path(self, storlet_id):
log_dir = os.path.join(self.host_log_path_root, self.scope, storlet_id)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
return log_dir
def get_host_storlet_cache_dir(self):
return os.path.join(self.host_cache_root, self.scope,self.storlet_container)
def get_host_dependency_cache_dir(self):
return os.path.join(self.host_cache_root, self.scope,self.storlet_dependency)
'''---------------------------------------------------------------------------
Docker Stateful Container API
The RunTimeSandbox serve as an API between the Docker Gateway and
a re-usable per account sandbox
---------------------------------------------------------------------------'''
class RunTimeSandbox():
'''
The RunTimeSandbox represents a re-usable per account sandbox. The sandbox
is re-usable in the sense that it can run several storlet daemons.
The following methods are supported:
ping - pings the sandbox for liveness
wait - wait for the sandbox to be ready for processing commands
restart - restart the sandbox
start_storlet_daemon - start a daemon for a given storlet
stop_storlet_daemon - stop a daemon of a given storlet
get_storlet_daemon_status - test if a given storlet daemon is running
'''
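    # Usage sketch (the account string and the conf dict contents are illustrative):
    #   sbox = RunTimeSandbox('AUTH_1234567890abc', conf, logger)
    #   if sbox.ping() != 1:
    #       sbox.restart()
    #   sbox.start_storlet_daemon('/home/swift/MyStorlet/my.jar', 'MyStorlet')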
def __init__(self, account, conf, logger):
self.paths = RunTimePaths(account, conf)
self.account = account
self.sandbox_ping_interval = 0.5
self.sandbox_wait_timeout = int(conf['restart_linux_container_timeout'])
self.docker_repo = conf['docker_repo']
self.docker_image_name_prefix = 'tenant'
# TODO: should come from upper layer Storlet metadata
self.storlet_language = 'java'
# TODO: add line in conf
self.storlet_daemon_thread_pool_size = int(conf.get('storlet_daemon_thread_pool_size',5))
self.storlet_daemon_debug_level = conf.get('storlet_daemon_debug_level','TRACE')
# TODO: change logger's route if possible
self.logger = logger
def _parse_sandbox_factory_answer(self, str_answer):
two_tokens = str_answer.split(':', 1)
b_success = False
if two_tokens[0] == 'True':
b_success = True
return b_success, two_tokens[1]
def ping(self):
pipe_path = self.paths.host_factory_pipe()
read_fd, write_fd = os.pipe()
dtg = SBusDatagram.create_service_datagram( SBUS_CMD_PING, write_fd )
rc = SBus.send( pipe_path, dtg )
if (rc < 0):
return -1
reply = os.read(read_fd,10)
os.close(read_fd)
os.close(write_fd)
res, error_txt = self._parse_sandbox_factory_answer(reply)
if res == True:
return 1
return 0
def wait(self):
do_wait = True
up = 0
to = Timeout(self.sandbox_wait_timeout)
try:
while do_wait == True:
rc = self.ping()
if (rc != 1):
time.sleep(self.sandbox_ping_interval)
continue
else:
to.cancel()
do_wait = False
up = 1
except Timeout as t:
self.logger.info("wait for sandbox %s timedout" % self.account)
do_wait = False
finally:
to.cancel()
return up
def restart(self):
'''
Restarts the account's sandbox
Returned value:
True - If the sandbox was started successfully
False - Otherwise
'''
# Extract the account's ID from the account
if self.account.lower().startswith('auth_'):
account_id = self.account[len('auth_'):]
else:
account_id = self.account
self.paths.create_host_pipe_prefix()
docker_container_name = '%s_%s' % (self.docker_image_name_prefix,
account_id)
docker_image_name = '%s/%s' % (self.docker_repo,account_id)
pipe_mount = '%s:%s' % (self.paths.host_pipe_prefix(),
self.paths.sandbox_pipe_prefix)
storlet_mount = '%s:%s' % (self.paths.host_storlet_prefix(),
self.paths.sandbox_storlet_dir_prefix)
cmd = '%s/restart_docker_container %s %s %s %s' % (
self.paths.host_restart_script_dir,
docker_container_name,
docker_image_name,
pipe_mount,
storlet_mount)
res = commands.getoutput(cmd)
return self.wait()
def start_storlet_daemon(self, spath, storlet_id):
prms = {}
prms['daemon_language'] = 'java'
prms['storlet_path'] = spath
prms['storlet_name'] = storlet_id
prms['uds_path'] = self.paths.sbox_storlet_pipe(storlet_id)
prms['log_level'] = self.storlet_daemon_debug_level
prms['pool_size'] = self.storlet_daemon_thread_pool_size
read_fd, write_fd = os.pipe()
dtg = SBusDatagram.create_service_datagram( SBUS_CMD_START_DAEMON,
write_fd )
dtg.set_exec_params( prms )
pipe_path = self.paths.host_factory_pipe()
rc = SBus.send( pipe_path, dtg )
if (rc < 0):
return -1
reply = os.read(read_fd,10)
os.close(read_fd)
os.close(write_fd)
res, error_txt = self._parse_sandbox_factory_answer(reply)
if res == True:
return 1
return 0
def stop_storlet_daemon(self, storlet_id):
read_fd, write_fd = os.pipe()
dtg = SBusDatagram.create_service_datagram( SBUS_CMD_STOP_DAEMON,
write_fd )
dtg.add_exec_param('storlet_name', storlet_id)
pipe_path = self.paths.host_factory_pipe()
rc = SBus.send( pipe_path, dtg )
if (rc < 0):
self.logger.info("Failed to send status command to %s %s" % (self.account, storlet_id))
return -1
reply = os.read(read_fd,10)
os.close(read_fd)
os.close(write_fd)
res, error_txt = self._parse_sandbox_factory_answer(reply)
if res == True:
return 1
return 0
def get_storlet_daemon_status(self, storlet_id):
read_fd, write_fd = os.pipe()
dtg = SBusDatagram.create_service_datagram( SBUS_CMD_DAEMON_STATUS,
write_fd )
dtg.add_exec_param( 'storlet_name', storlet_id)
pipe_path = self.paths.host_factory_pipe()
rc = SBus.send(pipe_path, dtg)
if (rc < 0):
self.logger.info("Failed to send status command to %s %s" % (self.account, storlet_id))
return -1
reply = os.read(read_fd,10)
os.close(read_fd)
os.close(write_fd)
res, error_txt = self._parse_sandbox_factory_answer(reply)
if res == True:
return 1
return 0
def activate_storlet_daemon(self, invocation_data, cache_updated = True):
storlet_daemon_status = self.get_storlet_daemon_status(invocation_data['storlet_main_class'])
if (storlet_daemon_status == -1):
# We failed to send a command to the factory.
            # The best we can do is restart the container.
self.logger.debug('Failed to check Storlet daemon status, restart Docker container')
res = self.restart()
if (res != 1):
raise Exception('Docker container is not responsive')
storlet_daemon_status = 0
if (cache_updated == True and storlet_daemon_status == 1):
            # The cache was updated while the daemon is running, so we need to stop it.
self.logger.debug('The cache was updated, and the storlet daemon is running. Stopping daemon')
res = self.stop_storlet_daemon( invocation_data['storlet_main_class'] )
if res != 1:
res = self.restart()
if (res != 1):
raise Exception('Docker container is not responsive')
else:
self.logger.debug('Daemon stopped')
storlet_daemon_status = 0
if (storlet_daemon_status == 0):
self.logger.debug('Going to start storlet daemon!')
class_path = '/home/swift/%s/%s' % (invocation_data['storlet_main_class'],
invocation_data['storlet_name'])
for dep in invocation_data['storlet_dependency'].split(','):
class_path = '%s:/home/swift/%s/%s' %\
(class_path,
invocation_data['storlet_main_class'],
dep)
daemon_status = self.start_storlet_daemon(
class_path,
invocation_data['storlet_main_class'])
if daemon_status != 1:
self.logger.error('Daemon start Failed, returned code is %d' % daemon_status)
raise Exception('Daemon start failed')
else:
self.logger.debug('Daemon started')
'''---------------------------------------------------------------------------
Storlet Daemon API
The StorletInvocationGETProtocol, StorletInvocationPUTProtocol, StorletInvocationSLOProtocol
serve as an API between the Docker Gateway and the Storlet Daemon which
runs inside the Docker container. These classes implement the Storlet execution
protocol
---------------------------------------------------------------------------'''
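# A minimal usage sketch (not part of the original module): 'srequest' is an
# existing storlet request object, and the pipe/logger paths and timeout below
# are hypothetical placeholders.
#
#   protocol = StorletInvocationGETProtocol(srequest,
#                                           '/var/run/storlets/storlet_pipe',
#                                           '/var/log/storlets', 40)
#   out_md, data_fd = protocol.communicate()
#   # out_md holds the metadata returned by the storlet,
#   # data_fd is the read end of the pipe carrying the processed object data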
class StorletInvocationProtocol():
def _add_input_stream(self, appendFd):
#self.fds.append(self.srequest.stream
self.fds.append(appendFd)
# TODO: Break request metadata and systemmetadata
md = dict()
md['type'] = SBUS_FD_INPUT_OBJECT
if self.srequest.user_metadata is not None:
for key, val in self.srequest.user_metadata.iteritems():
md[key] = val
self.fdmd.append(md)
def _add_output_stream(self):
self.fds.append(self.execution_str_write_fd)
md = dict()
md['type'] = SBUS_FD_OUTPUT_TASK_ID
self.fdmd.append(md)
self.fds.append(self.data_write_fd)
md = dict()
md['type'] = SBUS_FD_OUTPUT_OBJECT
self.fdmd.append(md)
self.fds.append(self.metadata_write_fd)
md = dict()
md['type'] = SBUS_FD_OUTPUT_OBJECT_METADATA
self.fdmd.append(md)
def _add_logger_stream(self):
self.fds.append(self.storlet_logger.getfd())
md = dict()
md['type'] = SBUS_FD_LOGGER
self.fdmd.append(md)
def _prepare_invocation_descriptors(self):
# Add the input stream
self._add_input_stream()
# Add the output stream
self.data_read_fd, self.data_write_fd = os.pipe()
self.execution_str_read_fd, self.execution_str_write_fd = os.pipe()
self.metadata_read_fd, self.metadata_write_fd = os.pipe()
self._add_output_stream()
# Add the logger
self._add_logger_stream()
def _close_remote_side_descriptors(self):
if self.data_write_fd:
os.close(self.data_write_fd)
if self.metadata_write_fd:
os.close(self.metadata_write_fd)
if self.execution_str_write_fd:
os.close(self.execution_str_write_fd)
def _cancel(self):
read_fd, write_fd = os.pipe()
dtg = SBusDatagram.create_service_datagram( SBUS_CMD_CANCEL, write_fd )
dtg.set_task_id(self.task_id)
rc = SBus.send( self.storlet_pipe_path, dtg )
if (rc < 0):
return -1
reply = os.read(read_fd,10)
os.close(read_fd)
os.close(write_fd)
def _invoke(self):
dtg = SBusDatagram()
dtg.set_files( self.fds )
dtg.set_metadata( self.fdmd )
dtg.set_exec_params( self.srequest.params )
dtg.set_command(SBUS_CMD_EXECUTE)
rc = SBus.send( self.storlet_pipe_path, dtg )
if (rc < 0):
raise Exception("Failed to send execute command")
self._wait_for_read_with_timeout(self.execution_str_read_fd)
self.task_id = os.read(self.execution_str_read_fd, 10)
def __init__(self, srequest, storlet_pipe_path, storlet_logger_path, timeout):
self.srequest = srequest
self.storlet_pipe_path = storlet_pipe_path
self.storlet_logger_path = storlet_logger_path
self.timeout = timeout
# remote side file descriptors and their metadata lists
# to be sent as part of invocation
self.fds = list()
self.fdmd = list()
# local side file descriptors
self.data_read_fd = None
self.data_write_fd = None
self.metadata_read_fd = None
self.metadata_write_fd = None
self.execution_str_read_fd = None
self.execution_str_write_fd = None
self.task_id = None
if not os.path.exists(storlet_logger_path):
os.makedirs(storlet_logger_path)
def _wait_for_read_with_timeout(self, fd):
r, w, e = select.select([ fd ], [], [ ], self.timeout)
if len(r) == 0:
if self.task_id:
self._cancel()
raise Timeout('Timeout while waiting for storlet output')
if fd in r:
return
def _read_metadata(self):
self._wait_for_read_with_timeout(self.metadata_read_fd)
flat_json = os.read(self.metadata_read_fd, MAX_META_OVERALL_SIZE)
if flat_json is not None:
md = json.loads(flat_json)
return md
class StorletInvocationGETProtocol(StorletInvocationProtocol):
def _add_input_stream(self):
StorletInvocationProtocol._add_input_stream(self, self.srequest.stream)
def __init__(self, srequest, storlet_pipe_path, storlet_logger_path, timeout):
StorletInvocationProtocol.__init__(self, srequest, storlet_pipe_path, storlet_logger_path, timeout)
def communicate(self):
self.storlet_logger = StorletLogger(self.storlet_logger_path, 'storlet_invoke')
self.storlet_logger.open()
self._prepare_invocation_descriptors()
try:
self._invoke()
except Exception as e:
raise e
finally:
self._close_remote_side_descriptors()
self.storlet_logger.close()
out_md = self._read_metadata()
os.close(self.metadata_read_fd)
self._wait_for_read_with_timeout(self.data_read_fd)
os.close(self.execution_str_read_fd)
return out_md, self.data_read_fd
class StorletInvocationProxyProtocol(StorletInvocationProtocol):
def __init__(self, srequest, storlet_pipe_path, storlet_logger_path, timeout):
StorletInvocationProtocol.__init__(self, srequest, storlet_pipe_path, storlet_logger_path, timeout)
self.input_data_read_fd, self.input_data_write_fd = os.pipe()
# YM this pipe permits to take data from srequest.stream to input_data_write_fd
# YM the write side stays with us, the read side is sent to storlet
def _add_input_stream(self):
StorletInvocationProtocol._add_input_stream(self, self.input_data_read_fd)
def _wait_for_write_with_timeout(self,fd):
r, w, e = select.select([ ], [ fd ], [ ], self.timeout)
if len(w) == 0:
raise Timeout('Timeout while waiting for storlet to read')
if fd in w:
return
def _write_with_timeout(self, writer, chunk):
timeout = Timeout(self.timeout)
try:
writer.write(chunk)
except Timeout as t:
if t is timeout:
writer.close()
raise t
except Exception as e:
raise e
finally:
timeout.cancel()
def communicate(self):
self.storlet_logger = StorletLogger(self.storlet_logger_path, 'storlet_invoke')
self.storlet_logger.open()
self._prepare_invocation_descriptors()
try:
self._invoke()
except Exception as e:
raise e
finally:
self._close_remote_side_descriptors()
self.storlet_logger.close()
self._wait_for_write_with_timeout(self.input_data_write_fd)
# We do the writing in a different thread.
# Otherwise, we can run into the following deadlock
# 1. middleware writes to Storlet
# 2. Storlet reads and starts to write metadata and then data
# 3. middleware continues writing
# 4. Storlet continues writing and gets stuck as middleware
# is busy writing, but still not consuming the reader end
# of the Storlet writer.
eventlet.spawn_n(self._write_input_data)
out_md = self._read_metadata()
self._wait_for_read_with_timeout(self.data_read_fd)
return out_md, self.data_read_fd
class StorletInvocationPUTProtocol(StorletInvocationProxyProtocol):
def __init__(self, srequest, storlet_pipe_path, storlet_logger_path, timeout):
StorletInvocationProxyProtocol.__init__(self, srequest, storlet_pipe_path, storlet_logger_path, timeout)
def _write_input_data(self):
writer = os.fdopen(self.input_data_write_fd, 'w')
reader = self.srequest.stream
for chunk in iter(lambda: reader(65536), ''):
self._write_with_timeout(writer, chunk)
writer.close()
class StorletInvocationSLOProtocol(StorletInvocationProxyProtocol):
def __init__(self, srequest, storlet_pipe_path, storlet_logger_path, timeout):
StorletInvocationProxyProtocol.__init__(self, srequest, storlet_pipe_path, storlet_logger_path, timeout)
def _write_input_data(self):
writer = os.fdopen(self.input_data_write_fd, 'w')
reader = self.srequest.stream
# print >> sys.stdout, ' type of reader %s'% (type(reader))
for chunk in reader:
self._write_with_timeout(writer, chunk)
# print >> sys.stderr, 'next SLO chunk...%d'% len(chunk)
writer.close()
|
apache-2.0
| 412,615,498,885,021,600 | 37.305031 | 112 | 0.578729 | false |
kubernetes-client/python
|
kubernetes/client/models/v1_aws_elastic_block_store_volume_source.py
|
1
|
8002
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.18
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1AWSElasticBlockStoreVolumeSource(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'fs_type': 'str',
'partition': 'int',
'read_only': 'bool',
'volume_id': 'str'
}
attribute_map = {
'fs_type': 'fsType',
'partition': 'partition',
'read_only': 'readOnly',
'volume_id': 'volumeID'
}
def __init__(self, fs_type=None, partition=None, read_only=None, volume_id=None, local_vars_configuration=None): # noqa: E501
"""V1AWSElasticBlockStoreVolumeSource - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._fs_type = None
self._partition = None
self._read_only = None
self._volume_id = None
self.discriminator = None
if fs_type is not None:
self.fs_type = fs_type
if partition is not None:
self.partition = partition
if read_only is not None:
self.read_only = read_only
self.volume_id = volume_id
@property
def fs_type(self):
"""Gets the fs_type of this V1AWSElasticBlockStoreVolumeSource. # noqa: E501
Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore # noqa: E501
:return: The fs_type of this V1AWSElasticBlockStoreVolumeSource. # noqa: E501
:rtype: str
"""
return self._fs_type
@fs_type.setter
def fs_type(self, fs_type):
"""Sets the fs_type of this V1AWSElasticBlockStoreVolumeSource.
Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore # noqa: E501
:param fs_type: The fs_type of this V1AWSElasticBlockStoreVolumeSource. # noqa: E501
:type: str
"""
self._fs_type = fs_type
@property
def partition(self):
"""Gets the partition of this V1AWSElasticBlockStoreVolumeSource. # noqa: E501
The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). # noqa: E501
:return: The partition of this V1AWSElasticBlockStoreVolumeSource. # noqa: E501
:rtype: int
"""
return self._partition
@partition.setter
def partition(self, partition):
"""Sets the partition of this V1AWSElasticBlockStoreVolumeSource.
The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). # noqa: E501
:param partition: The partition of this V1AWSElasticBlockStoreVolumeSource. # noqa: E501
:type: int
"""
self._partition = partition
@property
def read_only(self):
"""Gets the read_only of this V1AWSElasticBlockStoreVolumeSource. # noqa: E501
Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\". If omitted, the default is \"false\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore # noqa: E501
:return: The read_only of this V1AWSElasticBlockStoreVolumeSource. # noqa: E501
:rtype: bool
"""
return self._read_only
@read_only.setter
def read_only(self, read_only):
"""Sets the read_only of this V1AWSElasticBlockStoreVolumeSource.
Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\". If omitted, the default is \"false\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore # noqa: E501
:param read_only: The read_only of this V1AWSElasticBlockStoreVolumeSource. # noqa: E501
:type: bool
"""
self._read_only = read_only
@property
def volume_id(self):
"""Gets the volume_id of this V1AWSElasticBlockStoreVolumeSource. # noqa: E501
Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore # noqa: E501
:return: The volume_id of this V1AWSElasticBlockStoreVolumeSource. # noqa: E501
:rtype: str
"""
return self._volume_id
@volume_id.setter
def volume_id(self, volume_id):
"""Sets the volume_id of this V1AWSElasticBlockStoreVolumeSource.
Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore # noqa: E501
:param volume_id: The volume_id of this V1AWSElasticBlockStoreVolumeSource. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and volume_id is None: # noqa: E501
raise ValueError("Invalid value for `volume_id`, must not be `None`") # noqa: E501
self._volume_id = volume_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1AWSElasticBlockStoreVolumeSource):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1AWSElasticBlockStoreVolumeSource):
return True
return self.to_dict() != other.to_dict()
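# A minimal usage sketch (not part of the generated file; the volume ID is a
# made-up example value):
#
#   ebs = V1AWSElasticBlockStoreVolumeSource(volume_id='vol-0123456789abcdef0',
#                                            fs_type='ext4', read_only=False)
#   print(ebs.to_dict())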
|
apache-2.0
| 5,675,701,868,138,536,000 | 37.657005 | 329 | 0.631342 | false |
sumpfgottheit/arps
|
arps/views.py
|
1
|
3504
|
from flask import render_template, request
import numbers
from pprint import pprint
from arps.restserver import app, db, ApiException, apiview
from arps.globals import *
from arps.validation import get_schemas_for_endpoint
from arps.models import *
METHODS = ['GET', 'POST', 'PUT', 'DELETE']
@app.route('/')
def main():
l = []
rules = [rule for rule in sorted(list(app.url_map.iter_rules()), key=lambda rule: rule.rule) if rule.rule.startswith('/api/')]
for rule in rules:
schema_request, schema_response = get_schemas_for_endpoint(rule.endpoint)
l.append({
'path': rule.rule,
'methods': sorted([method for method in rule.methods if method in METHODS]),
'endpoint': rule.endpoint,
'schema_request': schema_request,
'schema_response': schema_response,
'doc': str(app.view_functions[rule.endpoint].__doc__).strip()
})
return render_template('apidoc.html', rules=l)
def populate_object(o :object, d: dict):
changed = set()
unchanged = set()
unkown = set()
for key, value in d.items():
if not isinstance(value, (str, numbers.Number, bool)):
unkown.add(key)
continue
if hasattr(o, key):
if getattr(o, key) == value:
unchanged.add(key)
else:
setattr(o, key, value)
changed.add(key)
else:
unkown.add(key)
return changed, unchanged, unkown
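# A minimal usage sketch (hypothetical User instance and payload):
#
#   user = db.session.query(User).get(1)
#   changed, unchanged, unknown = populate_object(user, {'name': 'Alice'})
#   # only scalar attributes (str/number/bool) already present on the object
#   # are updated; anything else is reported back in the third set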
def get_object_or_404(model, *criterion, message=""):
r = db.session.query(model).get(criterion)
if r is None:
raise ApiException(message, code=404)
else:
return r
@app.route('/api/v1.0/users/', methods=['GET'], endpoint=endpoint_user_list)
@apiview()
def user_list():
"""
Return a list of all users
"""
users = db.session.query(User).all()
message = [{**user.as_dict, **{'roles': [role.name for role in user.roles]}} for user in users]
return message
@app.route('/api/v1.0/users/<int:user_id>', methods=['GET'], endpoint=endpoint_user_get)
@apiview()
def user_get(user_id):
"""
Return the user with an specific id.
"""
user = get_object_or_404(User, user_id, message='No User with id %s found' % user_id)
return {**user.as_dict, **{'roles': [role.name for role in user.roles]}}
@app.route('/api/v1.0/users/<int:user_id>', methods=['PUT', 'OPTIONS'], endpoint=endpoint_user_update)
@apiview(needs_json_in_request=True)
def user_update(user_id):
"""
Update the user with the given id with the dictionary provided. All fields are optional.
If the id field is given, it must be the same value as the url leaf.
When updating the user, no fields are required.
"""
data = request.json['content']
if data.get('id', user_id) != user_id:
raise ApiException("User ID in json body and in url must be the same.")
user = get_object_or_404(User, user_id, message='No User with id %s found' % user_id)
populate_object(user, data)
if 'roles' in data:
user.set_roles(data['roles'])
db.session.commit()
return {**user.as_dict, **{'roles': [role.name for role in user.roles]}}
@app.route('/api/v1.0/roles/', methods=['GET'], endpoint=endpoint_role_list)
@apiview()
def role_list():
"""
Return a list of all roles
"""
roles = Role.query.all()
message = [{'name': role.name} for role in roles]
return message
|
mit
| 2,719,949,691,329,565,000 | 33.019417 | 130 | 0.619863 | false |
dannysellers/django_orders
|
load_accts.py
|
1
|
1434
|
import os
import csv
def fix_date(datestr):
"""
Dates in the account_list are MM/DD/YYYY, but Django's DateField
requires YYYY-MM-DD format
"""
_createdate = datestr.split('/')
if len(_createdate[2]) == 2:
_createdate[2] = '20' + str(_createdate[2])
_createdate = [_createdate[2], _createdate[0], _createdate[1]]
_createdate = '-'.join(_createdate)
return _createdate
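# Illustrative examples: fix_date('04/07/15') returns '2015-04-07' and
# fix_date('12/31/2014') returns '2014-12-31'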
def load_db (filename):
with open(filename, 'rU') as f:
_reader = csv.reader(f)
_fieldnames = _reader.next()
if _fieldnames:
_dictreader = csv.DictReader(f, fieldnames = _fieldnames)
_dictreader.next() # don't parse the first row again
for row in _dictreader:
name = row['Names']
acct = row['Acct']
createdate = fix_date(row['Date Created'])
add_customer(name=name, acct=acct, createdate=createdate)
# print("{} accounts loaded.".format(len(Customer.objects.all())))
def add_customer (name, acct, createdate, email='[email protected]'):
c = Customer.objects.get_or_create(name = name, acct = acct, email = email,
status = 1, createdate = createdate)
return c
if __name__ == '__main__':
filename = raw_input('Account list to load? (default account_list.csv):\t')
if not filename:
filename = 'account_list.csv'
print("Loading accounts from {}".format(filename))
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'order_tracker.settings')
from tracker.models import Customer
load_db(filename)
|
gpl-2.0
| -726,519,760,493,132,400 | 29.531915 | 76 | 0.677824 | false |
briancurtin/python-openstacksdk
|
openstack/tests/unit/network/v2/test_availability_zone.py
|
1
|
1629
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from openstack.network.v2 import availability_zone
IDENTIFIER = 'IDENTIFIER'
EXAMPLE = {
'id': IDENTIFIER,
'name': '1',
'resource': '2',
'state': '3',
}
class TestAvailabilityZone(testtools.TestCase):
def test_basic(self):
sot = availability_zone.AvailabilityZone()
self.assertEqual('availability_zone', sot.resource_key)
self.assertEqual('availability_zones', sot.resources_key)
self.assertEqual('/availability_zones', sot.base_path)
self.assertEqual('network', sot.service.service_type)
self.assertFalse(sot.allow_create)
self.assertFalse(sot.allow_get)
self.assertFalse(sot.allow_update)
self.assertFalse(sot.allow_delete)
self.assertTrue(sot.allow_list)
def test_make_it(self):
sot = availability_zone.AvailabilityZone(**EXAMPLE)
self.assertEqual(EXAMPLE['id'], sot.id)
self.assertEqual(EXAMPLE['name'], sot.name)
self.assertEqual(EXAMPLE['resource'], sot.resource)
self.assertEqual(EXAMPLE['state'], sot.state)
|
apache-2.0
| 6,330,750,237,324,627,000 | 35.2 | 75 | 0.70043 | false |
robwarm/gpaw-symm
|
gpaw/test/big/tpss/tpss.py
|
1
|
3276
|
from ase import Atoms
from ase.structure import molecule
from ase.parallel import paropen
from gpaw import GPAW, Mixer, MixerDif
from gpaw.utilities.tools import split_formula
cell = [14.4, 14.4, 14.4]
data = paropen('data.txt', 'a')
##Reference from J. Chem. Phys. Vol 120 No. 15, 15 April 2004, page 6898
tpss_de = [
('H2' , 112.9),
('LiH', 59.1),
('OH' , 106.8),
('HF' , 139.1),
('Li2', 22.5),
('LiF', 135.7),
('Be2', 8.1),
('CO' , 254.2),
('N2' , 227.7),
('O2' , 126.9),
('F2' , 46.4),
('P2' , 116.1),
('Cl2', 60.8)
]
exp_bonds_dE = [
('H2' , 0.741,109.5),
('LiH', 1.595,57.8),
('OH' , 0.970,106.4),
('HF' , 0.917,140.8),
('Li2', 2.673,24.4),
('LiF', 1.564,138.9),
('Be2', 2.440,3.0),
('CO' , 1.128,259.3),
('N2' , 1.098,228.5),
('O2' , 1.208,120.5),
('F2' , 1.412,38.5),
('P2' , 1.893,117.3),
('Cl2', 1.988,58.0)
]
systems = [ a[0] for a in tpss_de ]
ref = [ a[1] for a in tpss_de ]
# Add atoms
for formula in systems:
temp = split_formula(formula)
for atom in temp:
if atom not in systems:
systems.append(atom)
energies = {}
# Calculate energies
i = 0
for formula in systems:
if formula == 'Be2':
loa = Atoms('Be2', [(0, 0, 0), (0, 0, 2.0212)])
else:
loa = molecule(formula)
loa.set_cell(cell)
loa.center()
width = 0.0
calc = GPAW(h=.18,
nbands=-5,
maxiter=333,
xc='PBE',
txt=formula + '.txt')
if len(loa) == 1:
calc.set(hund=True)
calc.set(fixmom=True)
calc.set(mixer=MixerDif())
calc.set(eigensolver='cg')
else:
calc.set(mixer=Mixer())
pos = loa.get_positions()
pos[1,:] = pos[0,:] + [exp_bonds_dE[i][1],0.0,0.0]
loa.set_positions(pos)
loa.center()
loa.set_calculator(calc)
try:
energy = loa.get_potential_energy()
difft = calc.get_xc_difference('TPSS')
diffr = calc.get_xc_difference('revTPSS')
diffm = calc.get_xc_difference('M06L')
energies[formula]=(energy, energy+difft, energy+diffr,energy+diffm)
except:
print >>data, formula, 'Error'
else:
print >>data, formula, energy, energy+difft, energy+diffr, energy+diffm
data.flush()
i += 1
#calculate atomization energies
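# note: the 627.5/27.211 factor applied below converts the energies from eV
# to kcal/mol (1 Hartree = 27.211 eV = 627.5 kcal/mol)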
ii =0
file = paropen('atom_en.dat', 'a')
print >>file, "# formula \t PBE \t TPSS \t revTPSS \t M06L \t Exp"
for formula in systems[:13]:
try:
atoms_formula = split_formula(formula)
de_tpss = -1.0 * energies[formula][1]
de_revtpss = -1.0 * energies[formula][2]
de_m06l = -1.0 * energies[formula][3]
de_pbe = -1.0 * energies[formula][0]
for atom_formula in atoms_formula:
de_tpss += energies[atom_formula][1]
de_revtpss += energies[atom_formula][2]
de_m06l += energies[atom_formula][3]
de_pbe += energies[atom_formula][0]
except:
print >>file, formula, 'Error'
else:
de_tpss *= 627.5/27.211
de_revtpss *= 627.5/27.211
de_m06l *= 627.5/27.211
de_pbe *= 627.5/27.211
out = "%s\t%.1f \t%.1f \t%.1f \t%.1f \t%.1f" %(formula, de_pbe, de_tpss, de_revtpss, de_m06l ,exp_bonds_dE[ii][2])
print >>file, out
file.flush()
ii += 1
|
gpl-3.0
| -2,241,574,817,692,573,400 | 26.3 | 122 | 0.545482 | false |
MokaCreativeLLC/XNATImageViewer
|
utility-scripts/python/convertDemo.py
|
1
|
5614
|
import sys
import os
import shutil
import re
from time import gmtime, strftime
#
# vmHeader
#
vmHeader = '#* @vtlvariable name="content" type="org.apache.turbine.services.pull.tools.ContentTool" *#\n'
vmHeader += '#* @vtlvariable name="displayManager" type="org.nrg.xdat.display.DisplayManager" *#\n'
vmHeader += '#* @vtlvariable name="om" type="org.nrg.xdat.om.XnatMrsessiondata" *#\n'
#
# headers
#
autoHeader = \
"<!-- THIS FILE WAS AUTOGENERATED BY ($XNATImageViewer)/utility-scripts/"
autoHeader += "%s at %s -->"%(os.path.basename(__file__), \
strftime("%Y-%m-%d %H:%M:%S", gmtime()))
autoHeaders = ['\n']*3 + [autoHeader] + ['\n']*3
#
# Tags
#
XIV_STATE_TAG = 'XIV_STATE'
def writeTarget(target, lines):
"""
"""
dirname = os.path.dirname(target)
if not os.path.exists(dirname):
os.makedirs(dirname)
f = open(target,'w')
for line in lines:
f.write("%s\n" % line)
f.close()
def makeBackup(target):
"""
"""
if not os.path.exists(target):
return
shutil.move(target, os.path.join(os.path.dirname(target), os.path.basename(target).split('.')[0] + '.BKP'))
def getFileLines(path):
"""
"""
with open(path) as f:
content = f.readlines()
return content
def convertDemoToPopup(demoPath):
""" Converts the Demo.html file to the associated popup.html
"""
#-----------------
# Define parameters
#-----------------
newlines = []
content = getFileLines(demoPath)
for line in content:
#
# Replace the appropriate paths
#
line = line.replace('src/main', '../../..').strip()
#
# Set the Image viewer mode
#
if isModalStateLine(line):
line = 'var modalState = \'popup\';';
if isModeLine(line):
line = XIV_STATE_TAG + ' = \'live\';';
#
# Need to add the server root
#
if ('goog.require' in line):
line = line + "\nserverRoot = '';";
newlines.append(line)
return newlines
def isModeLine(line):
return ' = ' in line and XIV_STATE_TAG in line \
and line.count('=') == 1 and not 'new' in line
def isModalStateLine(line):
return ' = ' in line and 'modalState' in line and \
line.count('=') == 1 and not 'new' in line
def convertDemoToVM(demoPath):
""" Converts the Demo.html file to the associated XImgView.vm
"""
#-----------------
# Define parameters
#-----------------
clearables = ['html>', 'head>', 'body>', 'title>', 'DEMO_DATA']
pathVal = 'projects/$om.getProject()/subjects/$om.getSubjectId()/' + \
'experiments/$om.getId()'
newlines = []
content = getFileLines(demoPath)
#-----------------
# Loop through lines
#-----------------
for line in content:
#
# Remove html tags
# (this works for both prepend tags and suffix tags)
#
for clearable in clearables:
if clearable in line:
line = ''
#
# Set the Image viewer mode
#
if isModeLine(line):
line = XIV_STATE_TAG + ' = \'live\';';
elif isModalStateLine(line):
line = 'var modalState = \'windowed\';';
#
# Convert filepaths to VM gets
#
vmChangers = ['href=', 'src=']
for changer in vmChangers:
if changer in line:
lineSplit = line.split(" ")
for word in lineSplit:
if changer in word:
word = word.replace("'", '"')
quoteLocations = [m.start() for m in re.finditer('"', word)]
prefix = word[:quoteLocations[0]]
mid = '"$content.getURI("' + word[quoteLocations[0]+1:quoteLocations[1]] + '")"'
suffix = word[quoteLocations[1]+1:]
newWord = prefix + mid + suffix
line = line.replace(word, newWord)
#
# Convert filepaths to appropriate directories
#
if 'src/main/' in line:
line = line.replace('src/main/', '')
newlines.append(line.strip())
return [vmHeader] + newlines[1:]
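# Illustrative example of the transformation performed above (assuming
# double-quoted attributes in Demo.html):
#   <script src="src/main/scripts/viewer/xiv/xiv.js"></script>
# becomes
#   <script src="$content.getURI("scripts/viewer/xiv/xiv.js")"></script>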
#
# MAIN FUNCTION
#
def main():
#----------------------------
# Params
#----------------------------
imageViewerHome = os.environ.get('XNATIMAGEVIEWER_HOME')
apacheHome = os.environ.get('CATALINA_HOME')
demoPath = imageViewerHome + '/Demo.html'
vmTargets = [
apacheHome + '/webapps/xnat/templates/screens/XImgView.vm',
imageViewerHome + '/src/main/templates/screens/XImgView.vm',
]
popupTargets = [
imageViewerHome + '/src/main/scripts/viewer/xiv/popup.html'
]
#----------------------------
# Get the new files as lines
#----------------------------
vmLines = autoHeaders + convertDemoToVM(demoPath)
popupLines = autoHeaders + convertDemoToPopup(demoPath)
def makeAndWrite(lines, targets):
for target in targets:
makeBackup(target)
writeTarget(target, lines)
#----------------------------
# Make VM
#----------------------------
makeAndWrite(vmLines, vmTargets)
makeAndWrite(popupLines, popupTargets)
if __name__ == "__main__":
main()
|
bsd-3-clause
| -4,105,578,803,528,204,300 | 22.294606 | 111 | 0.501959 | false |
mtbc/openmicroscopy
|
components/tools/OmeroWeb/omeroweb/webstart/views.py
|
1
|
4134
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
#
# Copyright (c) 2008-2014 University of Dundee.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Aleksandra Tarkowska <A(dot)Tarkowska(at)dundee(dot)ac(dot)uk>, 2008.
#
# Version: 1.0
#
import os
import sys
import traceback
from glob import glob
from django.conf import settings
from django.template import loader as template_loader
from django.template import RequestContext as Context
from django.core.urlresolvers import reverse
from django.views.decorators.cache import never_cache
from omeroweb.http import HttpJNLPResponse
from omero_version import omero_version
from omeroweb.webclient.decorators import render_response
@never_cache
@render_response()
def custom_index(request, conn=None, **kwargs):
context = {"version": omero_version}
if settings.INDEX_TEMPLATE is not None:
try:
template_loader.get_template(settings.INDEX_TEMPLATE)
context['template'] = settings.INDEX_TEMPLATE
except Exception, e:
context['template'] = 'webstart/start.html'
context["error"] = traceback.format_exception(*sys.exc_info())[-1]
else:
context['template'] = 'webstart/start.html'
insight_url = None
if settings.WEBSTART:
context['insight_url'] = request.build_absolute_uri(reverse("webstart_insight"))
return context
@never_cache
@render_response()
def index(request, conn=None, **kwargs):
context = {"version": omero_version}
if settings.WEBSTART_TEMPLATE is not None:
try:
template_loader.get_template(settings.WEBSTART_TEMPLATE)
context['template'] = settings.WEBSTART_TEMPLATE
except Exception, e:
context['template'] = 'webstart/index.html'
context["error"] = traceback.format_exception(*sys.exc_info())[-1]
else:
context['template'] = 'webstart/index.html'
insight_url = None
if settings.WEBSTART:
context['insight_url'] = request.build_absolute_uri(reverse("webstart_insight"))
return context
@never_cache
def insight(request):
t = template_loader.get_template('webstart/insight.xml')
codebase = request.build_absolute_uri(settings.STATIC_URL+'webstart/jars/')
href = request.build_absolute_uri(reverse("webstart_insight"))
pattern = os.path.abspath(os.path.join(settings.OMERO_HOME, "lib", "insight", "*.jar").replace('\\','/'))
jarlist = glob(pattern)
jarlist = [os.path.basename(x) for x in jarlist]
# ticket:9478 put insight jar at the start of the list if available
# This can be configured via omero.web.webstart_jar to point to a
# custom value.
idx = jarlist.index(settings.WEBSTART_JAR)
if idx > 0:
jarlist.pop(idx)
jarlist.insert(0, settings.WEBSTART_JAR)
idy = jarlist.index(settings.NANOXML_JAR)
if idy > 0:
jarlist.pop(idy)
jarlist.insert(len(jarlist)-1, settings.NANOXML_JAR)
context = {'codebase': codebase, 'href': href, 'jarlist': jarlist,
'icon': settings.WEBSTART_ICON,
'heap': settings.WEBSTART_HEAP,
'host': settings.WEBSTART_HOST,
'port': settings.WEBSTART_PORT,
'class': settings.WEBSTART_CLASS,
'title': settings.WEBSTART_TITLE,
'vendor': settings.WEBSTART_VENDOR,
'homepage': settings.WEBSTART_HOMEPAGE,
}
c = Context(request, context)
return HttpJNLPResponse(t.render(c))
|
gpl-2.0
| 4,398,770,162,860,684,000 | 33.739496 | 110 | 0.676343 | false |
lunarca/fngrpt
|
models/TargetModel.py
|
1
|
1997
|
# -*- coding: utf-8 -*-
import logging
import re
from sqlalchemy import Column, ForeignKey
from sqlalchemy.orm import synonym, relationship, backref
from sqlalchemy.types import Unicode, String, Integer
from models import dbsession, Permission
from models.BaseModels import DatabaseObject, generate_uuid
from libs.ValidationError import ValidationError
class Target(DatabaseObject):
''' Target: A person targeted for analysis. Usually represented as an email address. '''
_email = Column(Unicode(64), nullable=False)
_name = Column(Unicode(32))
uuid = Column(String(32), unique=True, default=generate_uuid)
# belongs to Campaign
campaign_id = Column(Integer,
ForeignKey('campaign.id'),
nullable=False
)
campaign = relationship("Campaign", backref=backref("targets", lazy="select"))
@classmethod
def all(cls):
return dbsession.query(cls).all()
@classmethod
def by_id(cls, _id):
return dbsession.query(cls).filter_by(id=_id).first()
@classmethod
def by_uuid(cls, _uuid):
return dbsession.query(cls).filter_by(uuid=_uuid).first()
@classmethod
def is_email(cls, email):
''' Quick regex to see if the email is formatted correctly '''
regex = r"^[A-Za-z0-9\.\+_-]+@[A-Za-z0-9\._-]+\.[a-zA-Z]*$"
return bool(re.match(regex, email))
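# Illustrative examples: Target.is_email('user@example.com') returns True,
# while Target.is_email('not-an-email') returns False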
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = unicode(value[:32])
@property
def email(self):
return self._email
@email.setter
def email(self, value):
if not (self.is_email(value)):
raise ValidationError("'%s' is not a valid email address" % value)
self._email = unicode(value[:64])
def to_dict(self):
return {
'uuid': str(self.uuid),
'email': str(self.email),
'name': str(self.name),
}
|
apache-2.0
| 4,743,899,374,327,547,000 | 27.528571 | 92 | 0.610916 | false |
ministryofjustice/bankline-direct-parser
|
bankline_parser/data_services/fields.py
|
1
|
2725
|
from datetime import datetime
from .exceptions import ParseError
class DataField:
def __init__(self, start, end, justification='r', fill_char=' ', pad_char=' '):
self.start = start
self.end = end
self.justification = justification
self.fill_char = fill_char
self.pad_char = pad_char
def _strip_padding(self, field_content):
if self.justification == 'r':
for i in range(len(field_content)):
if field_content[i] == self.pad_char:
continue
else:
return field_content[i:]
else:
for i in reversed(range(len(field_content))):
if field_content[i] == self.pad_char:
continue
else:
return field_content[:i+1]
def parse(self, row_content):
field_content = row_content[self.start:self.end]
if field_content == self.fill_char*(self.end-self.start):
return None
return self._strip_padding(field_content)
class TextField(DataField):
pass
class DateField(DataField):
def parse(self, row_content):
field_content = row_content[self.start:self.end]
try:
return datetime.strptime(field_content, ' %y%j')
except ValueError as e:
raise ParseError(e)
class NumericField(DataField):
def parse(self, row_content):
field_content = super().parse(row_content)
if field_content:
try:
return int(field_content)
except (TypeError, ValueError) as e:
raise ParseError(e)
else:
return None
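# A minimal usage sketch (the 20..31 field positions are illustrative only):
#
#   amount = NumericField(20, 31)
#   value = amount.parse(record)  # int value, or None when the field is blank
#   # a ParseError is raised if the field content is not numeric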
class ZeroFilledField(DataField):
def __init__(self, start, end, fill_char='0', **kwargs):
super().__init__(start, end, fill_char=fill_char, **kwargs)
class EnumField(DataField):
def __init__(self, start, end, enum, **kwargs):
self.enum = enum
super().__init__(start, end, **kwargs)
def parse(self, row_content):
field_content = row_content[self.start:self.end]
try:
return self.enum(field_content)
except ValueError as e:
raise ParseError(e)
class StaticField(DataField):
def __init__(self, start, end, value, **kwargs):
self.value = value
super().__init__(start, end, **kwargs)
def parse(self, row_content):
field_content = super().parse(row_content)
if field_content == self.value:
return field_content
else:
raise ParseError(
"Field content '%s' does not match expected static value '%s'"
% (field_content, self.value)
)
|
mit
| 1,038,170,952,547,060,700 | 28.945055 | 83 | 0.561835 | false |
pozetroninc/micropython
|
tests/float/float1.py
|
1
|
2194
|
# test basic float capabilities
# literals
print(.12)
print(1.)
print(1.2)
print(0e0)
print(0e+0)
print(0e-0)
# float construction
print(float(1.2))
print(float("1.2"))
print(float("+1"))
print(float("1e1"))
print(float("1e+1"))
print(float("1e-1"))
print(float("inf"))
print(float("-inf"))
print(float("INF"))
print(float("infinity"))
print(float("INFINITY"))
print(float("nan"))
print(float("-nan"))
print(float("NaN"))
try:
float("")
except ValueError:
print("ValueError")
try:
float("1e+")
except ValueError:
print("ValueError")
try:
float("1z")
except ValueError:
print("ValueError")
# construct from something with the buffer protocol
print(float(b"1.2"))
print(float(bytearray(b"3.4")))
# unary operators
print(bool(0.0))
print(bool(1.2))
print(+(1.2))
print(-(1.2))
# division of integers
x = 1 / 2
print(x)
# /= operator
a = 1
a /= 2
print(a)
# floor division
print(1.0 // 2)
print(2.0 // 2)
# comparison
print(1.2 <= 3.4)
print(1.2 <= -3.4)
print(1.2 >= 3.4)
print(1.2 >= -3.4)
print(0.0 == False, 1.0 == True)
print(False == 0.0, True == 1.0)
# comparison of nan is special
nan = float('nan')
print(nan == 1.2)
print(nan == nan)
try:
1.0 / 0
except ZeroDivisionError:
print("ZeroDivisionError")
try:
1.0 // 0
except ZeroDivisionError:
print("ZeroDivisionError")
try:
1.2 % 0
except ZeroDivisionError:
print("ZeroDivisionError")
try:
0.0 ** -1
except ZeroDivisionError:
print("ZeroDivisionError")
# unsupported unary ops
try:
~1.2
except TypeError:
print("TypeError")
try:
1.2 in 3.4
except TypeError:
print("TypeError")
# small int on LHS, float on RHS, unsupported op
try:
print(1 | 1.0)
except TypeError:
print('TypeError')
# can't convert list to float
try:
float([])
except TypeError:
print("TypeError")
# test constant float with more than 255 chars
x = 1.84728699436059052516398251149631771898472869943605905251639825114963177189847286994360590525163982511496317718984728699436059052516398251149631771898472869943605905251639825114963177189847286994360590525163982511496317718984728699436059052516398251149631771898472869943605905251639825114963177189
print("%.5f" % x)
|
mit
| 6,278,333,151,221,868,000 | 17.132231 | 302 | 0.691431 | false |
silasary/StackIt
|
StackIt/builder.py
|
1
|
18659
|
import os, sys, re
#Image manipulation
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
#Check input format
import mmap
#XML parsing
import xml.etree.ElementTree
#HTML parsing
from lxml import html
from StackIt import scraper, config, decklist, globals
from StackIt.globals import Card, specmana, aftermath
#ensure that mana costs greater than 9 (Kozilek, Emrakul...) aren't misaligned
FILTER = Image.LANCZOS
# Sizes
INNER_MTG_MANA_COST_IMAGE_SIZE = 15
OUTER_MTG_MANA_COST_IMAGE_SIZE = 16
HEX_MANA_COST_IMAGE_SIZE = 20
INNER_ENTRY_HEIGHT = 34
OUTER_ENTRY_HEIGHT = 35
DECK_WIDTH = 280
HEX_DECK_WIDTH = 219
HEX_MASTER_DECK_WIDTH = 320
SCROLLING_DECK_WIDTH_ADJUSTMENT = 10
SCROLLING_DECK_WIDTH = DECK_WIDTH - SCROLLING_DECK_WIDTH_ADJUSTMENT
# Image Positioning
HEX_MANA_COST_LEFT = 10
HEX_MANA_COST_TOP = 7
HEX_MANA_COST_SIZE = 20
HEX_BANNER_TOP = 50
SIDEBOARD_LEFT = 50
MTG_CMC_OFFSET_TOP = 8
# Crops
HEX_IMAGE_CROP = (39, 130, 309, 164)
HEX_MAINGUY_CROP = (134, 55, 185, 275)
MTG_BACKGROUND_X_TOP_OFFSET = 12
MTG_BACKGROUND_Y_OFFSET = 125
MTG_BACKGROUND_Y_OFFSET_AFTERMATH = 55
POKEMON_BACKGROUND_OFFSET_Y_TOP = 90
POKEMON_BACKGROUND_OFFSET_X_BOTTOM = 10
POKEMON_BACKGROUND_OFFSET_Y_BOTTOM = 100
MTG_WIDTH_CROP_RIGHT = 10
POKEMON_WIDTH_CROP_RIGHT = 10
HEX_WIDTH_CROP_RIGHT = 22
# Colors
BLACK = (0, 0, 0)
NEARLY_WHITE = (250, 250, 250)
RGB_MAX_0 = 255
RGB_MAX_1 = 256
HALF = int(RGB_MAX_1 / 2)
BAD_HALF = int(RGB_MAX_0 / 2)
QUARTER = int(RGB_MAX_1 / 4)
BAD_THREE_QUARTERS = 190
# Text Positioning
TEXT_LEFT, TEXT_TOP = 7, 7
POKEMON_TEXT_LEFT, POKEMON_TEXT_TOP = 7, 12
MTG_TITLE_POSITION = (10, 7)
POKEMON_TITLE_POSITION = (10, 8)
TEXT_PASTE_LEFT = 50
HEX_TITLE_LEFT = 15
HEX_TITLE_TOP = 12
SIDEBOARD_TITLE_POSITION = (10, 7)
HEX_BANNER_POSITION = (15, 15)
if config.Get('options', 'indent_hex_title'):
TITLE_INDENT = TEXT_PASTE_LEFT
else:
TITLE_INDENT = 0
# Type Sizes
MTG_FONT_SIZE = 14
MTG_TITLE_FONT_SIZE = 18
HEX_FONT_SIZE = 16
HEX_TITLE_FONT_SIZE = 18
POKEMON_FONT_SIZE = 10
POKEMON_TITLE_FONT_SIZE = 14
# Rotation
ROTATE_RIGHT = 90
ROTATE_LEFT = -90
#some position initialization
X_TOP = 8
X_BOTTOM = 304
Y_TOP = 11.5
Y_BOTTOM = 45.25
X_TOP_POKEMON = 8
X_BOTTOM_POKEMON = 237
Y_TOP_POKEMON = 11.5
Y_BOTTOM_POKEMON = 45.25
def GenerateCMC(name, cost):
check9 = '0123456'
adjustcmc = False
cmc = Image.new('RGBA', (OUTER_MTG_MANA_COST_IMAGE_SIZE * len(cost), OUTER_MTG_MANA_COST_IMAGE_SIZE))
diskcost = cost.strip().replace('*', '_').replace('/', '-')
lookupCMC = os.path.join(globals.CMC_PATH, '{cost}.png'.format(cost=diskcost))
if os.path.exists(lookupCMC):
tap0 = Image.open(lookupCMC)
if tap0.mode != 'RGBA':
tap0 = tap0.convert('RGBA')
cmc.paste(tap0, (0, 0), mask=tap0)
#still need to check cost adjustment...
for n in range(len(cost) - 1):
if (cost[n] == '1') and (check9.find(cost[n + 1]) != -1):
adjustcmc = True
else:
greaterthan9 = False
for n in range(len(cost)):
#reset the large mana cost markers
if greaterthan9:
greaterthan9 = False
adjustcmc = True
continue
#lands have no mana cost and are tagged with '*'
if cost[n] == "*":
continue
#add correct treatment of separation for split cards
elif cost[n] == '/':
symbol = os.path.join(globals.RESOURCES_PATH, 'mana', 'Mana_spn.png')
tap0 = Image.open(symbol)
if tap0.mode != 'RGBA':
tap0 = tap0.convert('RGBA')
tap = tap0.resize((OUTER_MTG_MANA_COST_IMAGE_SIZE, OUTER_MTG_MANA_COST_IMAGE_SIZE), FILTER)
cmc.paste(tap, (INNER_MTG_MANA_COST_IMAGE_SIZE * n, 0), mask=tap)
else:
if (len(cost) > n + 1) and (cost[n] == '1') and (check9.find(cost[ n+ 1]) != -1):
finalcost = cost[n] + cost[n + 1]
greaterthan9 = True
else:
finalcost = cost[n]
symbol = os.path.join(globals.RESOURCES_PATH, 'mana', 'Mana_' + finalcost + '.png')
tap0 = Image.open(symbol)
if tap0.mode != 'RGBA':
tap0 = tap0.convert('RGBA')
tap = tap0.resize((OUTER_MTG_MANA_COST_IMAGE_SIZE, OUTER_MTG_MANA_COST_IMAGE_SIZE), FILTER)
cmc.paste(tap, (INNER_MTG_MANA_COST_IMAGE_SIZE * n, 0), mask=tap)
cmc.save(lookupCMC)
return cmc, adjustcmc
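# A minimal usage sketch (hypothetical card name and cost string):
#
#   cmc_img, adjust = GenerateCMC('Lightning Bolt', 'R')
#   # cmc_img is an RGBA strip of mana symbols (cached on disk under CMC_PATH);
#   # adjust is True only for two-digit generic costs (10-16), so callers can
#   # reposition the strip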
def draw_hex_card(name, guid, quantity, nstep):
lookupScan = scraper.download_scanHex(name, guid)
img = Image.open(lookupScan)
img = img.crop(HEX_IMAGE_CROP)
#resize the gradient to the size of im...
alpha = gradient.resize(img.size, FILTER)
#put alpha in the alpha band of im...
img.putalpha(alpha)
bkgd = Image.new("RGB", img.size, "black")
bkgd.paste(img, (0, 0), mask=img)
cut = bkgd
draw = ImageDraw.Draw(cut)
#create text outline
text = str(quantity) + ' ' + name
draw.text((TEXT_LEFT - 1, TEXT_TOP - 1), text, BLACK, font=fnt)
draw.text((TEXT_LEFT + 1, TEXT_TOP - 1), text, BLACK, font=fnt)
draw.text((TEXT_LEFT - 1, TEXT_TOP + 1), text, BLACK, font=fnt)
draw.text((TEXT_LEFT + 1, TEXT_TOP + 1), text, BLACK, font=fnt)
#enter text
draw.text((TEXT_LEFT, TEXT_TOP), text, NEARLY_WHITE, font=fnt)
deck.paste(cut, (TEXT_PASTE_LEFT, (OUTER_ENTRY_HEIGHT) * nstep))
def draw_mtg_card(card, nstep):
isAftermath = False
if card.name.find(" // ") != -1:
namesplit = card.name.replace(" // ", "/")
lookupScan = scraper.download_scan(namesplit, card.set, card.collector_num)
if card.name in aftermath:
isAftermath = True
else:
lookupScan = scraper.download_scan(card.name, card.set, card.collector_num)
img = Image.open(lookupScan)
if (card.name.find(" // ") != -1) and (isAftermath == False):
img = img.rotate(ROTATE_LEFT)
#check if im has Alpha band...
if img.mode != 'RGBA':
img = img.convert('RGBA')
#resize the gradient to the size of im...
alpha = gradient.resize(img.size, FILTER)
#put alpha in the alpha band of im...
img.putalpha(alpha)
bkgd = Image.new("RGB", img.size, "black")
bkgd.paste(img, (0, 0), mask=img)
if isAftermath == True:
cut = bkgd.crop((X_TOP + MTG_BACKGROUND_X_TOP_OFFSET, Y_TOP + MTG_BACKGROUND_Y_OFFSET_AFTERMATH, X_BOTTOM, Y_BOTTOM + MTG_BACKGROUND_Y_OFFSET_AFTERMATH))
else:
cut = bkgd.crop((X_TOP + MTG_BACKGROUND_X_TOP_OFFSET, Y_TOP + MTG_BACKGROUND_Y_OFFSET, X_BOTTOM, Y_BOTTOM + MTG_BACKGROUND_Y_OFFSET))
draw = ImageDraw.Draw(cut)
text = str(card.quantity) + ' ' + card.name
#create text outline
draw.text((TEXT_LEFT - 1, TEXT_TOP - 1), text, BLACK, font=fnt)
draw.text((TEXT_LEFT + 1, TEXT_TOP - 1), text, BLACK, font=fnt)
draw.text((TEXT_LEFT - 1, TEXT_TOP + 1), text, BLACK, font=fnt)
draw.text((TEXT_LEFT + 1, TEXT_TOP + 1), text, BLACK, font=fnt)
#enter text
draw.text((TEXT_LEFT, TEXT_TOP), text, NEARLY_WHITE, font=fnt)
cmc, adjustcmc = GenerateCMC(card.name, card.cost)
#place the cropped picture of the current card
deck.paste(cut, (0, INNER_ENTRY_HEIGHT * nstep))
#for scrolling decklist
tmpwidth, tmpheight = cut.size
cut2 = cut.crop((0, 0, tmpwidth - SCROLLING_DECK_WIDTH_ADJUSTMENT, tmpheight))
deck2.paste(cut2, (SCROLLING_DECK_WIDTH * nstep, 0))
#adjust cmc size to reflex manacost greater than 9
if adjustcmc:
deck.paste(cmc, (DECK_WIDTH - INNER_MTG_MANA_COST_IMAGE_SIZE * len(card.cost), MTG_CMC_OFFSET_TOP + INNER_ENTRY_HEIGHT * nstep), mask=cmc)
#for scrolling decklist
deck2.paste(cmc, (SCROLLING_DECK_WIDTH * (nstep + 1) - INNER_MTG_MANA_COST_IMAGE_SIZE * len(card.cost), MTG_CMC_OFFSET_TOP), mask=cmc)
adjustcmc = False
else:
deck.paste(cmc, (DECK_WIDTH - INNER_MTG_MANA_COST_IMAGE_SIZE * (len(card.cost) + 1), MTG_CMC_OFFSET_TOP + INNER_ENTRY_HEIGHT * nstep), mask=cmc)
#for scrolling decklist
deck2.paste(cmc, (SCROLLING_DECK_WIDTH * (nstep + 1) - INNER_MTG_MANA_COST_IMAGE_SIZE * (len(card.cost) + 1), MTG_CMC_OFFSET_TOP), mask=cmc)
globals.mkcachepaths()
# create a horizontal gradient...
Hexgradient = Image.new('L', (1, RGB_MAX_0))
#map the gradient
for x in range(QUARTER):
Hexgradient.putpixel((0, x), RGB_MAX_0)
for x in range(QUARTER):
Hexgradient.putpixel((0, QUARTER + x), RGB_MAX_0 - x)
for x in range(HALF):
Hexgradient.putpixel((0, BAD_HALF + x), BAD_THREE_QUARTERS - int(1.5 * x))
# create a horizontal gradient...
gradient = Image.new('L', (RGB_MAX_0, 1))
#map the gradient
for x in range(HALF):
gradient.putpixel((x, 0), int(1.5 * x))
for x in range(QUARTER):
gradient.putpixel((BAD_HALF + x, 0), BAD_THREE_QUARTERS + x)
for x in range(QUARTER):
gradient.putpixel((BAD_THREE_QUARTERS + x, 0), RGB_MAX_0 - 1)
def main(filename):
doSideboard = config.Get('options', 'display_sideboard')
#open user input decklist
raw_decklist = open(str(filename), 'r')
deck_list = decklist.parse_list(raw_decklist)
raw_decklist.close()
print(repr(deck_list))
nstep = 1
# create a header with the deck's name
global fnt
if deck_list.game == decklist.MTG:
fnt = ImageFont.truetype(os.path.join(globals.RESOURCES_PATH, 'fonts', config.Get('fonts', 'mtg')), MTG_FONT_SIZE)
fnt_title = ImageFont.truetype(os.path.join(globals.RESOURCES_PATH, 'fonts', config.Get('fonts', 'mtg')), MTG_TITLE_FONT_SIZE)
title = Image.new("RGB", (DECK_WIDTH, INNER_ENTRY_HEIGHT), "black")
drawtitle = ImageDraw.Draw(title)
drawtitle.text(MTG_TITLE_POSITION, os.path.basename(str(filename))[0:-4], NEARLY_WHITE, font=fnt_title)
elif deck_list.game == decklist.POKEMON:
fnt = ImageFont.truetype(os.path.join(globals.RESOURCES_PATH, 'fonts', config.Get('fonts', 'pkmn')), POKEMON_FONT_SIZE)
fnt_title = ImageFont.truetype(os.path.join(globals.RESOURCES_PATH, 'fonts', config.Get('fonts', 'pkmn')), POKEMON_TITLE_FONT_SIZE)
title = Image.new("RGB", (HEX_DECK_WIDTH, OUTER_ENTRY_HEIGHT), "black")
drawtitle = ImageDraw.Draw(title)
drawtitle.text(POKEMON_TITLE_POSITION, os.path.basename(str(filename))[0:-4], NEARLY_WHITE, font=fnt_title)
elif deck_list.game == decklist.HEX:
fnt = ImageFont.truetype(os.path.join(globals.RESOURCES_PATH, 'fonts', config.Get('fonts', 'hex')), HEX_FONT_SIZE)
fnt_title = ImageFont.truetype(os.path.join(globals.RESOURCES_PATH, 'fonts', config.Get('fonts', 'hex')), HEX_TITLE_FONT_SIZE)
title = Image.new("RGB", (HEX_MASTER_DECK_WIDTH, INNER_ENTRY_HEIGHT), "black")
nametitle = str(filename)[0:-4]
nshard = 0
for re_match in re.finditer(r'(\[[^\]]*\])', nametitle):
shard = re_match.group(0)
if nametitle.find(shard) != -1:
nametitle = nametitle.replace(shard, '')
newshard = Image.open(os.path.join(globals.RESOURCES_PATH, 'hexicons', shard + '.png')).resize((HEX_MANA_COST_IMAGE_SIZE, HEX_MANA_COST_IMAGE_SIZE), FILTER)
title.paste(newshard, (TITLE_INDENT + HEX_MANA_COST_LEFT + nshard * HEX_MANA_COST_SIZE, HEX_MANA_COST_TOP))
nshard = nshard + 1
drawtitle = ImageDraw.Draw(title)
drawtitle.text((TITLE_INDENT + HEX_TITLE_LEFT + nshard * HEX_MANA_COST_IMAGE_SIZE, HEX_TITLE_TOP), os.path.basename(nametitle), NEARLY_WHITE, font=fnt_title)
ncountMB = len(deck_list.mainboard)
ncountSB = len(deck_list.sideboard)
ncount = ncountMB
if ncountSB == 0:
doSideboard = False
if doSideboard:
#create a Sideboard partition
sideboard = Image.new("RGB", (DECK_WIDTH, INNER_ENTRY_HEIGHT), "black")
drawtitle = ImageDraw.Draw(sideboard)
sideboard_name = "Sideboard"
if deck_list.game == decklist.HEX:
sideboard_name = "Reserves"
drawtitle.text(SIDEBOARD_TITLE_POSITION, sideboard_name, NEARLY_WHITE, font=fnt_title)
ncount += ncountSB + 1
#define the size of the canvas, incl. space for the title header
if deck_list.game == decklist.MTG:
deckwidth = DECK_WIDTH
deckheight = INNER_ENTRY_HEIGHT * (ncount + 1)
#for scrolling decklist
deckwidth2 = SCROLLING_DECK_WIDTH * (ncount + 1)
deckheight2 = INNER_ENTRY_HEIGHT
elif deck_list.game == decklist.POKEMON:
deckwidth = HEX_DECK_WIDTH
deckheight = OUTER_ENTRY_HEIGHT * (ncount + 1)
deckwidth2 = SCROLLING_DECK_WIDTH * (ncount + 1)
deckheight2 = INNER_ENTRY_HEIGHT
elif deck_list.game == decklist.HEX:
deckwidth = HEX_MASTER_DECK_WIDTH
deckheight = OUTER_ENTRY_HEIGHT * (ncount + 1)
deckwidth2 = SCROLLING_DECK_WIDTH * (ncount + 1)
deckheight2 = INNER_ENTRY_HEIGHT
#reset the sideboard marker
isSideboard = 0
global deck
deck = Image.new("RGB", (deckwidth, deckheight), "white")
#for scrolling decklist
global deck2
deck2 = Image.new("RGB", (deckwidth2, deckheight2), "white")
deck.paste(title, (0, 0))
#for scrolling decklist
title2 = title.crop((0, 0, SCROLLING_DECK_WIDTH, INNER_ENTRY_HEIGHT))
deck2.paste(title2, (0, 0))
#now read the decklist
if deck_list.game == decklist.MTG:
lands = []
for card in deck_list.mainboard:
#this step checks whether a specific art is requested by the user - provided via the set name
if card.cost == "*":
lands.append(card)
continue
draw_mtg_card(card, nstep)
nstep = nstep + 1
for card in lands:
draw_mtg_card(card, nstep)
nstep = nstep + 1
if doSideboard:
deck.paste(sideboard, (0, INNER_ENTRY_HEIGHT * nstep))
#for scrolling decklist
sideboard2 = sideboard.crop((0, 0, SCROLLING_DECK_WIDTH, INNER_ENTRY_HEIGHT))
deck2.paste(sideboard2, (SCROLLING_DECK_WIDTH * nstep, 0))
nstep = nstep + 1
for card in deck_list.sideboard:
draw_mtg_card(card, nstep)
nstep = nstep + 1
elif deck_list.game == decklist.POKEMON:
for card in deck_list.mainboard:
quantity = card.quantity
lookupScan, displayname = scraper.download_scanPKMN(card.name, card.set, card.collector_num)
img = Image.open(lookupScan)
#check if im has Alpha band...
if img.mode != 'RGBA':
img = img.convert('RGBA')
#resize the gradient to the size of im...
alpha = gradient.resize(img.size, FILTER)
#put alpha in the alpha band of im...
img.putalpha(alpha)
bkgd = Image.new("RGB", img.size, "black")
bkgd.paste(img, (0, 0), mask=img)
cut = bkgd.crop((X_TOP_POKEMON, Y_TOP_POKEMON + POKEMON_BACKGROUND_OFFSET_Y_TOP, X_BOTTOM_POKEMON - POKEMON_BACKGROUND_OFFSET_X_BOTTOM, Y_BOTTOM_POKEMON + POKEMON_BACKGROUND_OFFSET_Y_BOTTOM))
cut = cut.resize((deckwidth, INNER_ENTRY_HEIGHT))
draw = ImageDraw.Draw(cut)
#create text outline
text = str(quantity) + ' ' + displayname
draw.text((POKEMON_TEXT_LEFT - 1, POKEMON_TEXT_TOP - 1), text, BLACK, font=fnt)
draw.text((POKEMON_TEXT_LEFT + 1, POKEMON_TEXT_TOP - 1), text, BLACK, font=fnt)
draw.text((POKEMON_TEXT_LEFT - 1, POKEMON_TEXT_TOP + 1), text, BLACK, font=fnt)
draw.text((POKEMON_TEXT_LEFT + 1, POKEMON_TEXT_TOP + 1), text, BLACK, font=fnt)
#enter text
draw.text((POKEMON_TEXT_LEFT, POKEMON_TEXT_TOP), text, NEARLY_WHITE, font=fnt)
#place the cropped picture of the current card
deck.paste(cut, (0, OUTER_ENTRY_HEIGHT * nstep))
nstep = nstep + 1
elif deck_list.game == decklist.HEX:
banner = Image.new("RGB", (deckheight - OUTER_ENTRY_HEIGHT, HEX_BANNER_TOP), "black")
if len(deck_list.commander) > 0:
cmdr = deck_list.commander[0]
guid = cmdr.collector_num
typeCM = cmdr.set
drawbanner = ImageDraw.Draw(banner)
drawbanner.text(HEX_BANNER_POSITION, str(cmdr.name), NEARLY_WHITE, font=fnt_title)
lookupScan = scraper.download_scanHexCM(cmdr.name, guid, typeCM)
mainguyImg = Image.open(lookupScan)
mainguycut = mainguyImg.crop(HEX_MAINGUY_CROP)
banner = banner.rotate(ROTATE_RIGHT, expand=True)
#check if im has Alpha band...
if mainguycut.mode != 'RGBA':
mainguycut = mainguycut.convert('RGBA')
#resize the gradient to the size of im...
alpha = Hexgradient.resize(mainguycut.size, FILTER)
#put alpha in the alpha band of im...
mainguycut.putalpha(alpha)
banner.paste(mainguycut, (0, 0), mask=mainguycut)
deck.paste(banner, (0, OUTER_ENTRY_HEIGHT))
for card in deck_list.mainboard:
draw_hex_card(card.name, card.collector_num, card.quantity, nstep)
nstep = nstep + 1
if doSideboard:
deck.paste(sideboard, (SIDEBOARD_LEFT, OUTER_ENTRY_HEIGHT * nstep))
nstep = nstep + 1
for card in deck_list.sideboard:
draw_hex_card(card.name, card.collector_num, card.quantity, nstep)
nstep = nstep + 1
if deck_list.game == decklist.MTG:
deck = deck.crop((0, 0, deckwidth - MTG_WIDTH_CROP_RIGHT, deckheight))
deck2 = deck2.crop((0, 0, deckwidth2, deckheight2 - 2))
elif deck_list.game == decklist.POKEMON:
deck = deck.crop((0, 0, deckwidth - POKEMON_WIDTH_CROP_RIGHT, OUTER_ENTRY_HEIGHT * nstep))
elif deck_list.game == decklist.HEX:
deck = deck.crop((0, 0, deckwidth - HEX_WIDTH_CROP_RIGHT, deckheight))
output_path = str(filename)[0:-4] + ".png"
deck.save(output_path)
#for scrolling decklist
output_path2 = str(filename)[0:-4] + "-scroll.png"
deck2.save(output_path2)
altpath = config.Get('options', 'output_path')
if altpath is not None:
deck.save(altpath)
return output_path
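# A minimal usage sketch (hypothetical decklist path):
#
#   from StackIt import builder
#   builder.main('MyDeck.txt')   # writes MyDeck.png and MyDeck-scroll.png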
|
mit
| 3,783,695,594,269,186,600 | 37.235656 | 207 | 0.613109 | false |