repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ikumen/project-euler | solutions/016.py | 1 | 1723 | #!/usr/bin/env python
'''
016.py: https://projecteuler.net/problem=16
Power digit sum
2^15 = 32768 and the sum of its digits is 3 + 2 + 7 + 6 + 8 = 26.
What is the sum of the digits of the number 2^1000?
'''
import os
import pytest
import time
import math
def power_digit_sum(N, P):
"""
Calculates the sum of digits for a number 'N' raised to the power 'P'. The basic
algorithm is long multiplication, storing the results in two arrays: one holding
the current digits and the other used as a temporary buffer.
N must satisfy 2 <= N <= 9
"""
# To calculate the size of the array that can hold all our digits, I used
# the following formula (P * Log10(N))
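# (N raised to the power P has floor(P * log10(N)) + 1 decimal digits; because
# log10(N) is irrational for 2 <= N <= 9, ceil(P * log10(N)) equals that count.)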
if N > 9 or N < 2 or P < 1:
return None
d_size = math.ceil(P * math.log(N, 10))
digits = [None] * d_size
tmp_digits = [None] * d_size
# Set our ones column for long multiplication, and assign our first value
ones_place = d_size - 1
digits[ones_place] = N
# Multiply N P-1 times, since we set our initial N in ones_place
for i in range(1, P):
j = ones_place
carry = 0
while j >= 0 and digits[j] is not None:  # check bounds before indexing
product = carry + (digits[j] * N)
if product >= 10:
tmp_digits[j] = product % 10
carry = math.floor(product / 10)
tmp_digits[j-1] = carry
else:
tmp_digits[j] = product
carry = 0
j -= 1
tmp = digits
digits = tmp_digits
tmp_digits = tmp
return sum(filter(None, digits))
def test_solution():
'''Test'''
assert 25 == power_digit_sum(5, 8)
assert 26 == power_digit_sum(2, 15)
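# Hypothetical cross-check (not part of the original solution): Python's big
# integers make the same computation a one-liner,
#   assert power_digit_sum(2, 1000) == sum(int(d) for d in str(2 ** 1000))
# which gives an independent verification of the long-multiplication code.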
def main():
'''Main runner, delegates to solution.'''
#4,782,969
# 5, 3, 1, 4, 4, 1
print(power_digit_sum(2, 1000))
if __name__ == '__main__':
start_time = time.time()
main()
print("--- %s seconds ---" % (time.time() - start_time))
| mit | 8,554,294,037,166,094,000 | 21.671053 | 74 | 0.631457 | false | 2.801626 | false | false | false |
bhupennewalkar1337/erpnext | erpnext/accounts/doctype/purchase_invoice/purchase_invoice.py | 1 | 26311 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cint, formatdate, flt, getdate
from frappe import _, throw
from erpnext.setup.utils import get_company_currency
import frappe.defaults
from erpnext.controllers.buying_controller import BuyingController
from erpnext.accounts.party import get_party_account, get_due_date
from erpnext.accounts.utils import get_account_currency, get_fiscal_year
from erpnext.stock.doctype.purchase_receipt.purchase_receipt import update_billed_amount_based_on_po
from erpnext.controllers.stock_controller import get_warehouse_account
from erpnext.accounts.general_ledger import make_gl_entries, merge_similar_entries, delete_gl_entries
from erpnext.accounts.doctype.gl_entry.gl_entry import update_outstanding_amt
form_grid_templates = {
"items": "templates/form_grid/item_grid.html"
}
class PurchaseInvoice(BuyingController):
def __init__(self, arg1, arg2=None):
super(PurchaseInvoice, self).__init__(arg1, arg2)
self.status_updater = [{
'source_dt': 'Purchase Invoice Item',
'target_dt': 'Purchase Order Item',
'join_field': 'po_detail',
'target_field': 'billed_amt',
'target_parent_dt': 'Purchase Order',
'target_parent_field': 'per_billed',
'target_ref_field': 'amount',
'source_field': 'amount',
'percent_join_field': 'purchase_order',
'overflow_type': 'billing'
}]
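# The status_updater entry above lets the framework roll each invoice item's
# billed amount back onto the matching Purchase Order Item (via po_detail) and
# recompute the order's per_billed percentage when the invoice is submitted.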
def validate(self):
if not self.is_opening:
self.is_opening = 'No'
super(PurchaseInvoice, self).validate()
if not self.is_return:
self.po_required()
self.pr_required()
self.validate_supplier_invoice()
# validate cash purchase
if (self.is_paid == 1):
self.validate_cash()
self.check_conversion_rate()
self.validate_credit_to_acc()
self.clear_unallocated_advances("Purchase Invoice Advance", "advances")
self.check_for_closed_status()
self.validate_with_previous_doc()
self.validate_uom_is_integer("uom", "qty")
self.set_expense_account(for_validate=True)
self.set_against_expense_account()
self.validate_write_off_account()
self.validate_multiple_billing("Purchase Receipt", "pr_detail", "amount", "items")
self.validate_fixed_asset()
self.validate_fixed_asset_account()
self.create_remarks()
self.set_status()
def validate_cash(self):
if not self.cash_bank_account and flt(self.paid_amount):
frappe.throw(_("Cash or Bank Account is mandatory for making payment entry"))
if flt(self.paid_amount) + flt(self.write_off_amount) \
- flt(self.grand_total) > 1/(10**(self.precision("base_grand_total") + 1)):
frappe.throw(_("""Paid amount + Write Off Amount can not be greater than Grand Total"""))
def create_remarks(self):
if not self.remarks:
if self.bill_no and self.bill_date:
self.remarks = _("Against Supplier Invoice {0} dated {1}").format(self.bill_no,
formatdate(self.bill_date))
else:
self.remarks = _("No Remarks")
def set_missing_values(self, for_validate=False):
if not self.credit_to:
self.credit_to = get_party_account("Supplier", self.supplier, self.company)
if not self.due_date:
self.due_date = get_due_date(self.posting_date, "Supplier", self.supplier, self.company)
super(PurchaseInvoice, self).set_missing_values(for_validate)
def check_conversion_rate(self):
default_currency = get_company_currency(self.company)
if not default_currency:
throw(_('Please enter default currency in Company Master'))
if (self.currency == default_currency and flt(self.conversion_rate) != 1.00) or not self.conversion_rate or (self.currency != default_currency and flt(self.conversion_rate) == 1.00):
throw(_("Conversion rate cannot be 0 or 1"))
def validate_credit_to_acc(self):
account = frappe.db.get_value("Account", self.credit_to,
["account_type", "report_type", "account_currency"], as_dict=True)
if account.report_type != "Balance Sheet":
frappe.throw(_("Credit To account must be a Balance Sheet account"))
if self.supplier and account.account_type != "Payable":
frappe.throw(_("Credit To account must be a Payable account"))
self.party_account_currency = account.account_currency
def check_for_closed_status(self):
check_list = []
pc_obj = frappe.get_doc('Purchase Common')
for d in self.get('items'):
if d.purchase_order and not d.purchase_order in check_list and not d.purchase_receipt:
check_list.append(d.purchase_order)
pc_obj.check_for_closed_status('Purchase Order', d.purchase_order)
def validate_with_previous_doc(self):
super(PurchaseInvoice, self).validate_with_previous_doc({
"Purchase Order": {
"ref_dn_field": "purchase_order",
"compare_fields": [["supplier", "="], ["company", "="], ["currency", "="]],
},
"Purchase Order Item": {
"ref_dn_field": "po_detail",
"compare_fields": [["project", "="], ["item_code", "="], ["uom", "="]],
"is_child_table": True,
"allow_duplicate_prev_row_id": True
},
"Purchase Receipt": {
"ref_dn_field": "purchase_receipt",
"compare_fields": [["supplier", "="], ["company", "="], ["currency", "="]],
},
"Purchase Receipt Item": {
"ref_dn_field": "pr_detail",
"compare_fields": [["project", "="], ["item_code", "="], ["uom", "="]],
"is_child_table": True
}
})
if cint(frappe.db.get_single_value('Buying Settings', 'maintain_same_rate')) and not self.is_return:
self.validate_rate_with_reference_doc([
["Purchase Order", "purchase_order", "po_detail"],
["Purchase Receipt", "purchase_receipt", "pr_detail"]
])
def validate_warehouse(self):
if self.update_stock:
for d in self.get('items'):
if not d.warehouse:
frappe.throw(_("Warehouse required at Row No {0}").format(d.idx))
super(PurchaseInvoice, self).validate_warehouse()
def set_expense_account(self, for_validate=False):
auto_accounting_for_stock = cint(frappe.defaults.get_global_default("auto_accounting_for_stock"))
if auto_accounting_for_stock:
stock_not_billed_account = self.get_company_default("stock_received_but_not_billed")
stock_items = self.get_stock_items()
if self.update_stock:
self.validate_warehouse()
warehouse_account = get_warehouse_account()
for item in self.get("items"):
# in case of auto inventory accounting,
# expense account is always "Stock Received But Not Billed" for a stock item
# except opening entry, drop-ship entry and fixed asset items
if auto_accounting_for_stock and item.item_code in stock_items \
and self.is_opening == 'No' and not item.is_fixed_asset \
and (not item.po_detail or
not frappe.db.get_value("Purchase Order Item", item.po_detail, "delivered_by_supplier")):
if self.update_stock:
item.expense_account = warehouse_account[item.warehouse]["name"]
else:
item.expense_account = stock_not_billed_account
elif not item.expense_account and for_validate:
throw(_("Expense account is mandatory for item {0}").format(item.item_code or item.item_name))
def set_against_expense_account(self):
against_accounts = []
for item in self.get("items"):
if item.expense_account not in against_accounts:
against_accounts.append(item.expense_account)
self.against_expense_account = ",".join(against_accounts)
def po_required(self):
if frappe.db.get_value("Buying Settings", None, "po_required") == 'Yes':
for d in self.get('items'):
if not d.purchase_order:
throw(_("Purchase Order number required for Item {0}").format(d.item_code))
def pr_required(self):
stock_items = self.get_stock_items()
if frappe.db.get_value("Buying Settings", None, "pr_required") == 'Yes':
for d in self.get('items'):
if not d.purchase_receipt and d.item_code in stock_items:
throw(_("Purchase Receipt number required for Item {0}").format(d.item_code))
def validate_write_off_account(self):
if self.write_off_amount and not self.write_off_account:
throw(_("Please enter Write Off Account"))
def check_prev_docstatus(self):
for d in self.get('items'):
if d.purchase_order:
submitted = frappe.db.sql("select name from `tabPurchase Order` where docstatus = 1 and name = %s", d.purchase_order)
if not submitted:
frappe.throw(_("Purchase Order {0} is not submitted").format(d.purchase_order))
if d.purchase_receipt:
submitted = frappe.db.sql("select name from `tabPurchase Receipt` where docstatus = 1 and name = %s", d.purchase_receipt)
if not submitted:
frappe.throw(_("Purchase Receipt {0} is not submitted").format(d.purchase_receipt))
def update_status_updater_args(self):
if cint(self.update_stock):
self.status_updater.extend([{
'source_dt': 'Purchase Invoice Item',
'target_dt': 'Purchase Order Item',
'join_field': 'po_detail',
'target_field': 'received_qty',
'target_parent_dt': 'Purchase Order',
'target_parent_field': 'per_received',
'target_ref_field': 'qty',
'source_field': 'qty',
'percent_join_field':'purchase_order',
# 'percent_join_field': 'prevdoc_docname',
'overflow_type': 'receipt',
'extra_cond': """ and exists(select name from `tabPurchase Invoice`
where name=`tabPurchase Invoice Item`.parent and update_stock = 1)"""
},
{
'source_dt': 'Purchase Invoice Item',
'target_dt': 'Purchase Order Item',
'join_field': 'po_detail',
'target_field': 'returned_qty',
'target_parent_dt': 'Purchase Order',
# 'target_parent_field': 'per_received',
# 'target_ref_field': 'qty',
'source_field': '-1 * qty',
# 'percent_join_field': 'prevdoc_docname',
# 'overflow_type': 'receipt',
'extra_cond': """ and exists (select name from `tabPurchase Invoice`
where name=`tabPurchase Invoice Item`.parent and update_stock=1 and is_return=1)"""
}
])
def validate_purchase_receipt_if_update_stock(self):
if self.update_stock:
for item in self.get("items"):
if item.purchase_receipt:
frappe.throw(_("Stock cannot be updated against Purchase Receipt {0}")
.format(item.purchase_receipt))
def on_submit(self):
self.check_prev_docstatus()
self.update_status_updater_args()
frappe.get_doc('Authorization Control').validate_approving_authority(self.doctype,
self.company, self.base_grand_total)
if not self.is_return:
self.update_against_document_in_jv()
self.update_prevdoc_status()
self.update_billing_status_for_zero_amount_refdoc("Purchase Order")
self.update_billing_status_in_pr()
# Updating stock ledger should always be called after updating prevdoc status,
# because updating ordered qty in bin depends upon updated ordered qty in PO
if self.update_stock == 1:
self.update_stock_ledger()
from erpnext.stock.doctype.serial_no.serial_no import update_serial_nos_after_submit
update_serial_nos_after_submit(self, "items")
# this sequence matters because the outstanding amount may go negative
self.make_gl_entries()
self.update_project()
self.update_fixed_asset()
def update_fixed_asset(self):
for d in self.get("items"):
if d.is_fixed_asset:
asset = frappe.get_doc("Asset", d.asset)
if self.docstatus==1:
asset.purchase_invoice = self.name
asset.purchase_date = self.posting_date
asset.supplier = self.supplier
else:
asset.purchase_invoice = None
asset.supplier = None
asset.flags.ignore_validate_update_after_submit = True
asset.save()
def make_gl_entries(self, repost_future_gle=True):
if not self.grand_total:
return
gl_entries = self.get_gl_entries()
if gl_entries:
update_outstanding = "No" if (cint(self.is_paid) or self.write_off_account) else "Yes"
make_gl_entries(gl_entries, cancel=(self.docstatus == 2),
update_outstanding=update_outstanding, merge_entries=False)
if update_outstanding == "No":
update_outstanding_amt(self.credit_to, "Supplier", self.supplier,
self.doctype, self.return_against if cint(self.is_return) else self.name)
if repost_future_gle and cint(self.update_stock) and self.auto_accounting_for_stock:
from erpnext.controllers.stock_controller import update_gl_entries_after
items, warehouses = self.get_items_and_warehouses()
update_gl_entries_after(self.posting_date, self.posting_time, warehouses, items)
elif self.docstatus == 2 and cint(self.update_stock) and self.auto_accounting_for_stock:
delete_gl_entries(voucher_type=self.doctype, voucher_no=self.name)
def get_gl_entries(self, warehouse_account=None):
self.auto_accounting_for_stock = \
cint(frappe.defaults.get_global_default("auto_accounting_for_stock"))
self.stock_received_but_not_billed = self.get_company_default("stock_received_but_not_billed")
self.expenses_included_in_valuation = self.get_company_default("expenses_included_in_valuation")
self.negative_expense_to_be_booked = 0.0
gl_entries = []
self.make_supplier_gl_entry(gl_entries)
self.make_item_gl_entries(gl_entries)
self.make_tax_gl_entries(gl_entries)
gl_entries = merge_similar_entries(gl_entries)
self.make_payment_gl_entries(gl_entries)
self.make_write_off_gl_entry(gl_entries)
return gl_entries
def make_supplier_gl_entry(self, gl_entries):
if self.grand_total:
# Did not use base_grand_total to book rounding loss GL entry
grand_total_in_company_currency = flt(self.grand_total * self.conversion_rate,
self.precision("grand_total"))
gl_entries.append(
self.get_gl_dict({
"account": self.credit_to,
"party_type": "Supplier",
"party": self.supplier,
"against": self.against_expense_account,
"credit": grand_total_in_company_currency,
"credit_in_account_currency": grand_total_in_company_currency \
if self.party_account_currency==self.company_currency else self.grand_total,
"against_voucher": self.return_against if cint(self.is_return) else self.name,
"against_voucher_type": self.doctype,
}, self.party_account_currency)
)
def make_item_gl_entries(self, gl_entries):
# item gl entries
stock_items = self.get_stock_items()
expenses_included_in_valuation = self.get_company_default("expenses_included_in_valuation")
warehouse_account = get_warehouse_account()
for item in self.get("items"):
if flt(item.base_net_amount):
account_currency = get_account_currency(item.expense_account)
if self.update_stock and self.auto_accounting_for_stock and item.item_code in stock_items:
val_rate_db_precision = 6 if cint(item.precision("valuation_rate")) <= 6 else 9
# warehouse account
warehouse_debit_amount = flt(flt(item.valuation_rate, val_rate_db_precision)
* flt(item.qty) * flt(item.conversion_factor), item.precision("base_net_amount"))
gl_entries.append(
self.get_gl_dict({
"account": item.expense_account,
"against": self.supplier,
"debit": warehouse_debit_amount,
"remarks": self.get("remarks") or _("Accounting Entry for Stock"),
"cost_center": item.cost_center,
"project": item.project
}, account_currency)
)
# Amount added through landed-cost-voucher
if flt(item.landed_cost_voucher_amount):
gl_entries.append(self.get_gl_dict({
"account": expenses_included_in_valuation,
"against": item.expense_account,
"cost_center": item.cost_center,
"remarks": self.get("remarks") or _("Accounting Entry for Stock"),
"credit": flt(item.landed_cost_voucher_amount),
"project": item.project
}))
# sub-contracting warehouse
if flt(item.rm_supp_cost):
supplier_warehouse_account = warehouse_account[self.supplier_warehouse]["name"]
gl_entries.append(self.get_gl_dict({
"account": supplier_warehouse_account,
"against": item.expense_account,
"cost_center": item.cost_center,
"remarks": self.get("remarks") or _("Accounting Entry for Stock"),
"credit": flt(item.rm_supp_cost)
}, warehouse_account[self.supplier_warehouse]["account_currency"]))
else:
gl_entries.append(
self.get_gl_dict({
"account": item.expense_account,
"against": self.supplier,
"debit": flt(item.base_net_amount, item.precision("base_net_amount")),
"debit_in_account_currency": (flt(item.base_net_amount,
item.precision("base_net_amount")) if account_currency==self.company_currency
else flt(item.net_amount, item.precision("net_amount"))),
"cost_center": item.cost_center,
"project": item.project
}, account_currency)
)
if self.auto_accounting_for_stock and self.is_opening == "No" and \
item.item_code in stock_items and item.item_tax_amount:
# Post reverse entry for Stock-Received-But-Not-Billed if it is booked in Purchase Receipt
if item.purchase_receipt:
negative_expense_booked_in_pr = frappe.db.sql("""select name from `tabGL Entry`
where voucher_type='Purchase Receipt' and voucher_no=%s and account=%s""",
(item.purchase_receipt, self.expenses_included_in_valuation))
if not negative_expense_booked_in_pr:
gl_entries.append(
self.get_gl_dict({
"account": self.stock_received_but_not_billed,
"against": self.supplier,
"debit": flt(item.item_tax_amount, item.precision("item_tax_amount")),
"remarks": self.remarks or "Accounting Entry for Stock"
})
)
self.negative_expense_to_be_booked += flt(item.item_tax_amount, \
item.precision("item_tax_amount"))
def make_tax_gl_entries(self, gl_entries):
# tax table gl entries
valuation_tax = {}
for tax in self.get("taxes"):
if tax.category in ("Total", "Valuation and Total") and flt(tax.base_tax_amount_after_discount_amount):
account_currency = get_account_currency(tax.account_head)
dr_or_cr = "debit" if tax.add_deduct_tax == "Add" else "credit"
gl_entries.append(
self.get_gl_dict({
"account": tax.account_head,
"against": self.supplier,
dr_or_cr: tax.base_tax_amount_after_discount_amount,
dr_or_cr + "_in_account_currency": tax.base_tax_amount_after_discount_amount \
if account_currency==self.company_currency \
else tax.tax_amount_after_discount_amount,
"cost_center": tax.cost_center
}, account_currency)
)
# accumulate valuation tax
if self.is_opening == "No" and tax.category in ("Valuation", "Valuation and Total") and flt(tax.base_tax_amount_after_discount_amount):
if self.auto_accounting_for_stock and not tax.cost_center:
frappe.throw(_("Cost Center is required in row {0} in Taxes table for type {1}").format(tax.idx, _(tax.category)))
valuation_tax.setdefault(tax.cost_center, 0)
valuation_tax[tax.cost_center] += \
(tax.add_deduct_tax == "Add" and 1 or -1) * flt(tax.base_tax_amount_after_discount_amount)
if self.is_opening == "No" and self.negative_expense_to_be_booked and valuation_tax:
# credit valuation tax amount in "Expenses Included In Valuation"
# this will balance out valuation amount included in cost of goods sold
total_valuation_amount = sum(valuation_tax.values())
amount_including_divisional_loss = self.negative_expense_to_be_booked
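# Spread the booked negative expense over cost centers in proportion to their
# share of the valuation tax; the last cost center absorbs whatever remains so
# that rounding (divisional loss) never goes unbooked.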
i = 1
for cost_center, amount in valuation_tax.items():
if i == len(valuation_tax):
applicable_amount = amount_including_divisional_loss
else:
applicable_amount = self.negative_expense_to_be_booked * (amount / total_valuation_amount)
amount_including_divisional_loss -= applicable_amount
gl_entries.append(
self.get_gl_dict({
"account": self.expenses_included_in_valuation,
"cost_center": cost_center,
"against": self.supplier,
"credit": applicable_amount,
"remarks": self.remarks or "Accounting Entry for Stock"
})
)
i += 1
if self.update_stock and valuation_tax:
for cost_center, amount in valuation_tax.items():
gl_entries.append(
self.get_gl_dict({
"account": self.expenses_included_in_valuation,
"cost_center": cost_center,
"against": self.supplier,
"credit": amount,
"remarks": self.remarks or "Accounting Entry for Stock"
})
)
def make_payment_gl_entries(self, gl_entries):
# Make Cash GL Entries
if cint(self.is_paid) and self.cash_bank_account and self.paid_amount:
bank_account_currency = get_account_currency(self.cash_bank_account)
# CASH, make payment entries
gl_entries.append(
self.get_gl_dict({
"account": self.credit_to,
"party_type": "Supplier",
"party": self.supplier,
"against": self.cash_bank_account,
"debit": self.base_paid_amount,
"debit_in_account_currency": self.base_paid_amount \
if self.party_account_currency==self.company_currency else self.paid_amount,
"against_voucher": self.return_against if cint(self.is_return) else self.name,
"against_voucher_type": self.doctype,
}, self.party_account_currency)
)
gl_entries.append(
self.get_gl_dict({
"account": self.cash_bank_account,
"against": self.supplier,
"credit": self.base_paid_amount,
"credit_in_account_currency": self.base_paid_amount \
if bank_account_currency==self.company_currency else self.paid_amount
}, bank_account_currency)
)
def make_write_off_gl_entry(self, gl_entries):
# The write-off account absorbs the petty difference between the invoice amount
# and the amount that is actually paid
if self.write_off_account and flt(self.write_off_amount):
write_off_account_currency = get_account_currency(self.write_off_account)
gl_entries.append(
self.get_gl_dict({
"account": self.credit_to,
"party_type": "Supplier",
"party": self.supplier,
"against": self.write_off_account,
"debit": self.base_write_off_amount,
"debit_in_account_currency": self.base_write_off_amount \
if self.party_account_currency==self.company_currency else self.write_off_amount,
"against_voucher": self.return_against if cint(self.is_return) else self.name,
"against_voucher_type": self.doctype,
}, self.party_account_currency)
)
gl_entries.append(
self.get_gl_dict({
"account": self.write_off_account,
"against": self.supplier,
"credit": flt(self.base_write_off_amount),
"credit_in_account_currency": self.base_write_off_amount \
if write_off_account_currency==self.company_currency else self.write_off_amount,
"cost_center": self.write_off_cost_center
})
)
def on_cancel(self):
self.check_for_closed_status()
self.update_status_updater_args()
if not self.is_return:
from erpnext.accounts.utils import unlink_ref_doc_from_payment_entries
if frappe.db.get_single_value('Accounts Settings', 'unlink_payment_on_cancellation_of_invoice'):
unlink_ref_doc_from_payment_entries(self.doctype, self.name)
self.update_prevdoc_status()
self.update_billing_status_for_zero_amount_refdoc("Purchase Order")
self.update_billing_status_in_pr()
# Updating stock ledger should always be called after updating prevdoc status,
# because updating ordered qty in bin depends upon updated ordered qty in PO
if self.update_stock == 1:
self.update_stock_ledger()
self.make_gl_entries_on_cancel()
self.update_project()
self.update_fixed_asset()
frappe.db.set(self, 'status', 'Cancelled')
def update_project(self):
project_list = []
for d in self.items:
if d.project and d.project not in project_list:
project = frappe.get_doc("Project", d.project)
project.flags.dont_sync_tasks = True
project.update_purchase_costing()
project.save()
project_list.append(d.project)
def validate_supplier_invoice(self):
if self.bill_date:
if getdate(self.bill_date) > getdate(self.posting_date):
frappe.throw(_("Supplier Invoice Date cannot be greater than Posting Date"))
if self.bill_no:
if cint(frappe.db.get_single_value("Accounts Settings", "check_supplier_invoice_uniqueness")):
fiscal_year = get_fiscal_year(self.posting_date, company=self.company, as_dict=True)
pi = frappe.db.sql('''select name from `tabPurchase Invoice`
where
bill_no = %(bill_no)s
and name != %(name)s
and docstatus < 2
and posting_date between %(year_start_date)s and %(year_end_date)s''', {
"bill_no": self.bill_no,
"name": self.name,
"year_start_date": fiscal_year.year_start_date,
"year_end_date": fiscal_year.year_end_date
})
if pi:
pi = pi[0][0]
frappe.throw(_("Supplier Invoice No exists in Purchase Invoice {0}".format(pi)))
def update_billing_status_in_pr(self, update_modified=True):
updated_pr = []
for d in self.get("items"):
if d.pr_detail:
billed_amt = frappe.db.sql("""select sum(amount) from `tabPurchase Invoice Item`
where pr_detail=%s and docstatus=1""", d.pr_detail)
billed_amt = billed_amt and billed_amt[0][0] or 0
frappe.db.set_value("Purchase Receipt Item", d.pr_detail, "billed_amt", billed_amt, update_modified=update_modified)
updated_pr.append(d.purchase_receipt)
elif d.po_detail:
updated_pr += update_billed_amount_based_on_po(d.po_detail, update_modified)
for pr in set(updated_pr):
frappe.get_doc("Purchase Receipt", pr).update_billing_percentage(update_modified=update_modified)
def validate_fixed_asset_account(self):
for d in self.get('items'):
if d.is_fixed_asset:
account_type = frappe.db.get_value("Account", d.expense_account, "account_type")
if account_type != 'Fixed Asset':
frappe.throw(_("Row {0}# Account must be of type 'Fixed Asset'").format(d.idx))
def on_recurring(self, reference_doc):
self.due_date = None
@frappe.whitelist()
def make_debit_note(source_name, target_doc=None):
from erpnext.controllers.sales_and_purchase_return import make_return_doc
return make_return_doc("Purchase Invoice", source_name, target_doc)
@frappe.whitelist()
def get_fixed_asset_account(asset, account=None):
if account:
if frappe.db.get_value("Account", account, "account_type") != "Fixed Asset":
account=None
if not account:
asset_category, company = frappe.db.get_value("Asset", asset, ["asset_category", "company"])
account = frappe.db.get_value("Asset Category Account",
filters={"parent": asset_category, "company_name": company}, fieldname="fixed_asset_account")
return account
| gpl-3.0 | -4,062,770,884,894,112,000 | 37.749632 | 184 | 0.689559 | false | 3.18073 | false | false | false |
npilon/planterbox | planterbox/decorators.py | 1 | 2059 | """Decorators used when building a package of planterbox features to define
steps and hooks
Also some private functions used by those decorators.
"""
from functools import partial
import logging
import re
from six import (
string_types,
)
log = logging.getLogger('planterbox')
EXAMPLE_TO_FORMAT = re.compile(r'<(.+?)>')
FEATURE_NAME = re.compile(r'\.feature(?:\:[\d,]+)?$')
def make_step(pattern, multiline, fn):
"""Inner decorator for making a function usable as a step."""
planterbox_prefix = r'^\s*(?:Given|And|When|Then|But)\s+'
planterbox_patterns = getattr(fn, 'planterbox_patterns', [])
if multiline:
if isinstance(multiline, string_types):
pattern = pattern + r'\n(?P<{}>(?:.|\n)+)'.format(multiline)
else:
pattern = pattern + r'\n((?:.|\n)+)'
planterbox_patterns.append(
re.compile(planterbox_prefix + pattern, re.IGNORECASE))
fn.planterbox_patterns = planterbox_patterns
return fn
def step(pattern, multiline=False):
"""Decorate a function with a pattern so it can be used as a step.
Optional arguments:
- multiline: If true, this step-pattern will be turned into a multiline
pattern. This adds a regular expression to the end that captures all
remaining lines as a single group. If a string, that string will be used
as the name of the multiline group.
"""
return partial(make_step, pattern, multiline)
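# Hypothetical usage (names are illustrative, not from this module):
#   @step(r'I have (\d+) cucumbers')
#   def have_cucumbers(test, count):
#       test.cucumbers = int(count)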
def make_hook(timing, stage, fn):
"""Inner decorator for making a function usable as a hook."""
planterbox_hook_timing = getattr(fn, 'planterbox_hook_timing', set())
planterbox_hook_timing.add((timing, stage))
fn.planterbox_hook_timing = planterbox_hook_timing
return fn
def hook(timing, stage):
"""Register a function as a hook to be run before or after a feature, scenario, or step."""
if timing not in ('before', 'after'):
raise ValueError(timing)
if stage not in ('feature', 'scenario', 'step', 'error', 'failure'):
raise ValueError(stage)
return partial(make_hook, timing, stage)
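# e.g. a hypothetical hook: @hook('before', 'scenario') on a function that
# resets fixture state before every scenario in the feature package.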
| mit | -6,669,727,340,165,458,000 | 29.731343 | 78 | 0.668771 | false | 3.771062 | false | false | false |
rahimnathwani/measure-anything | project/auth/models.py | 1 | 6772 | from datetime import datetime
import hashlib
from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from flask import current_app
from flask.ext.login import UserMixin, AnonymousUserMixin
from .. import db, login_manager
class Permission:
ADMINISTER = 0x80
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
default = db.Column(db.Boolean, default=False, index=True)
permissions = db.Column(db.Integer)
users = db.relationship('User', backref='role', lazy='dynamic')
@staticmethod
def insert_roles():
roles = {
'User': (0x00, True),
'Administrator': (0xff, False)
}
for r in roles:
role = Role.query.filter_by(name=r).first()
if role is None:
role = Role(name=r)
role.permissions = roles[r][0]
role.default = roles[r][1]
db.session.add(role)
db.session.commit()
def __repr__(self):
return '<Role %r>' % self.name
class User(UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(64), unique=True, index=True)
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
password_hash = db.Column(db.String(128))
confirmed = db.Column(db.Boolean, default=False)
name = db.Column(db.String(64))
location = db.Column(db.String(64))
about_me = db.Column(db.Text())
member_since = db.Column(db.DateTime(), default=datetime.utcnow)
last_seen = db.Column(db.DateTime(), default=datetime.utcnow)
connections = db.relationship('Connection',
backref=db.backref('user', lazy='joined'), cascade="all")
estimates = db.relationship("Estimate", backref='user')
@staticmethod
def generate_fake(count=100):
from sqlalchemy.exc import IntegrityError
from random import seed
import forgery_py
seed()
for i in range(count):
u = User(email=forgery_py.internet.email_address(),
password=forgery_py.lorem_ipsum.word(),
confirmed=True,
name=forgery_py.name.full_name(),
location=forgery_py.address.city(),
about_me=forgery_py.lorem_ipsum.sentence(),
member_since=forgery_py.date.date(True))
db.session.add(u)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
def __init__(self, **kwargs):
super(User, self).__init__(**kwargs)
if self.role is None:
self.role = Role.query.filter_by(default=True).first()
@property
def password(self):
raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def generate_confirmation_token(self, expiration=3600):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'confirm': self.id})
def confirm(self, token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return False
if data.get('confirm') != self.id:
return False
self.confirmed = True
db.session.add(self)
return True
def generate_reset_token(self, expiration=3600):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'reset': self.id})
def reset_password(self, token, new_password):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return False
if data.get('reset') != self.id:
return False
self.password = new_password
db.session.add(self)
return True
def generate_email_change_token(self, new_email, expiration=3600):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'change_email': self.id, 'new_email': new_email})
def change_email(self, token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return False
if data.get('change_email') != self.id:
return False
new_email = data.get('new_email')
if new_email is None:
return False
if self.query.filter_by(email=new_email).first() is not None:
return False
self.email = new_email
db.session.add(self)
return True
def can(self, permissions):
return self.role is not None and \
(self.role.permissions & permissions) == permissions
def is_administrator(self):
return self.can(Permission.ADMINISTER)
def ping(self):
self.last_seen = datetime.utcnow()
db.session.add(self)
def to_json(self):
json_user = {
'member_since': self.member_since,
'last_seen': self.last_seen,
}
return json_user
def generate_auth_token(self, expiration):
s = Serializer(current_app.config['SECRET_KEY'],
expires_in=expiration)
return s.dumps({'id': self.id}).decode('ascii')
@staticmethod
def verify_auth_token(token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return None
return User.query.get(data['id'])
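# Illustrative round trip of the two methods above (not part of the model):
#   token = user.generate_auth_token(expiration=3600)
#   assert User.verify_auth_token(token).id == user.id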
def __repr__(self):
return '<User %r>' % self.email
class AnonymousUser(AnonymousUserMixin):
def can(self, permissions):
return False
def is_administrator(self):
return False
class Connection(db.Model):
__tablename__ = "connections"
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
oauth_provider = db.Column(db.String(255))
oauth_id = db.Column(db.String(255))
oauth_token = db.Column(db.String(255))
oauth_secret = db.Column(db.String(255))
display_name = db.Column(db.String(255))
full_name = db.Column(db.String(255))
profile_url = db.Column(db.String(512))
image_url = db.Column(db.String(512))
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
| mit | -7,012,245,458,600,389,000 | 31.557692 | 73 | 0.603219 | false | 3.834655 | true | false | false |
eubr-bigsea/tahiti | migrations/versions/1d7c21b6c7d2_add_keras_core_layer_operation_reshape.py | 1 | 10248 | # -*- coding: utf-8 -*-
"""Add Keras Core Layer Operation Reshape
Revision ID: 1d7c21b6c7d2
Revises: 4a4b7df125b7
Create Date: 2018-11-01 10:26:22.659859
"""
from alembic import op
import sqlalchemy as sa
from alembic import context
from alembic import op
from sqlalchemy import String, Integer, Text
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import table, column, text
# revision identifiers, used by Alembic.
revision = '1d7c21b6c7d2'
down_revision = '4a4b7df125b7'
branch_labels = None
depends_on = None
KERAS_PLATAFORM_ID = 5
def _insert_operation_platform():
tb = table(
'operation_platform',
column('operation_id', Integer),
column('platform_id', Integer), )
columns = ('operation_id', 'platform_id')
data = [
(5015, KERAS_PLATAFORM_ID),# Reshape
]
rows = [dict(list(zip(columns, row))) for row in data]
op.bulk_insert(tb, rows)
def _insert_operation():
tb = table(
'operation',
column('id', Integer),
column('slug', String),
column('enabled', Integer),
column('type', String),
column('icon', Integer),)
columns = ('id', 'slug', 'enabled', 'type', 'icon')
data = [
(5015, "reshape", 1, 'ACTION', ''),
]
rows = [dict(list(zip(columns, row))) for row in data]
op.bulk_insert(tb, rows)
def _insert_operation_category():
tb = table(
'operation_category',
column('id', Integer),
column('type', String),
column('order', Integer),
column('default_order', Integer),
)
columns = ('id', 'type', 'order', 'default_order')
data = [
(5015, "subgroup", 8, 8),# Reshape
]
rows = [dict(list(zip(columns, row))) for row in data]
op.bulk_insert(tb, rows)
def _insert_operation_category_operation():
tb = table(
'operation_category_operation',
column('operation_id', Integer),
column('operation_category_id', Integer))
columns = ('operation_category_id', 'operation_id')
data = [
#Core Layers
(5010, 5015),# Reshape
]
rows = [dict(list(zip(columns, row))) for row in data]
op.bulk_insert(tb, rows)
def _insert_operation_translation():
tb = table(
'operation_translation',
column('id', Integer),
column('locale', String),
column('name', String),
column('description', String))
columns = ('id', 'locale', 'name', 'description')
data = [
(5015, "en", 'Reshape', ''),
]
rows = [dict(list(zip(columns, row))) for row in data]
op.bulk_insert(tb, rows)
def _insert_operation_port():
tb = table(
'operation_port',
column('id', Integer),
column('type', String),
column('tags', String),
column('order', Integer),
column('multiplicity', String),
column('operation_id', Integer),
column('slug', String),)
columns = ('id', 'type', 'tags', 'order', 'multiplicity', 'operation_id', 'slug')
data = [
#Reshape
(5115, 'INPUT', '', 1, 'ONE', 5015, 'input data'),
(5215, 'OUTPUT', '', 1, 'ONE', 5015, 'output data'),
]
rows = [dict(list(zip(columns, row))) for row in data]
op.bulk_insert(tb, rows)
def _insert_operation_port_interface_operation_port():
tb = table(
'operation_port_interface_operation_port',
column('operation_port_id', Integer),
column('operation_port_interface_id', Integer))
columns = ('operation_port_id', 'operation_port_interface_id')
data = [
#Reshape
(5115, 1),
(5215, 1),
]
rows = [dict(list(zip(columns, row))) for row in data]
op.bulk_insert(tb, rows)
def _insert_operation_port_translation():
tb = table(
'operation_port_translation',
column('id', Integer),
column('locale', String),
column('name', String),
column('description', String))
columns = ('id', 'locale', 'name', 'description')
data = [
#Reshape
(5115, "en", 'input data', 'Input data'),
(5215, "en", 'output data', 'Output data'),
]
rows = [dict(list(zip(columns, row))) for row in data]
op.bulk_insert(tb, rows)
def _insert_operation_form():
operation_form_table = table(
'operation_form',
column('id', Integer),
column('enabled', Integer),
column('order', Integer),
column('category', String), )
columns = ('id', 'enabled', 'order', 'category')
data = [
#Reshape - target_shape
(5132, 1, 1, 'execution'),
#Reshape - input_shape
#(5133, 1, 1, 'execution'),
]
rows = [dict(list(zip(columns, row))) for row in data]
op.bulk_insert(operation_form_table, rows)
def _insert_operation_form_translation():
tb = table(
'operation_form_translation',
column('id', Integer),
column('locale', String),
column('name', String))
columns = ('id', 'locale', 'name')
data = [
#Reshape - target_shape
(5132, 'en', 'Execution'),
(5132, 'pt', 'Execução'),
#Reshape - input_shape
#(5133, 'en', 'Execution'),
#(5133, 'pt', 'Execução'),
]
rows = [dict(list(zip(columns, row))) for row in data]
op.bulk_insert(tb, rows)
def _insert_operation_operation_form():
tb = table(
'operation_operation_form',
column('operation_id', Integer),
column('operation_form_id', Integer))
columns = ('operation_id', 'operation_form_id')
data = [
(5015, 41), #appearance
#Reshape - target_shape
(5015, 5132), # own execution form
#Reshape - input_shape
#(5015, 5133), # own execution form
]
rows = [dict(list(zip(columns, row))) for row in data]
op.bulk_insert(tb, rows)
def _insert_operation_form_field():
tb = table(
'operation_form_field',
column('id', Integer),
column('name', String),
column('type', String),
column('required', Integer),
column('order', Integer),
column('default', Text),
column('suggested_widget', String),
column('values_url', String),
column('values', String),
column('scope', String),
column('form_id', Integer), )
columns = ('id', 'name', 'type', 'required', 'order', 'default',
'suggested_widget', 'values_url', 'values', 'scope', 'form_id')
data = [
#Reshape - target_shape
(5132, 'target_shape', 'TEXT', 1, 1, None, 'text', None, None, 'EXECUTION', 5132),
#Reshape - input_shape
#(5133, 'input_shape', 'TEXT', 0, 2, None, 'text', None, None, 'EXECUTION', 5133),
]
rows = [dict(list(zip(columns, row))) for row in data]
op.bulk_insert(tb, rows)
def _insert_operation_form_field_translation():
tb = table(
'operation_form_field_translation',
column('id', Integer),
column('locale', String),
column('label', String),
column('help', String), )
columns = ('id', 'locale', 'label', 'help')
data = [
#Reshape - target_shape
(5132, 'en', 'Target shape', 'Tuple of integers. Does not include the batch axis. Ex.: (6,2)'),
#Reshape - input_shape
#(5133, 'en', 'input_shape', 'Arbitrary, although all dimensions in the input shaped must be fixed. '
# 'Use the keyword argument input_shape (tuple of integers, does not '
# 'include the batch axis) when using this layer as the first '
# 'layer in a model. Ex.: (12,)'),
]
rows = [dict(list(zip(columns, row))) for row in data]
op.bulk_insert(tb, rows)
all_commands = [
(_insert_operation,
'DELETE FROM operation WHERE id = 5015'),
(_insert_operation_category,
'DELETE FROM operation_category WHERE id = 5015'),
(_insert_operation_translation,
'DELETE FROM operation_translation WHERE id = 5015'),
(_insert_operation_category_operation,
'DELETE FROM operation_category_operation WHERE operation_id = 5015'),
(_insert_operation_platform,
'DELETE FROM operation_platform WHERE operation_id = 5015 AND platform_id = {}'.format(KERAS_PLATAFORM_ID)),
(_insert_operation_port,
'DELETE FROM operation_port WHERE id IN (5115, 5215)'),
(_insert_operation_port_interface_operation_port,
'DELETE FROM operation_port_interface_operation_port WHERE operation_port_id IN (5115, 5215)'),
(_insert_operation_port_translation,
'DELETE FROM operation_port_translation WHERE id IN (5115, 5215)'),
(_insert_operation_form,
'DELETE FROM operation_form WHERE id IN (5132, 5133)'),
(_insert_operation_form_field,
'DELETE FROM operation_form_field WHERE id IN (5132, 5133)'),
(_insert_operation_form_translation,
'DELETE FROM operation_form_translation WHERE id IN (5132, 5133)'),
(_insert_operation_form_field_translation,
'DELETE FROM operation_form_field_translation WHERE id IN (5132, 5133)'),
(_insert_operation_operation_form,
'DELETE FROM operation_operation_form WHERE operation_id = 5015'),
]
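# Each tuple above pairs an insert helper (run on upgrade) with the SQL that
# reverses it (run on downgrade), keeping the migration symmetric.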
def upgrade():
ctx = context.get_context()
session = sessionmaker(bind=ctx.bind)()
connection = session.connection()
try:
for cmd in all_commands:
if isinstance(cmd[0], str):
connection.execute(cmd[0])
elif isinstance(cmd[0], list):
for row in cmd[0]:
connection.execute(row)
else:
cmd[0]()
except:
session.rollback()
raise
session.commit()
def downgrade():
ctx = context.get_context()
session = sessionmaker(bind=ctx.bind)()
connection = session.connection()
try:
for cmd in reversed(all_commands):
if isinstance(cmd[1], str):
connection.execute(cmd[1])
elif isinstance(cmd[1], list):
for row in cmd[1]:
connection.execute(row)
else:
cmd[1]()
except:
session.rollback()
raise
session.commit()
| apache-2.0 | 7,580,760,651,073,423,000 | 28.268571 | 113 | 0.577314 | false | 3.694194 | false | false | false |
phtj/eddex | houdini/python_libs/houdini_ea/wrappers.py | 1 | 4084 | """
Executor classes used for executing tasks.
"""
import sys, os
import hou
from bson.binary import Binary
#Current working directory
HOU_FOLDER_PATH = os.getcwd()
#Helper functions
def _load_hip_file(hip_file_name):
"""Attempt to load a hip file in Houdini """
#Replacing "\\" with "/" is required to avoid errors in evaluating "$HIP"
hou_file_path = os.path.join(HOU_FOLDER_PATH, hip_file_name).replace("\\", "/")
try:
result = hou.hipFile.load(hou_file_path)
except hou.LoadWarning as e:
print "hou.LoadWarning exception loading hip file"
print str(e)
raise
except hou.OperationFailed as e:
print "hou.OperationFailed exception loading hip file"
print str(e)
raise
except Exception as e:
print "Exception loading hip file"
print str(e)
raise
except:
print "Unrecognised exception loading hip file"
raise
if result:
print "Warnings loading hip file: ", result
def _get_hou_node(node_path):
"""Attempt to get a node from hou """
node = hou.node(node_path)
if not node:
print "ERROR: Houdini node " + node_path + " does not exist."
raise Exception()
return node
def _cook_hou_node(node, animate = None):
"""cook a node in a houdini file"""
if animate is not None:
node.cook(force=True, frame_range=animate)
else:
node.cook(force=True)
def _set_hou_node_parameters(node, prefix, values, start_index=1):
"""set parameter values of a houdini node"""
for i, v in enumerate(values):
node.setParms({prefix+str(i+start_index):v})
def _get_hou_node_attributes(node, attribute_names):
"""get the attribute values of a houdini node (detail attributes)"""
results = []
for attribute_name in attribute_names:
result = node.geometry().attribValue(attribute_name)
results.append(result)
return results
def _temp_dir():
"""Create an empty temp folder. (If the folder already exists, delete the files inside it.) """
temp_dir = os.path.join(os.getcwd(), "temp")
if not os.path.exists(temp_dir):
os.mkdir(temp_dir)
else:
for the_file in os.listdir(temp_dir):
file_path = os.path.join(temp_dir, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
except Exception, e:
print e
return temp_dir
# Functions for executing tasks
def houdini_develop(ind, hip_file_name, in_path, out_path, animate=None):
#get the genotype
genotype = ind.get_genotype()
#open the hipnc file
_load_hip_file(hip_file_name)
#set the parameters using the individual's genes
genotype_node = _get_hou_node(in_path)
_set_hou_node_parameters(genotype_node, "gene_", genotype)
#save phenotype to file
phen_file_path = os.path.join(_temp_dir(), "temp.bgeo")
phenotype_node = _get_hou_node(out_path)
phenotype_node.setParms(dict([["file",phen_file_path]]))
_cook_hou_node(phenotype_node, animate)
# get and save the phenotype
with open(phen_file_path, "rb") as f:
phenotype = f.read()
return Binary(phenotype)
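# The genotype is pushed into Houdini as gene_* parameters, cooked into
# geometry, and the resulting .bgeo bytes become the stored phenotype;
# houdini_evaluate below reloads that geometry and reads detail attributes
# back out as the individual's scores.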
def houdini_evaluate(ind, score_names, hip_file_name, in_path, out_path, animate=None):
#get the phenotype
phenotype = ind.get_phenotype()
#write the phenotype to a temporary file
phen_file_path = os.path.join(_temp_dir(), "temp.bgeo")
with open(phen_file_path, "wb") as f:
f.write(phenotype)
#open the phenotype hipnc file
_load_hip_file(hip_file_name)
#load the geometry into the phenotype node
phenotype_node = _get_hou_node(in_path)
phenotype_node.setParms(dict([["file",phen_file_path]]))
#cook the score node
score_node = _get_hou_node(out_path)
_cook_hou_node(score_node, animate)
#get and save all the scores
score_values = []
for score_name in score_names:
score_value = score_node.geometry().attribValue(score_name)
score_values.append(score_value)
return score_values
| gpl-3.0 | 4,568,971,612,252,445,000 | 33.033333 | 87 | 0.63859 | false | 3.312247 | false | false | false |
fugitifduck/exabgp | lib/exabgp/protocol/family.py | 1 | 5287 | # encoding: utf-8
"""
address.py
Created by Thomas Mangin on 2010-01-19.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
from struct import pack
from struct import unpack
# =================================================================== AFI
# http://www.iana.org/assignments/address-family-numbers/
class AFI (int):
undefined = 0x00 # internal
ipv4 = 0x01
ipv6 = 0x02
l2vpn = 0x19
Family = {
ipv4: 0x02, # socket.AF_INET,
ipv6: 0x30, # socket.AF_INET6,
l2vpn: 0x02, # l2vpn info over ipv4 session
}
names = {
'ipv4': ipv4,
'ipv6': ipv6,
'l2vpn': l2vpn,
}
def __str__ (self):
if self == 0x01:
return "ipv4"
if self == 0x02:
return "ipv6"
if self == 0x19:
return "l2vpn"
return "unknown afi %d" % self
def __repr__ (self):
return str(self)
def name (self):
if self == 0x01:
return "inet4"
if self == 0x02:
return "inet6"
if self == 0x19:
return "l2vpn"
return "unknown afi"
def pack (self):
return pack('!H',self)
@staticmethod
def unpack (data):
return AFI(unpack('!H',data)[0])
@staticmethod
def value (name):
if name == "ipv4":
return AFI.ipv4
if name == "ipv6":
return AFI.ipv6
return None
@staticmethod
def implemented_safi (afi):
if afi == 'ipv4':
return ['unicast','multicast','nlri-mpls','mpls-vpn','flow','flow-vpn']
if afi == 'ipv6':
return ['unicast','mpls-vpn','flow','flow-vpn']
if afi == 'l2vpn':
return ['vpls']
return []
@classmethod
def fromString (cls, string):
return cls.names.get(string,cls.undefined)
# =================================================================== SAFI
# http://www.iana.org/assignments/safi-namespace
class SAFI (int):
undefined = 0 # internal
unicast = 1 # [RFC4760]
multicast = 2 # [RFC4760]
# deprecated = 3 # [RFC4760]
nlri_mpls = 4 # [RFC3107]
# mcast_vpn = 5 # [draft-ietf-l3vpn-2547bis-mcast-bgp] (TEMPORARY - Expires 2008-06-19)
# pseudowire = 6 # [draft-ietf-pwe3-dynamic-ms-pw] (TEMPORARY - Expires 2008-08-23) Dynamic Placement of Multi-Segment Pseudowires
# encapsulation = 7 # [RFC5512]
# tunel = 64 # [Nalawade]
vpls = 65 # [RFC4761]
# bgp_mdt = 66 # [Nalawade]
# bgp_4over6 = 67 # [Cui]
# bgp_6over4 = 67 # [Cui]
# vpn_adi = 69 # [RFC-ietf-l1vpn-bgp-auto-discovery-05.txt]
evpn = 70 # [draft-ietf-l2vpn-evpn]
mpls_vpn = 128 # [RFC4364]
# mcast_bgp_mpls_vpn = 129 # [RFC2547]
# rt = 132 # [RFC4684]
rtc = 132 # [RFC4684]
flow_ip = 133 # [RFC5575]
flow_vpn = 134 # [RFC5575]
# vpn_ad = 140 # [draft-ietf-l3vpn-bgpvpn-auto]
# private = [_ for _ in range(241,254)] # [RFC4760]
# unassigned = [_ for _ in range(8,64)] + [_ for _ in range(70,128)]
# reverved = [0,3] + [130,131] + [_ for _ in range(135,140)] + [_ for _ in range(141,241)] + [255,] # [RFC4760]
names = {
'unicast': unicast,
'multicast': multicast,
'nlri-mpls': nlri_mpls,
'vpls': vpls,
'evpn': evpn,
'mpls-vpn': mpls_vpn,
'rtc': rtc,
'flow': flow_ip,
'flow-vpn': flow_vpn,
}
def name (self):
if self == 0x01:
return "unicast"
if self == 0x02:
return "multicast"
if self == 0x04:
return "nlri-mpls"
if self == 0x46:
return "evpn"
if self == 0x80:
return "mpls-vpn"
if self == 0x84:
return "rtc"
if self == 0x85:
return "flow"
if self == 0x86:
return "flow-vpn"
if self == 0x41:
return "vpls"
return "unknown safi %d" % self
def __str__ (self):
return self.name()
def __repr__ (self):
return str(self)
def pack (self):
return chr(self)
@staticmethod
def unpack (data):
return SAFI(ord(data))
def has_label (self):
return self in (self.nlri_mpls,self.mpls_vpn)
def has_rd (self):
return self in (self.mpls_vpn,) # technically self.flow_vpn and self.vpls has an RD but it is not an NLRI
@staticmethod
def value (name):
if name == "unicast":
return 0x01
if name == "multicast":
return 0x02
if name == "nlri-mpls":
return 0x04
if name == "mpls-vpn":
return 0x80
if name == "flow":
return 0x85
if name == "flow-vpn":
return 0x86
if name == "vpls":
return 0x41
return None
@classmethod
def fromString (cls, string):
return cls.names.get(string,cls.undefined)
def known_families ():
# it can not be a generator
families = [
(AFI(AFI.ipv4), SAFI(SAFI.unicast)),
(AFI(AFI.ipv4), SAFI(SAFI.multicast)),
(AFI(AFI.ipv4), SAFI(SAFI.nlri_mpls)),
(AFI(AFI.ipv4), SAFI(SAFI.mpls_vpn)),
(AFI(AFI.ipv4), SAFI(SAFI.flow_ip)),
(AFI(AFI.ipv4), SAFI(SAFI.flow_vpn)),
(AFI(AFI.ipv6), SAFI(SAFI.unicast)),
(AFI(AFI.ipv6), SAFI(SAFI.mpls_vpn)),
(AFI(AFI.ipv6), SAFI(SAFI.flow_ip)),
(AFI(AFI.ipv6), SAFI(SAFI.flow_vpn)),
(AFI(AFI.l2vpn), SAFI(SAFI.vpls))
]
return families
class Family (object):
def __init__ (self, afi, safi):
self.afi = AFI(afi)
self.safi = SAFI(safi)
def extensive (self):
return 'afi %s safi %s' % (self.afi,self.safi)
def __str__ (self):
return 'family %s %s' % (self.afi,self.safi)
| bsd-3-clause | 2,321,827,960,558,510,000 | 22.923077 | 142 | 0.566106 | false | 2.520019 | false | false | false |
ogdch/ckanext-meteoswiss | ckanext/meteoswiss/harvesters/meteoswissharvester.py | 1 | 13076 | #n -*- coding: utf-8 -*-
import json
import os
import tempfile
from uuid import NAMESPACE_OID, uuid4, uuid5
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from ckan import model
from ckan.model import Session
from ckan.logic import get_action, action
from ckan.lib.munge import munge_title_to_name
from ckanext.harvest.model import HarvestObject
from ckanext.harvest.harvesters import HarvesterBase
from pylons import config
from ..helpers.metadata import MetaDataParser
import logging
log = logging.getLogger(__name__)
class MeteoswissHarvester(HarvesterBase):
'''
The harvester for meteoswiss
'''
HARVEST_USER = u'harvest'
METADATA_FILE_NAME = u'OGD@Bund_Metadaten_MeteoSchweiz_rig_V3 6.xlsx'
METADATA_FILE_PATH = (
u'ch.meteoschweiz.normwerttabellen/%s'
% METADATA_FILE_NAME
)
BUCKET_NAME = config.get('ckanext.meteoswiss.bucket_name')
AWS_ACCESS_KEY = config.get('ckanext.meteoswiss.access_key')
AWS_SECRET_KEY = config.get('ckanext.meteoswiss.secret_key')
SHEETS = (
# Sheet name # Use GM03 descriptions
(u'SMN', False),
(u'SMN-precip', False),
(u'Föhnindex', False),
(u'HomogeneDaten', False),
(u'Klimanormwerte', True),
#(u'Kamerabild', True),
)
S3_PREFIXES = {
u'SMN': 'ch.meteoschweiz.swissmetnet',
u'SMN-precip': 'ch.meteoschweiz.swissmetnet-niederschlag',
u'Föhnindex': 'ch.meteoschweiz.swissmetnet-foehnindex',
u'HomogeneDaten': 'ch.meteoschweiz.homogenereihen',
u'Klimanormwerte': 'ch.meteoschweiz.normwerttabellen',
u'Kamerabild': 'ch.meteoschweiz.kamerabilder',
}
ORGANIZATION = {
'de': {
'name': (
u'Bundesamt für Meteorologie '
u'und Klimatologie MeteoSchweiz'
),
'description': (
u'Der nationale Wetter- und Klimadienst. Messstationen, '
u'Wetterradars und Satelliten überwachen das Wetter. '
u'Aus den Messdaten erstellt MeteoSchweiz Prognosen, '
u'Warnungen und Klimaanalysen.'
),
'website': u'http://www.meteoschweiz.admin.ch/'
},
'fr': {
'name': (
u'Office fédéral de météorologie '
u'et de climatologie MétéoSuisse'
),
'description': (
u'Le service météorologique et climatologique national. '
u'A partir de l\'ensemble des stations de mesure, des '
u'radars météorologiques et des satellites MétéoSuisse '
u'élabore pronostics, alertes et analyse climatiques.'
)
},
'it': {
'name': (
u'Ufficio federale di meteorologia e '
u'climatologia MeteoSvizzera'
),
'description': (
u'Il servizio nazionale di meteorologia e climatologia. '
u'Sulla base di dati di stazioni di rilevamento, radar '
u'meteorologici e satelliti MeteoSvizzera elabora previsioni '
u'del tempo, allerte e le analisi climatologiche.'
)
},
'en': {
'name': (
u'Federal Office of Meteorology and '
u'Climatology MeteoSwiss'
),
'description': (
u'The national weather and climate service. Meteorological '
u'stations, weather radars and satellites monitor the '
u'weather. Using the collected data, MeteoSwiss generates '
u'forecasts, warnings and climate analyses.'
)
}
}
GROUPS = {
u'de': [u'Raum und Umwelt'],
u'fr': [u'Espace et environnement'],
u'it': [u'Territorio e ambiente'],
u'en': [u'Territory and environment']
}
def _get_s3_bucket(self):
'''
Create an S3 connection to the department bucket
'''
if not hasattr(self, '_bucket'):
try:
conn = S3Connection(self.AWS_ACCESS_KEY, self.AWS_SECRET_KEY)
self._bucket = conn.get_bucket(self.BUCKET_NAME)
except Exception, e:
log.exception(e)
raise e
return self._bucket
def _fetch_metadata_file(self):
'''
Fetch the Excel metadata file from the S3 bucket and save it on disk
'''
try:
temp_dir = tempfile.mkdtemp()
metadata_file = Key(self._get_s3_bucket())
metadata_file.key = self.METADATA_FILE_PATH
metadata_file_path = os.path.join(
temp_dir,
self.METADATA_FILE_NAME
)
metadata_file.get_contents_to_filename(metadata_file_path)
return metadata_file_path
except Exception, e:
log.exception(e)
raise
def _get_s3_resources(self, resources, s3_prefix):
'''
Look up all files on S3 and match them with their meta descriptions
'''
result = []
for key in self._get_s3_bucket().list(s3_prefix):
path = key.name.split('/')
# Skip metadata file
if key.name == self.METADATA_FILE_PATH:
continue
if len(path) >= 2 and path[0] == s3_prefix and key.size > 0:
url = key.generate_url(0, query_auth=False, force_http=True)
name = os.path.basename(key.name)
data = {
u'url': url,
u'name': name,
u'format': self._guess_format(name),
}
description = self._description_lookup(resources, name)
if description:
data.update({u'description': description})
result.append(data)
return result
def _guess_format(self, path):
return os.path.splitext(path.lower())[1][1:]
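# i.e. the lower-cased file extension without the dot, e.g. 'Data.CSV' -> 'csv'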
def _description_lookup(self, resources, filename):
'''
Check if metafile declared a description to this resource
'''
basename, ext = os.path.splitext(filename)
for resource in resources:
if basename in resource.get('id', ''):
return resource.get('description')
if basename in resource.get('Standort', ''):
return resource.get('description')
def info(self):
return {
'name': 'meteoswiss',
'title': 'Meteoswiss',
'description': 'Harvests the meteoswiss data',
'form_config_interface': 'Text'
}
def gather_stage(self, harvest_job):
log.debug('In Meteoswiss gather_stage')
file_path = self._fetch_metadata_file()
ids = []
for sheet_name, use_gm03_desc in self.SHEETS:
log.debug('Gathering %s' % sheet_name)
parser = MetaDataParser(file_path)
metadata = parser.parse_sheet(sheet_name, use_gm03_desc)
metadata['translations'].extend(self._metadata_term_translations())
metadata['sheet_name'] = sheet_name
obj = HarvestObject(
job=harvest_job,
content=json.dumps(metadata)
)
obj.save()
ids.append(obj.id)
return ids
def fetch_stage(self, harvest_object):
log.debug('In Meteoswiss fetch_stage')
package_dict = json.loads(harvest_object.content)
sheet_name = package_dict.get('sheet_name')
s3_prefix = self.S3_PREFIXES.get(sheet_name)
if s3_prefix:
log.debug('Loading S3 Resources for %s' % sheet_name)
package_dict['resources'] = self._get_s3_resources(
package_dict.get('resources', []),
s3_prefix
)
harvest_object.content = json.dumps(package_dict)
harvest_object.save()
return True
def _create_uuid(self, name=None):
'''
Create a new SHA-1 uuid for a given name or a random id
'''
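        # Note (illustrative): uuid5 is deterministic, so the same dataset name always
        # hashes to the same id; re-harvesting therefore updates the existing dataset
        # in place instead of creating a duplicate.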
if name:
new_uuid = uuid5(NAMESPACE_OID, str(name))
else:
new_uuid = uuid4()
return unicode(new_uuid)
def import_stage(self, harvest_object):
log.debug('In Meteoswiss import_stage')
if not harvest_object:
log.error('No harvest object received')
return False
try:
package_dict = json.loads(harvest_object.content)
user = model.User.get(self.HARVEST_USER)
context = {
'model': model,
'session': Session,
'user': self.HARVEST_USER
}
package_dict['id'] = self._create_uuid(package_dict.get('id'))
# Find or create group the dataset should get assigned to
package_dict['groups'] = self._find_or_create_groups(context)
# Find or create the organization
# the dataset should get assigned to
package_dict['owner_org'] = self._find_or_create_organization(
context
)
# because license_url does not exist, we save it in extras for now
extras = []
if 'licence_url' in package_dict:
extras.append(('license_url', package_dict['licence_url']))
elif 'license_url' in package_dict:
extras.append(('license_url', package_dict['license_url']))
package_dict['extras'] = extras
log.debug('Extras %s' % extras)
# Never import state from data source!
if 'state' in package_dict:
del package_dict['state']
# Split tags
tags = package_dict.get('tags', '').split(',')
tags = [tag.strip() for tag in tags]
if '' not in tags and '(tbd)' not in tags:
package_dict['tags'] = tags
else:
del package_dict['tags']
package = model.Package.get(package_dict['id'])
model.PackageRole(
package=package,
user=user,
role=model.Role.ADMIN
)
#log.debug('Save or update package %s' % (package_dict['name'],))
self._create_or_update_package(package_dict, harvest_object)
log.debug('Save or update term translations')
self._submit_term_translations(context, package_dict)
Session.commit()
except Exception, e:
log.exception(e)
raise e
return True
def _find_or_create_groups(self, context):
group_name = self.GROUPS['de'][0]
data_dict = {
'id': group_name,
'name': munge_title_to_name(group_name),
'title': group_name
}
try:
group = get_action('group_show')(context, data_dict)
except:
group = get_action('group_create')(context, data_dict)
log.info('created the group ' + group['id'])
group_ids = []
group_ids.append(group['id'])
return group_ids
def _find_or_create_organization(self, context):
try:
data_dict = {
'permission': 'edit_group',
'id': munge_title_to_name(self.ORGANIZATION['de']['name']),
'name': munge_title_to_name(self.ORGANIZATION['de']['name']),
'title': self.ORGANIZATION['de']['name'],
'description': self.ORGANIZATION['de']['description'],
'extras': [
{
'key': 'website',
'value': self.ORGANIZATION['de']['website']
}
]
}
organization = get_action('organization_show')(context, data_dict)
except:
organization = get_action('organization_create')(
context,
data_dict
)
return organization['id']
def _metadata_term_translations(self):
'''
        Generate term translations for organizations
'''
try:
translations = []
for lang, org in self.ORGANIZATION.items():
if lang != 'de':
for field in ['name', 'description']:
translations.append({
'lang_code': lang,
'term': self.ORGANIZATION['de'][field],
'term_translation': org[field]
})
return translations
except Exception, e:
log.exception(e)
raise
def _submit_term_translations(self, context, package_dict):
for translation in package_dict['translations']:
action.update.term_translation_update(context, translation)
| agpl-3.0 | -5,317,946,303,690,557,000 | 32.14467 | 79 | 0.532353 | false | 3.88314 | false | false | false |
Morgan-Stanley/hobbes | scripts/fregion.py | 1 | 33452 | #!/usr/bin/env python
########################################################
#
# fregion.py : read structured data files
#
# to load a file from the path P into the variable f:
# f = fregion.FRegion(P)
#
# to read the stored field 'x' out of f:
# f.x
#
# to read the 'metadata' for the field 'x' (type and offset details):
# meta(f).x
#
# There are 2 ways to add reading support for a custom user type:
# 1.
# fregion.FRegion.addType("MyTypeName", lambda renv, td, repty: makeMyTypeNameReader(renv,td,repty))
# where:
# 'renv' will be the "reader environment" (necessary for any call to makeReader)
# 'td' will be the full type description where "MyTypeName" appears at root (e.g. for 'fileref', App(Prim('fileref', ...), [Arr(Prim('char'))]))
# 'repty' will be the determined 'representation type' (which can also be determined through 'td')
# and:
# the returned 'reader' must be a class with a "read" function like:
# def read(self,m,o):
# where
# 'm' will give access to the memory for the file being read out of
# 'o' will be the memory offset where the value to be read is placed
# and:
# the returned value may be whatever the application decides is sensible
#
# 2. Just use the decorator RegReader like:
# @RegReader("MyTypeName")
# class MyTypeReader:
# def __init__(self, renv, ty, repty):
# pass
# def read(self, m, offset):
# pass
#
########################################################
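# Illustrative usage sketch (the file path, field name 'prices' and the type name
# 'mytype' below are hypothetical, not part of this module):
#
#   f = fregion.FRegion('/path/to/data.db')   # open a structured data file
#   print(f)                                   # lists stored field names and types
#   print(meta(f).prices)                      # type/offset metadata for field 'prices'
#   xs = f.prices                              # decoded according to its stored type description
#
#   # a minimal custom reader that just delegates to the representation type's reader:
#   @RegReader("mytype")
#   class MyTypeReader:
#     def __init__(self, renv, ty, repty): self.r = makeReader(renv, repty)
#     def read(self, m, offset):           return self.r.read(m, offset)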
import os
import mmap
import struct
import math
import datetime
import uuid
import base64
#######
#
# useful tools
#
#######
class Loader:
"""
Lazy loading data from file
"""
def __init__(self, fn, *args, **kw):
self.fn = fn
self.args = args
self.kw = kw
def __getitem__(self, k):
if self.fn:
return self.fn(*self.args, **self.kw)[k]
return None
def __call__(self):
if self.fn:
v = self.fn(*self.args, **self.kw)
if isinstance(v, ArrReaderGenerator):
return v()
return v
return None
@property
def value(self):
if self.fn:
return self.fn(*self.args, **self.kw)
return None
def __str__(self):
return "{}".format(self.fn(*self.args, **self.kw))
def reader(self):
return self.fn.im_self
def LazyRead(enable):
"""
    decorator to enable/disable lazy reading of a field
"""
def apply(func):
def wrapper(*args, **kw):
return Loader(func, *args, **kw)
if enable:
return wrapper
else:
return func
return apply
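# Usage sketch (illustrative): a read() wrapped by @LazyRead(True) hands back a Loader
# instead of the decoded value, so the underlying bytes are only touched on demand:
#   v = reader.read(m, offset)   # returns a Loader; nothing decoded yet
#   v.value                      # triggers the actual read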
#######
#
# Type Descriptions
#
#######
class Prim:
def __init__(self, name, rep):
self.name = name
self.rep = rep
def __eq__(self,x): return isinstance(x,Prim) and self.name==x.name and self.rep==x.rep
def __repr__(self): return '()' if self.name=="unit" else self.name
class Var:
def __init__(self, name):
self.name = name
def __eq__(self,x): return isinstance(x,Var) and self.name==x.name
def __repr__(self): return self.name
class FixedArr:
def __init__(self, ty, tlen):
self.ty = ty
self.tlen = tlen
def __eq__(self,x): return isinstance(x,FixedArr) and self.ty==x.ty and self.tlen==x.tlen
def __repr__(self): return '[:' + str(self.ty) + '|' + str(self.tlen) + ':]'
class Arr:
def __init__(self, ty):
self.ty = ty
def __eq__(self,x): return isinstance(x,Arr) and self.ty==x.ty
def __repr__(self): return '[' + str(self.ty) + ']'
class Variant:
def __init__(self, ctors):
self.ctors = ctors
def __eq__(self,x): return isinstance(x,Variant) and self.ctors==x.ctors
def __repr__(self):
if (len(self.ctors) == 0):
return 'void'
elif (self.isSum()):
return self.showAsSum()
else:
return self.showAsVariant()
def isSum(self):
return len(self.ctors)>0 and self.ctors[0][0][0] == '.'
def showAsSum(self):
s = '('
s += str(self.ctors[0][2])
for i in range(1, len(self.ctors)):
s += '+' + str(self.ctors[i][2])
s += ')'
return s
def showAsVariant(self):
s = '|'
s += self.descCtor(self.ctors[0])
for i in range(1,len(self.ctors)):
s += ', '
s += self.descCtor(self.ctors[i])
s += '|'
return s
def descCtor(self, ctor):
return ctor[0] + ':' + str(ctor[2])
class Struct:
def __init__(self, fields):
self.fields = fields
def __eq__(self,x): return isinstance(x,Struct) and self.fields==x.fields
def __repr__(self):
if (len(self.fields) == 0):
return '()'
elif (self.isTuple()):
return self.showAsTuple()
else:
return self.showAsStruct()
def isTuple(self):
return len(self.fields)>0 and self.fields[0][0][0] == '.'
def showAsTuple(self):
s = '('
s += str(self.fields[0][2])
for i in range(1,len(self.fields)):
s += '*' + str(self.fields[i][2])
s += ')'
return s
def showAsStruct(self):
s = '{'
s += self.descField(self.fields[0])
for i in range(1,len(self.fields)):
s += ', '
s += self.descField(self.fields[i])
s += '}'
return s
def descField(self, field):
return field[0] + ':' + str(field[2])
class TLong:
def __init__(self, n):
self.n = n
def __eq__(self,x): return isinstance(x,TLong) and self.n==x.n
def __repr__(self): return str(self.n)
class App:
def __init__(self,f,args):
self.f = f
self.args = args
def __eq__(self,x): return isinstance(x,App) and self.f==x.f and self.args==x.args
def __repr__(self):
if (isinstance(self.f,Prim)):
if (self.f.name == "fileref" and len(self.args)>0):
return str(self.args[0])+"@?"
return self.showGeneric()
def showGeneric(self):
s = str(self.f) + '('
if (len(self.args)>0):
s += str(self.args[0])
for i in range(1,len(self.args)):
s += ', ' + str(self.args[i])
s += ')'
return s
class Recursive:
def __init__(self,vn,ty):
self.vn = vn
self.ty = ty
def __repr__(self):
return '^' + self.vn + '.' + str(self.ty)
class Abs:
def __init__(self,vns,ty):
self.vns = vns
self.ty = ty
def __repr__(self):
s = '\\'
if (len(self.vns)>0):
s += self.vns[0]
for i in range(1,len(self.vns)):
s += ', ' + self.vns[i]
s += '.' + str(self.ty)
return s
class TyCase:
def __init__(self, dtors):
self.dtors = dtors
def apply(self,ty):
if (isinstance(ty,Prim)):
return self.dtors["prim"](ty)
elif (isinstance(ty,Var)):
return self.dtors["var"](ty)
elif (isinstance(ty,FixedArr)):
return self.dtors["farr"](ty)
elif (isinstance(ty,Arr)):
return self.dtors["arr"](ty)
elif (isinstance(ty,Variant)):
return self.dtors["variant"](ty)
elif (isinstance(ty,Struct)):
return self.dtors["struct"](ty)
elif (isinstance(ty,TLong)):
return self.dtors["long"](ty)
elif (isinstance(ty,App)):
return self.dtors["app"](ty)
elif (isinstance(ty,Recursive)):
return self.dtors["rec"](ty)
elif (isinstance(ty,Abs)):
return self.dtors["abs"](ty)
else:
raise Exception("Can't deconstruct unknown type description")
def fail(msg):
raise Exception(msg)
def dictWithout(m,k):
r=m.copy()
r.pop(k,None)
return r
def dictWithouts(m,ks):
r=m.copy()
for k in ks:
r.pop(k,None)
return r
def addFreeVar(m,vn):
m[vn]=None
def freeVarsInto(m,ty):
tyDisp = {
"prim": lambda p: None,
"var": lambda v: addFreeVar(m,v.name),
"farr": lambda fa: (freeVarsInto(m,fa.ty), freeVarsInto(m,fa.tlen)),
"arr": lambda a: freeVarsInto(m,a.ty),
"variant": lambda v: [freeVarsInto(m,ctor[2]) for ctor in v.ctors],
"struct": lambda s: [freeVarsInto(m,field[2]) for field in s.fields],
"long": lambda n: None,
"app": lambda a: (freeVarsInto(m,a.f), [freeVarsInto(m,arg) for arg in f.args]),
"rec": lambda r: m.update(dictWithout(freeVars(r.ty),r.vn)),
"abs": lambda a: m.update(dictWithouts(freeVars(a.ty),a.vns))
}
return TyCase(tyDisp).apply(ty)
def freeVars(ty):
m={}
freeVarsInto(m,ty)
return m
def dictFreeVars(m):
lm={}
for n, ty in m.items():
freeVarsInto(lm,ty)
return lm
def freeName(m):
vn='t0'
n=0
while (True):
if (not(vn in m)):
break
else:
n+=1
vn='t'+str(n)
return vn
def substituteInVariant(m,v):
ctors=[]
for ctor in v.ctors:
ctors.append((ctor[0], ctor[1], substitute(m, ctor[2])))
return Variant(ctors)
def substituteInStruct(m,s):
fields=[]
for field in s.fields:
fields.append((field[0], field[1], substitute(m,field[2])))
return Struct(fields)
def substituteInApp(m,a):
args=[]
for ty in a.args:
args.append(substitute(m,ty))
return App(substitute(m,a.f),args)
def substituteInRec(m,r):
lm=dictWithout(m,r.vn)
fvs=dictFreeVars(lm)
if (r.vn in fvs):
nn=freeName(fvs)
return Recursive(nn, substitute(lm, substitute({r.vn:Var(nn)},r.ty)))
else:
return Recursive(r.vn, substitute(lm, r.ty))
def substituteInAbs(m,a):
lm=dictWithouts(m,a.vns)
fvs=dictFreeVars(lm)
vns=[]
for vn in a.vns:
if (vn in fvs):
nn=freeName(lm)
lm[vn] = Var(nn)
vns.append(nn)
else:
vns.append(vn)
if (vns!=a.vns):
return Abs(vns, substitute(lm,a.ty))
else:
return Abs(a.vns, substitute(lm,a.ty))
def substitute(m,ty):
tyDisp = {
"prim": lambda p: Prim(p.name,substitute(m,p.rep)) if (p.rep != None) else p,
"var": lambda v: m[v.name] if (v.name in m.keys()) else v,
"farr": lambda fa: FixedArr(substitute(m,fa.ty), substitute(m,fa.tlen)),
"arr": lambda a: Arr(substitute(m,a.ty)),
"variant": lambda v: substituteInVariant(m,v),
"struct": lambda s: substituteInStruct(m,s),
"long": lambda n: n,
"app": lambda a: substituteInApp(m,a),
"rec": lambda r: substituteInRec(m,r),
"abs": lambda a: substituteInAbs(m,a)
}
return TyCase(tyDisp).apply(ty)
def expectFn(ty):
if (isinstance(ty,Prim)):
if (ty.rep == None):
if (ty.name == "fileref"):
return Abs(["t"], Prim("long",None))
else:
raise Exception("Expected function representation in place of primitive: " + ty.name)
else:
return expectFn(ty.rep)
elif (isinstance(ty,Abs)):
return ty
else:
raise Exception("Expected function in place of type: " + str(ty))
def evalApp(pf, args):
f = expectFn(pf)
if (len(args)!=len(f.vns)):
raise Exception("Arity mismatch in application (expected " + str(len(f.vns)) + " arguments): " + str(App(pf,args)))
m={}
for i in range(len(f.vns)):
m[f.vns[i]] = args[i]
return substitute(m, f.ty)
#######
#
# determine memory layout of any type
#
#######
def align(x, b):
if (x % b == 0):
return x
else:
return b*(int(x/b)+1)
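# e.g. align(5, 4) == 8 and align(8, 4) == 8 -- offsets are rounded up to the next
# multiple of the alignment boundary.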
def alignOfStruct(s):
a=1
for field in s.fields:
a=max(a,alignOf(field[2]))
return a
def alignOfVariant(v):
a=4
for ctor in v.ctors:
a=max(a,alignOf(ctor[2]))
return a
def alignOfApp(a):
return alignOf(evalApp(a.f, a.args))
def alignOf(ty):
tyDisp = {
"prim": lambda p: 1 if (p.name == "unit") else alignOf(p.rep) if (p.rep != None) else sizeOfPrim(p),
"var": lambda v: fail("Can't determine alignment of type variable: " + v.name),
"farr": lambda fa: alignOf(fa.ty),
"arr": lambda a: fail("Can't determine alignment of variable-length array: " + str(a)),
"variant": lambda v: alignOfVariant(v),
"struct": lambda s: alignOfStruct(s),
"long": lambda n: fail("Can't get alignment of type-level number: " + str(n.n)),
"app": lambda a: alignOfApp(a),
"rec": lambda r: fail("Can't get alignment of recursive type: " + str(r)),
"abs": lambda a: fail("Can't get alignment of type-level function: " + str(a))
}
return TyCase(tyDisp).apply(ty)
def sizeOfPrim(p):
if (p.rep != None):
return sizeOf(p.rep)
else:
if (p.name == "unit"):
return 0
elif (p.name == "bool"):
return 1
elif (p.name == "byte"):
return 1
elif (p.name == "char"):
return 1
elif (p.name == "short"):
return 2
elif (p.name == "int"):
return 4
elif (p.name == "long"):
return 8
elif (p.name == "float"):
return 4
elif (p.name == "double"):
return 8
else:
raise Exception("Can't determine size of unknown primitive type: " + p.name)
def sizeOfStruct(s):
o=0
for f in s.fields:
o = align(o, alignOf(f[2])) + sizeOf(f[2])
return align(o, alignOf(s))
def sizeOfVariant(v):
a=alignOf(v)
maxsz=0
for ctor in v.ctors:
maxsz=max(maxsz,sizeOf(ctor[2]))
return align(align(4,a)+maxsz,a)
def sizeOfApp(a):
return sizeOf(evalApp(a.f, a.args))
def sizeOf(ty):
tyDisp = {
"prim": lambda p: sizeOfPrim(p),
"var": lambda v: fail("Can't determine size of type variable: " + v.name),
"farr": lambda fa: sizeOf(fa.ty)*fa.tlen.n,
"arr": lambda a: fail("Can't determine size of variable-length array: " + str(a)),
"variant": lambda v: sizeOfVariant(v),
"struct": lambda s: sizeOfStruct(s),
"long": lambda n: fail("Can't get size of type-level number: " + str(n.n)),
"app": lambda a: sizeOfApp(a),
"rec": lambda r: fail("Can't get size of recursive type: " + str(r)),
"abs": lambda a: fail("Can't get size of type-level function: " + str(a))
}
return TyCase(tyDisp).apply(ty)
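# Worked example (illustrative): for the tuple type (char * int), i.e.
#   Struct([('.f0', 0, Prim('char', None)), ('.f1', 1, Prim('int', None))])
# alignOf(...) == 4 and sizeOf(...) == 8: the char sits at offset 0, the int is
# aligned up to offset 4, and the struct is padded out to a multiple of its alignment.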
#######
#
# Type Description Decoding
#
#######
# a cheap cursor
class ReadPos:
def __init__(self):
self.pos = 0
def __repr__(self): return str(self.pos)
# type descriptions
TYCTOR_PRIM = 0
TYCTOR_TVAR = 2
TYCTOR_FIXEDARR = 4
TYCTOR_ARR = 5
TYCTOR_VARIANT = 6
TYCTOR_STRUCT = 7
TYCTOR_SIZE = 11
TYCTOR_TAPP = 12
TYCTOR_RECURSIVE = 13
TYCTOR_TABS = 15
def decodeBool(d, p):
b = struct.unpack('B', d[p.pos:p.pos+1])[0]
p.pos += 1
return b != 0
def decodeInt(d, p):
n = struct.unpack('I', d[p.pos:p.pos+4])[0]
p.pos += 4
return n
def decodeLong(d, p):
n = struct.unpack('Q', d[p.pos:p.pos+8])[0]
p.pos += 8
return n
def decodeStr(d, p):
n = decodeLong(d,p)
s = str(d[p.pos:p.pos+n])
p.pos += n
return s
def decodeTypeDesc(d, p):
c = decodeInt(d,p)
if (c == TYCTOR_PRIM):
name = decodeStr(d, p)
if (decodeBool(d, p)):
return Prim(name, decodeTypeDesc(d, p))
else:
return Prim(name, None)
elif (c == TYCTOR_TVAR):
name = decodeStr(d, p)
return Var(name)
elif (c == TYCTOR_FIXEDARR):
ty = decodeTypeDesc(d, p)
tlen = decodeTypeDesc(d, p)
return FixedArr(ty, tlen)
elif (c == TYCTOR_ARR):
ty = decodeTypeDesc(d, p)
return Arr(ty)
elif (c == TYCTOR_VARIANT):
n = decodeLong(d,p)
ctors = []
for i in range(n):
name = decodeStr(d,p)
cid = decodeInt(d,p)
ty = decodeTypeDesc(d,p)
ctors.append((name,cid,ty))
return Variant(ctors)
elif (c == TYCTOR_STRUCT):
n = decodeLong(d,p)
fields = []
for i in range(n):
name = decodeStr(d,p)
cid = decodeInt(d,p)
ty = decodeTypeDesc(d,p)
fields.append((name,cid,ty))
return Struct(fields)
elif (c == TYCTOR_SIZE):
return TLong(decodeLong(d,p))
elif (c == TYCTOR_TAPP):
f = decodeTypeDesc(d,p)
n = decodeLong(d,p)
args = []
for i in range(n):
args.append(decodeTypeDesc(d,p))
return App(f,args)
elif (c == TYCTOR_RECURSIVE):
vn = decodeStr(d,p)
ty = decodeTypeDesc(d,p)
return Recursive(vn,ty)
elif (c == TYCTOR_TABS):
n = decodeLong(d,p)
vns = []
for i in range(n):
vns.append(decodeStr(d,p))
ty = decodeTypeDesc(d,p)
return Abs(vns,ty)
else:
raise Exception('Not a supported type constructor ID: ' + str(c))
#######
#
# Version updates as type transforms (where possible)
#
#######
def V1toV2Type(ty):
tyDisp = {
"prim": lambda p: p if (p.rep == None) else Prim(p.name, V1toV2Type(p.rep)),
"var": lambda v: v,
"farr": lambda fa: FixedArr(V1toV2Type(fa.ty), V1toV2Type(fa.tlen)),
"arr": lambda a: App(Prim("darray", Abs(["t"], Prim("long", None))), [V1toV2Type(a.ty)]),
"variant": lambda v: Variant([(ctor[0], ctor[1], V1toV2Type(ctor[2])) for ctor in v.ctors]),
"struct": lambda s: Struct([(field[0], field[1], V1toV2Type(field[2])) for field in s.fields]),
"long": lambda n: n,
"app": lambda a: App(V1toV2Type(a.f), [V1toV2Type(arg) for arg in a.args]),
"rec": lambda r: Recursive(r.vn, V1toV2Type(r.ty)),
"abs": lambda a: Abs(a.vns, V1toV2Type(a.ty))
}
return TyCase(tyDisp).apply(ty)
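# e.g. (illustrative) a version-1 field typed [int] is reinterpreted as the version-2
# application darray(int), whose stored form carries an extra capacity word ahead of
# the usual (length, elements) layout -- see DArrReader below.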
#######
#
# File envelope decoding (read page data, environment data)
#
#######
# page entry decoding
def isEnvPage(p):
return (p >> 14) == 2
def availBytes(p):
return p & 0x3FFF
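# Example (illustrative): a page-table entry of 0x8000 | 100 marks an environment page
# (top two bits == 0b10) with 100 bytes still unused (low 14 bits).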
# a file variable definition
class EnvEntry:
def __init__(self, offset, ty):
self.offset = offset
self.ty = ty
def __repr__(self):
return str(self.ty) + "@" + str(self.offset)
# read file metadata
class FREnvelope:
def __init__(self, fpath):
self.p = fpath
self.f = open(self.p, 'r+b')
self.m = mmap.mmap(self.f.fileno(), 0, mmap.ACCESS_READ)
# make sure that the file header is what we expect
if (struct.unpack('I', self.m[0:4])[0] != 0x10A1DB0D):
raise Exception('Not a valid structured data file: ' + self.p)
self.pageSize = struct.unpack('H', self.m[4:6])[0]
self.version = struct.unpack('H', self.m[6:8])[0]
if (self.pageSize != 4096):
raise Exception('Expected 4K page size')
if (not(self.version in [1,2])):
raise Exception('Structured data file format version ' + str(self.version) + ' not supported')
# read the page data in this file
self.pages = []
self.readPageEntries(self.pages, 8, 4096)
# read the environment data in this file
self.env = dict([])
page=0
while (page < len(self.pages)):
if (isEnvPage(self.pages[page])):
page += self.readEnvPage(self.env, page)
else:
page += 1
# if reading the old format, we need to reinterpret recorded types
if (self.version == 1):
for vn, b in self.env.items():
b.ty = V1toV2Type(b.ty)
# read page data entries into the 'pages' argument
# if there is a link to a subsequent page to read page data from, follow it
def readPageEntries(self, pages, i, o):
k = i
e = o - 8
while (k < e):
p = struct.unpack('H', self.m[k:k+2])[0]
if (p == 0):
break
pages.append(p)
k += 2
n = struct.unpack('Q', self.m[e:e+8])[0]
if (n != 0):
self.readPageEntries(pages, n*4096, (n+1)*4096)
# read environment data into the 'env' argument out of 'page'
def readEnvPage(self, env, page):
initOffset = page * 4096
offset = initOffset
while (True):
offset = self.readEnvRecord(env, offset)
pos = offset - 1
tpage = int(pos / 4096)
rpos = (pos % 4096) + 1
if (rpos == (4096 - availBytes(self.pages[tpage]))):
break
return int(math.ceil((float(offset-initOffset))/4096.0))
def readEnvRecord(self, env, offset):
vpos = struct.unpack('Q', self.m[offset:offset+8])[0]
offset += 8
vnlen = struct.unpack('Q', self.m[offset:offset+8])[0]
offset += 8
vn = str(self.m[offset:offset+vnlen])
offset += vnlen
tylen = struct.unpack('Q', self.m[offset:offset+8])[0]
offset += 8
if (len(vn) > 0 and vn[0] != '.' and tylen > 0):
env[vn] = EnvEntry(vpos, decodeTypeDesc(self.m[offset:offset+tylen], ReadPos()))
offset += tylen
return offset
#######
#
# Read structured data
#
#######
class UnitReader:
def read(self,m,offset): return None
class UnpackReader:
def __init__(self,fmt,sz):
self.fmt = fmt
self.sz = sz
def read(self,m,offset):
return struct.unpack(self.fmt,m[offset:offset+self.sz])[0]
class FArrReader:
def __init__(self, renv, ty, c):
self.c = c
self.rdr = makeReader(renv, ty)
self.esz = sizeOf(ty)
def read(self,m,offset):
r=[]
o=offset
for i in range(self.c):
r.append(self.rdr.read(m,o))
o += self.esz
return r
def tupleReaders(renv, tys):
o = 0
os = []
rs = []
for ty in tys:
o = align(o, alignOf(ty))
os.append(o)
rs.append(makeReader(renv, ty))
o += sizeOf(ty)
return (os,rs)
class TupleReader:
def __init__(self, renv, tys):
os, rs = tupleReaders(renv, tys)
self.os = os
self.rs = rs
def read(self,m,offset):
vs=[]
for i in range(len(self.os)):
vs.append(self.rs[i].read(m,offset+self.os[i]))
return tuple(vs)
class StructView:
def __init__(self, fs, foffs, vs):
self.fs = fs
self.foffs = foffs
self.vs = vs
def __repr__(self):
r = '{'
if (len(self.vs)>0):
r += self.fs[0] + '=' + str(self.vs[0])
for i in range(1,len(self.vs)):
r += ', ' + self.fs[i] + '=' + str(self.vs[i])
r += '}'
return r
def __str__(self): return self.__repr__()
def __eq__(self,other):
if (not(isinstance(other,StructView))):
return False
else:
return self.fs == other.fs and self.vs == other.vs
def __getattr__(self, attr):
return self.vs[self.foffs[attr]].value
class StructReader:
def __init__(self, renv, fs, tys):
os, rs = tupleReaders(renv, tys)
self.fs = fs
self.os = os
self.rs = rs
foffs={}
for i in range(len(self.fs)):
foffs[self.fs[i]] = i
self.foffs = foffs
def read(self,m,offset):
vs=[]
for i in range(len(self.os)):
vs.append(Loader(self.rs[i].read ,m,offset+self.os[i]))
return StructView(self.fs, self.foffs, vs)
class MaybeReader:
def __init__(self, renv, ty):
self.poff = align(4, alignOf(ty))
self.tr = UnpackReader('I', 4)
self.jr = makeReader(renv, ty)
def read(self,m,offset):
t = self.tr.read(m,offset)
if (t == 0):
return None
else:
return self.jr.read(m,offset+self.poff)
class EnumView:
def __init__(self, ns, t):
self.ns = ns
self.t = t
def __repr__(self):
return '|' + str(self.ns.get(self.t)) + '|'
class EnumReader:
def __init__(self, ctors):
self.tr = UnpackReader('I',4)
ns={}
for ctor in ctors:
ns[ctor[1]] = ctor[0]
self.ns = ns
def read(self,m,offset):
t = self.tr.read(m,offset)
return EnumView(self.ns, t)
class VariantView:
def __init__(self, cn, value):
self.cn = cn
self.v = value
@property
def value(self):
return self.v()
def __repr__(self):
if (len(self.cn)>0 and self.cn[0] == '.'):
return "|" + self.cn[2:] + "=" + str(self.v) + "|"
else:
return "|" + self.cn + "=" + str(self.v) + "|"
class VariantReader:
def __init__(self, renv, ctors):
poff=4
crs={}
cns={}
for ctor in ctors:
poff = align(poff, alignOf(ctor[2]))
crs[ctor[1]] = makeReader(renv, ctor[2])
cns[ctor[1]] = ctor[0]
self.tr = UnpackReader('I', 4)
self.poff = poff
self.crs = crs
self.cns = cns
def read(self,m,offset):
t = self.tr.read(m,offset)
return VariantView(self.cns[t], Loader(self.crs[t].read, m, offset+self.poff))
class StrReader:
def __init__(self):
self.nr = UnpackReader('Q',8)
def read(self,m,offset):
n=self.nr.read(m,offset)
return m[offset+8:offset+8+n]
class ArrReaderGenerator:
def __init__(self, m, reader, size, offset):
self.r = reader.r
self.size = size
self.offset = offset
self.m = m
self.vlen = reader.vlen
def __len__(self):
return self.size
def __call__(self):
o = self.offset
for i in xrange(0, self.size):
tv = self.get(i)
o += self.vlen
yield(tv)
def __getitem__(self, i):
if not isinstance(i, (int,long)):
raise StopIteration
return self.get(i)
def get(self, index):
if index >= self.size:
raise StopIteration
o = self.offset + self.vlen * index
return self.r.read(self.m, o)
class ArrReader:
def __init__(self,renv,ty):
self.nr = UnpackReader('Q',8)
self.r = makeReader(renv,ty)
self.vlen = sizeOf(ty)
def read(self,m,offset):
n=self.nr.read(m,offset)
return ArrReaderGenerator(m, self, n, offset+8)
class NYIReader:
def read(self,m,offset):
raise Exception("nyi")
globalTypeExts={}
def makeCustomReader(name, renv, ty, repty):
mkR = globalTypeExts.get(name)
if (mkR != None):
return mkR(renv, ty, repty)
else:
raise Exception("I don't know how to decode this type: " + str(ty))
def makePrimReader(renv, p):
if (p.name == "unit"):
return UnitReader()
elif (p.name == "bool"):
return UnpackReader('?', 1)
elif (p.name == "char"):
return UnpackReader('c', 1)
elif (p.name == "byte"):
return UnpackReader('B', 1)
elif (p.name == "short"):
return UnpackReader('H', 2)
elif (p.name == "int"):
return UnpackReader('I', 4)
elif (p.name == "long"):
return UnpackReader('Q', 8)
elif (p.name == "float"):
return UnpackReader('f', 4)
elif (p.name == "double"):
return UnpackReader('d', 8)
elif (p.rep != None):
return makeCustomReader(p.name, renv, p, p.rep)
else:
raise Exception("I don't know how to decode the primitive type: " + p.name)
def makeFArrReader(renv,fa):
return FArrReader(renv, fa.ty, fa.tlen.n)
def makeArrReader(renv,a):
if (isinstance(a.ty,Prim) and a.ty.name == "char"):
return StrReader()
else:
return ArrReader(renv,a.ty)
def makeVariantReader(renv,v):
if (len(v.ctors)==2 and v.ctors[0][0] == ".f0" and v.ctors[0][1] == 0 and isinstance(v.ctors[0][2],Prim) and v.ctors[0][2].name == "unit"):
return MaybeReader(renv,v.ctors[1][2])
elif (all(map(lambda c: isinstance(c[2],Prim) and c[2].name=="unit", v.ctors))):
return EnumReader(v.ctors)
else:
return VariantReader(renv,v.ctors)
def makeStructReader(renv,s):
if (len(s.fields) == 0):
return UnitReader()
elif (s.fields[0][0][0] == '.'): # should we read this as a tuple?
return TupleReader(renv, map(lambda f:f[2], s.fields))
else:
return StructReader(renv, map(lambda f:f[0], s.fields), map(lambda f:f[2], s.fields))
def makeAppReader(renv,app):
if (isinstance(app.f,Prim)):
return makeCustomReader(app.f.name, renv, app, evalApp(app.f, app.args))
else:
raise Exception("I don't know how to read '" + str(app) + "'")
class RecReader:
def __init__(self):
self.r = None
def read(self,m,offset):
return self.r.read(m,offset)
def makeRecReader(renv, rec):
o = renv.get(rec.vn)
r = RecReader()
renv[rec.vn] = r
r.r = makeReader(renv, rec.ty)
if (o != None):
renv[rec.vn]=o
else:
renv.pop(rec.vn, None)
return r
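# e.g. (illustrative) for a recursive type such as ^x.(()+(int*x@?)), the RecReader
# registered under 'x' lets the nested x@? reference resolve back to the same reader,
# so arbitrarily deep values decode without rebuilding readers.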
def makeVarReader(renv, vn):
if vn in renv:
return renv[vn]
else:
raise Exception("Can't make reader with variable not in environment: " + vn)
def makeReader(renv,ty):
readerDisp = {
"prim": lambda p: makePrimReader(renv, p),
"var": lambda v: makeVarReader(renv, v.name),
"farr": lambda fa: makeFArrReader(renv,fa),
"arr": lambda a: makeArrReader(renv,a),
"variant": lambda v: makeVariantReader(renv,v),
"struct": lambda s: makeStructReader(renv,s),
"long": lambda n: fail("Can't read type-level number: " + str(n.n)),
"app": lambda a: makeAppReader(renv,a),
"rec": lambda r: makeRecReader(renv,r),
"abs": lambda a: fail("Can't read type-level function: " + str(a))
}
return TyCase(readerDisp).apply(ty)
#######
#
# the user interface to structured data
#
#######
def formatRow(cns, cs, r):
s=''
for k in range(len(cs)-1):
s += cs[k][r].ljust(cns[k], ' ')
s += cs[len(cs)-1][r]
return s
def tableFormat(cs):
cns=[]
rc=0
for c in cs:
n = 0
rc = len(c) if rc==0 else min(rc, len(c))
for s in c:
n = max(n, len(s))
cns.append(n)
s = ''
if (rc > 0):
s = formatRow(cns, cs, 0)
for r in range(1, rc):
s += '\n' + formatRow(cns, cs, r)
return s
class FRegion:
def __init__(self, fpath):
self.rep = FREnvelope(fpath)
for vn, bind in self.rep.env.items():
bind.reader = makeReader({}, bind.ty)
@staticmethod
def addType(name, gen):
globalTypeExts[name] = gen
def __str__(self): return self.__repr__()
def __repr__(self):
vns = []
hts = []
tds = []
for vn, bind in self.rep.env.items():
vns.append(vn)
hts.append(' :: ')
tds.append(str(bind.ty))
return tableFormat([vns, hts, tds])
def __getattr__(self, attr):
b = self.rep.env.get(attr, None)
if (b == None):
raise Exception("FRegion has no field named '" + attr + "'")
else:
return b.reader.read(self.rep.m, b.offset)
class FRMeta:
def __init__(self, f): self.f = f
def __repr__(self): return repr(self.f)
def __getattr__(self, attr):
b = self.f.rep.env.get(attr, None)
if (b == None):
raise Exception("FRegion has no field named '" + attr + "'")
else:
return b
def meta(f): return FRMeta(f)
#######
#
# support common "application types" by default
#
#######
def RegReader(desc):
def newCls(cls):
FRegion.addType(desc, lambda renv, ty, repty: cls(renv, ty, repty))
return cls
return newCls
# date/time
@RegReader("datetime")
class DateTimeReader:
def __init__(self, renv, ty, repty):
self.nr = makeReader(renv, repty)
def read(self,m,o):
return datetime.datetime.fromtimestamp(self.nr.read(m,o)/1000000.0)
# file refs (within-file pointer types)
@RegReader("fileref")
class FileRefReader:
def __init__(self,renv,ty,repty):
self.refr = makeReader(renv,repty)
self.r = makeReader(renv,ty.args[0])
def read(self,m,offset):
o=self.refr.read(m,offset)
if (o==0):
return None
else:
return self.r.read(m,o)
# carrays (variable-length arrays stored with a static capacity)
FRegion.addType("carray", lambda renv, ty, repty: makeArrReader(renv, Arr(ty.args[0])))
# darrays (old style variable-length arrays stored with capacity)
@RegReader("darray")
class DArrReader:
def __init__(self,renv,ty,repty):
self.ar = makeArrReader(renv,Arr(ty.args[0]))
def read(self,m,offset):
return self.ar.read(m,offset+8)
# skip-list maps
class SLView:
def __init__(self,sl):
self.sl=sl
@staticmethod
def findNextGLEB(n, level, k):
while (not(n==None)):
sn=n.next[level]
if (sn==None or k < sn.key):
if (level==0):
return n
else:
level=level-1
elif (sn.key <= k):
n = sn
else:
return n
def __getitem__(self,k):
if (self.sl.count==0):
return None
else:
n = SLView.findNextGLEB(self.sl.root, len(self.sl.root.next)-1, k)
if (not(n == None) and n.key==k):
return n.value
else:
return None
def __contains__(self,k):
if (self.sl.count==0):
return False
else:
n=SLView.findNextGLEB(self.sl.root, len(self.sl.root.next)-1, k)
return (not(n==None) and n.key==k)
def __iter__(self):
n=self.sl.root.next[0]
while (not(n==None)):
yield (n.key,n.value)
n=n.next[0]
def __len__(self): return self.sl.count
def __str__(self): return self.__repr__()
def __repr__(self):
ks=[]
eqs=[]
vs=[]
n=self.sl.root().next[0]
while (not(n == None)):
ks.append(str(n.key))
eqs.append(' = ')
vs.append(str(n.value))
n=n.next[0]
return tableFormat([ks,eqs,vs])
@RegReader("slmap")
class SLMapReader:
def __init__(self,renv,ty,repty):
self.sr = makeReader(renv, repty)
def read(self,m,offset):
return SLView(self.sr.read(m,offset))
#uuid
class HobUUID(uuid.UUID):
def __init__(self, *args, **kwargs):
uuid.UUID.__init__(self, *args, **kwargs)
def __str__(self):
return base64.b64encode(self.bytes, '-_')[:-2] + 'A'
@staticmethod
def bytes2uuid(bs):
return HobUUID(bytes=''.join(chr(e) for e in bs))
@RegReader("uuid")
class UuidReader:
def __init__(self, renv, repty, ty):
self.nr = FArrReader(renv, Prim("byte", None), 16)
@LazyRead(True)
def read(self, m, o):
bs = self.nr.read(m,o)
return HobUUID(bytes=''.join(chr(e) for e in bs))
#######
#
# Stored sequences (fseq T n) with representation (^x.(()+((carray T n) * x@?)))@?
#
#######
class RecStream:
def __init__(self, stream):
self.nodes = []
self._reload(stream)
def _reload(self, stream):
self.data = stream
def generate_node(s):
if s[0]:
self.nodes.append(s[0])
if s[1] != None:
generate_node(s[1])
generate_node(stream)
def iter(self):
for nd in self.nodes:
for v in nd():
yield v
def __len__(self):
return sum((len(x) for x in self.nodes))
def __str__(self):
sz = 0
content = ""
for v in self.iter():
sz += 1
if sz > 10:
content += "... ... ..."
break
content += "{}. {}\n".format(sz, v)
return content
def __getitem__(self, i):
c = 0
for nd in self.nodes:
if i >= (c + len(nd)):
c += len(nd)
else:
return nd[i-c]
raise StopIteration
@RegReader("fseq")
class FSeqReader:
def __init__(self, renv, repty, ty):
self.rr = makeReader(renv, ty)
def read(self, m, o):
return RecStream(self.rr.read(m,o))
| apache-2.0 | -8,184,452,706,866,680,000 | 25.073266 | 152 | 0.582207 | false | 2.864286 | false | false | false |
tiagoams/blueC_fluxes | int_adv.py | 1 | 2848 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
budget_term_densities
Calculates density maps for nutrient budget terms from NEMO-ERSEM output.
NERC-DEFRA SSB-BlueC projects
Created on Tue Jan 24 09:18:52 2017
@author: TAMS00
"""
#import pandas as pd
import netCDF4
import xarray as xr
import numpy as np
import os
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap, cm
import argparse
if (('Windows' in os.environ['OSTYPE']) and
(os.environ['COMPUTERNAME']=='PC4447')):
base='c:/Users/tams00/Documents/nerc_ssb/c_fluxes/AMM7-HINDCAST-v0-erosion'
else:
base='/nerc/n01/n01/momme/AMM7-HINDCAST-v0-erosion'
modelpaths=[os.path.join(base+'/1981/01/','amm7_1d_19810101_19810131_grid_T.nc')]#,
#os.path.join(base+'/1981/02/','amm7_1d_19810201_19810228_grid_T.nc')]
#os.path.join(base+'/1981/01/','restart_trc.nc'),
#modelvars=[['Y4_fdetrc_result']]
#modelvars=[['net_PelBen_POC_result','G3_c_pb_flux','nav_lon','nav_lat'],
#['fabm_st2DnQ1_c','fabm_st2DnQ6_c','fabm_st2DnQ7_c','fabm_st2DnQ17_c','fabm_st2DnH1_c','fabm_st2DnH2_c','fabm_st2DnY2_c','fabm_st2DnY3_c','fabm_st2DnY4_c','fabm_st2DnG3_c'],
#['fabm_st2DnQ1_c','fabm_st2DnQ6_c','fabm_st2DnQ7_c','fabm_st2DnQ17_c','fabm_st2DnH1_c','fabm_st2DnH2_c','fabm_st2DnY2_c','fabm_st2DnY3_c','fabm_st2DnY4_c','fabm_st2DnG3_c']]
par_3d=['TRNO3_c','TRNP1_c','TRNP2_c','TRNP3_c','TRNP4_c','TRNB1_c','TRNZ4_c','TRNZ5_c','TRNZ6_c','TRNR4_c','TRNR6_c','TRNR8_c','TRNR1_c','TRNR2_c','TRNR3_c','TRNL2_c']
par_2d=['fabm_st2DnQ1_c','fabm_st2DnQ6_c','fabm_st2DnQ7_c','fabm_st2DnQ17_c','fabm_st2DnH1_c','fabm_st2DnH2_c','fabm_st2DnY2_c','fabm_st2DnY3_c','fabm_st2DnY4_c','fabm_st2DnG3_c']
adv_3d=['XAD_O3_c_e3t']
modelvars=adv_3d
# main() takes an optional 'argv' argument, which allows us to call it from the interactive Python prompt:
def main(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument('--basedir',nargs=1,help='base directory with model files')
args = parser.parse_args()
print(args)
    # --basedir (nargs=1) overrides the module-level default path when given
    if args.basedir:
        base = args.basedir[0]
# Naughty datasets might require decode_cf=False
# Here it just needed decode_times=False
print('********************')
print(modelpaths[0])
#data = xr.open_dataset(modelpaths[0],decode_times=False)
modelout = xr.open_mfdataset(modelpaths) #,decode_times=False)
#print(modelout)
for modelvar in modelvars:
vardf=modelout[modelvar]
print(vardf)
# print attributes
for at in vardf.attrs:
print(at+':\t\t',end=' ')
print(vardf.attrs[at])
timeavg=vardf.mean('time_counter')
timeavg.plot()
if __name__ == "__main__":
main()
| gpl-3.0 | 214,627,983,886,993,120 | 32.313253 | 179 | 0.629916 | false | 2.375313 | false | false | false |
unibg-gislab/treets | treets/db_client.py | 1 | 5982 | #! /urs/bin/python
# coding: utf8
from __future__ import print_function
import pymongo
from random import uniform
TWEETS_LIMIT = 0
TRACES_LIMIT = 0
class DBClient(object):
'''Docstring for DBClient'''
def __init__(self):
super(DBClient, self).__init__()
self.mongo = pymongo.MongoClient()
self.db = self.mongo.treets
self.db.tweets.create_index('userName')
self.db.users.create_index([('userName', 'text')])
self.db.tweets.create_index([('textMessage', 'text')])
self.db.tweets.ensure_index([('location', pymongo.GEOSPHERE)])
#self.users = self.tweets.distinct('userName')[:limit]
def setup_db(self):
self.create_locations()
self.check_text_index()
self.create_users_collection()
self.remove_users_and_tweets(100)
def remove_users_and_tweets(self, threshold_max, threshold_min=1):
found = self.db.users.find( { '$where': 'this.tweetsIds.length >' + str(threshold_max) })
for u in found:
self.db.tweets.remove({'_id': {'$in': u['tweetsIds']}})
self.db.users.remove( u )
def create_users_collection(self):
self.db.users.remove()
users = self.db.tweets.distinct('userName')
users_coll = []
for u in users:
user = {}
user['userName'] = u
user['tweetsIds'] = self.db.tweets.find({'userName': u}).distinct('_id')
users_coll.append(user)
self.db.users.insert(users_coll)
def create_locations(self):
print('creating locations for geo-indexing, this may take a while')
for t in self.db.tweets.find():
coords = t['geo']
t['location'] = {'type': 'Point', 'coordinates': coords[::-1]}
self.db.tweets.save(t)
self.db.tweets.ensure_index([('location', pymongo.GEOSPHERE)])
def check_text_index(self):
try:
self.db.tweets.create_index([('textMessage', 'text')])
except:
print('converting texts to unicode, this may take a while')
for t in self.db.tweets.find():
t['textMessage'] = unicode(t['textMessage'])
self.db.tweets.save(t)
self.db.tweets.create_index([('textMessage', 'text')])
def get_tweets(self, limit=TWEETS_LIMIT):
'''
Returns first <limit> tweets
'''
return self.db.tweets.find().sort([('_id', -1)]).limit(limit)
def get_random_tweets(self, limit=TWEETS_LIMIT):
'''
returns <limit> random tweets
'''
lenght = self.db.tweets.find().count()
rand = int(uniform(0, 1)*lenght)
return self.db.tweets.find().limit(limit).skip(rand)
def get_tweets_near_point(self, coords, dist, limit=TWEETS_LIMIT):
'''
        returns <limit> tweets within <dist> meters from coords
'''
return self.db.tweets.find({
'location': {
'$nearSphere': {
'$geometry': {
'type': 'Point', 'coordinates': coords
}, '$maxDistance': dist
}
}
}).sort([('_id', -1)])
def get_tweets_near_point_and_text(self, coords, dist, text, limit=TWEETS_LIMIT):
'''
        returns tweets within <dist> kilometers of coords whose text matches <text>
'''
return self.db.tweets.find(
{
"$and":
[{"location":{'$geoWithin':{'$centerSphere': [coords,dist/6378.1]}}},
{'$text':{'$search': text}}]
}).sort([('_id', -1)])
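    # Usage sketch (illustrative coordinates/search term, given as [lng, lat]):
    #   client.get_tweets_near_point([9.19, 45.46], 500)                 # $maxDistance is in metres
    #   client.get_tweets_near_point_and_text([9.19, 45.46], 5, 'expo')  # dist/6378.1 converts km to radians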
def get_tweets_for_text(self, text, limit=TWEETS_LIMIT):
'''
search for tweets containing <text> and returns results
'''
return self.db.tweets.find({'$text': {'$search': text}}).sort([('_id', -1)]).limit(limit)
def get_tweets_for_user(self, user, limit=TWEETS_LIMIT):
'''
returns tweets posted by user
'''
return self.db.tweets.find({'_id': {'$in': user['tweetsIds']}})
#return self.db.tweets.find({'userName': user}).sort([('_id', -1)]).limit(limit)
def get_tweets_for_user_str(self, username, limit=TWEETS_LIMIT):
user = self.db.users.find_one({'$text': {'$search': username}})
if user:
return [self.get_tweets_for_user(user, limit)]
else:
return []
def get_traces(self, limit=TRACES_LIMIT):
'''
Returns first <limit> lists of tweets from the same users
'''
users = self.db.users.find().limit(limit)
return [self.get_tweets_for_user(user) for user in users]
def get_traces_near_point(self, coords, dist, limit=TRACES_LIMIT):
'''
TODO docstring
'''
users = self.get_tweets_near_point(coords, dist).distinct('userName')
users_objs = self.db.users.find({'userName': {'$in': users}}).limit(limit)
return [self.get_tweets_for_user(user) for user in users_objs]
def get_traces_near_point_and_text(self, coords, dist, text, limit=TRACES_LIMIT):
'''
TODO docstring
'''
users = self.get_tweets_near_point_and_text(coords, dist, text).distinct('userName')
users_objs = self.db.users.find({'userName': {'$in': users}}).limit(limit)
return [self.get_tweets_for_user(user) for user in users_objs]
def get_traces_for_text(self, text, limit=TRACES_LIMIT):
'''
TODO docstring
'''
users = self.get_tweets_for_text(text, limit=limit).distinct('userName')
users_objs = self.db.users.find({'userName': {'$in': users}}).limit(limit)
return [self.get_tweets_for_user(user) for user in users_objs]
def get_trace_for_user(self, username):
'''
TODO docstring
'''
return self.get_tweets_for_user_str(username)
if __name__ == '__main__':
client = DBClient()
#client.create_users_collection()
client.remove_users_and_tweets(100, 3)
| mit | -6,620,564,958,461,042,000 | 35.699387 | 97 | 0.564694 | false | 3.531287 | false | false | false |
PaloAltoNetworks-BD/ansible-pan | library/panos_object.py | 1 | 17603 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: panos_object
short_description: create/read/update/delete object in PAN-OS or Panorama
description:
- Policy objects form the match criteria for policy rules and many other functions in PAN-OS. These may include
- address object, address groups, service objects, service groups, and tag.
author: "Bob Hagen (@rnh556)"
version_added: "2.4"
requirements:
- pan-python can be obtained from PyPI U(https://pypi.python.org/pypi/pan-python)
- pandevice can be obtained from PyPI U(https://pypi.python.org/pypi/pandevice)
deprecated:
removed_in: "2.9"
why: Updated to idempotent modules
alternative: >
Use M(panos_address_object), M(panos_address_group),
M(panos_service_object), M(panos_service_group), or
M(panos_tag_object) as appropriate.
notes:
- Checkmode is not supported.
- Panorama is supported.
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device or Panorama management console being configured.
required: true
username:
description:
- Username credentials to use for authentication.
required: false
default: "admin"
password:
description:
- Password credentials to use for authentication.
required: true
api_key:
description:
- API key that can be used instead of I(username)/I(password) credentials.
operation:
description:
- The operation to be performed. Supported values are I(add)/I(delete)/I(find).
required: true
addressobject:
description:
- The name of the address object.
address:
description:
- The IP address of the host or network in CIDR notation.
address_type:
description:
- The type of address object definition. Valid types are I(ip-netmask) and I(ip-range).
addressgroup:
description:
- A static group of address objects or dynamic address group.
static_value:
description:
- A group of address objects to be used in an addressgroup definition.
dynamic_value:
description:
- The filter match criteria to be used in a dynamic addressgroup definition.
serviceobject:
description:
- The name of the service object.
source_port:
description:
- The source port to be used in a service object definition.
destination_port:
description:
- The destination port to be used in a service object definition.
protocol:
description:
- The IP protocol to be used in a service object definition. Valid values are I(tcp) or I(udp).
servicegroup:
description:
- A group of service objects.
services:
description:
- The group of service objects used in a servicegroup definition.
description:
description:
- The description of the object.
tag_name:
description:
- The name of an object or rule tag.
color:
description: >
- The color of the tag object. Valid values are I(red, green, blue, yellow, copper, orange, purple, gray,
light green, cyan, light gray, blue gray, lime, black, gold, and brown).
vsys:
description:
- The vsys to put the object into.
- Firewall only.
default: "vsys1"
devicegroup:
description:
- The name of the (preexisting) Panorama device group.
- If undefined and ip_address is Panorama, this defaults to shared.
required: false
default: None
commit:
description:
- Commit the config change.
default: False
'''
EXAMPLES = '''
- name: search for shared address object
panos_object:
ip_address: '{{ ip_address }}'
username: '{{ username }}'
password: '{{ password }}'
operation: 'find'
address: 'DevNet'
- name: create an address group in devicegroup using API key
panos_object:
ip_address: '{{ ip_address }}'
api_key: '{{ api_key }}'
operation: 'add'
addressgroup: 'Prod_DB_Svrs'
static_value: ['prod-db1', 'prod-db2', 'prod-db3']
description: 'Production DMZ database servers'
tag_name: 'DMZ'
devicegroup: 'DMZ Firewalls'
- name: create a global service for TCP 3306
panos_object:
ip_address: '{{ ip_address }}'
api_key: '{{ api_key }}'
operation: 'add'
serviceobject: 'mysql-3306'
destination_port: '3306'
protocol: 'tcp'
description: 'MySQL on tcp/3306'
- name: create a global tag
panos_object:
ip_address: '{{ ip_address }}'
username: '{{ username }}'
password: '{{ password }}'
operation: 'add'
tag_name: 'ProjectX'
color: 'yellow'
description: 'Associated with Project X'
- name: delete an address object from a devicegroup using API key
panos_object:
ip_address: '{{ ip_address }}'
api_key: '{{ api_key }}'
operation: 'delete'
addressobject: 'Win2K test'
'''
RETURN = '''
# Default return values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import get_exception
try:
from pan.xapi import PanXapiError
import pandevice
from pandevice.base import PanDevice
from pandevice import panorama
    from pandevice import objects
    from pandevice.errors import PanDeviceError
import xmltodict
import json
HAS_LIB = True
except ImportError:
HAS_LIB = False
def get_devicegroup(device, devicegroup):
dg_list = device.refresh_devices()
for group in dg_list:
if isinstance(group, pandevice.panorama.DeviceGroup):
if group.name == devicegroup:
return group
return False
def find_object(device, dev_group, obj_name, obj_type):
# Get the firewall objects
obj_type.refreshall(device)
if isinstance(device, pandevice.firewall.Firewall):
addr = device.find(obj_name, obj_type)
return addr
elif isinstance(device, pandevice.panorama.Panorama):
addr = device.find(obj_name, obj_type)
if addr is None:
if dev_group:
device.add(dev_group)
obj_type.refreshall(dev_group)
addr = dev_group.find(obj_name, obj_type)
return addr
else:
return False
def create_object(**kwargs):
if kwargs['addressobject']:
newobject = objects.AddressObject(
name=kwargs['addressobject'],
value=kwargs['address'],
type=kwargs['address_type'],
description=kwargs['description'],
tag=kwargs['tag_name']
)
if newobject.type and newobject.value:
return newobject
else:
return False
elif kwargs['addressgroup']:
newobject = objects.AddressGroup(
name=kwargs['addressgroup'],
static_value=kwargs['static_value'],
dynamic_value=kwargs['dynamic_value'],
description=kwargs['description'],
tag=kwargs['tag_name']
)
if newobject.static_value or newobject.dynamic_value:
return newobject
else:
return False
elif kwargs['serviceobject']:
newobject = objects.ServiceObject(
name=kwargs['serviceobject'],
protocol=kwargs['protocol'],
source_port=kwargs['source_port'],
destination_port=kwargs['destination_port'],
tag=kwargs['tag_name']
)
if newobject.protocol and newobject.destination_port:
return newobject
else:
return False
elif kwargs['servicegroup']:
newobject = objects.ServiceGroup(
name=kwargs['servicegroup'],
value=kwargs['services'],
tag=kwargs['tag_name']
)
if newobject.value:
return newobject
else:
return False
elif kwargs['tag_name']:
t = objects.Tag
c = t.color_code(kwargs['color'])
newobject = objects.Tag(
name=kwargs['tag_name'],
color=c,
comments=kwargs['description']
)
if newobject.name:
return newobject
else:
return False
else:
return False
def add_object(device, dev_group, new_object):
if dev_group:
dev_group.add(new_object)
else:
device.add(new_object)
new_object.create()
return True
def main():
argument_spec = dict(
ip_address=dict(required=True),
password=dict(no_log=True),
username=dict(default='admin'),
api_key=dict(no_log=True),
operation=dict(required=True, choices=['add', 'update', 'delete', 'find']),
addressobject=dict(default=None),
addressgroup=dict(default=None),
serviceobject=dict(default=None),
servicegroup=dict(default=None),
address=dict(default=None),
address_type=dict(default='ip-netmask', choices=['ip-netmask', 'ip-range', 'fqdn']),
static_value=dict(type='list', default=None),
dynamic_value=dict(default=None),
protocol=dict(default=None, choices=['tcp', 'udp']),
source_port=dict(default=None),
destination_port=dict(default=None),
services=dict(type='list', default=None),
description=dict(default=None),
tag_name=dict(default=None),
color=dict(default=None, choices=['red', 'green', 'blue', 'yellow', 'copper', 'orange', 'purple',
'gray', 'light green', 'cyan', 'light gray', 'blue gray',
'lime', 'black', 'gold', 'brown']),
vsys=dict(default='vsys1'),
devicegroup=dict(default=None),
commit=dict(type='bool', default=False),
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False,
required_one_of=[['api_key', 'password']],
mutually_exclusive=[['addressobject', 'addressgroup',
'serviceobject', 'servicegroup',
'tag_name']]
)
if not HAS_LIB:
module.fail_json(msg='Missing required libraries.')
ip_address = module.params["ip_address"]
password = module.params["password"]
username = module.params['username']
api_key = module.params['api_key']
operation = module.params['operation']
addressobject = module.params['addressobject']
addressgroup = module.params['addressgroup']
serviceobject = module.params['serviceobject']
servicegroup = module.params['servicegroup']
address = module.params['address']
address_type = module.params['address_type']
static_value = module.params['static_value']
dynamic_value = module.params['dynamic_value']
protocol = module.params['protocol']
source_port = module.params['source_port']
destination_port = module.params['destination_port']
services = module.params['services']
description = module.params['description']
tag_name = module.params['tag_name']
color = module.params['color']
vsys = module.params['vsys']
devicegroup = module.params['devicegroup']
commit = module.params['commit']
# Create the device with the appropriate pandevice type
device = PanDevice.create_from_device(ip_address, username, password, api_key=api_key)
# If Panorama, validate the devicegroup
dev_group = None
if hasattr(device, 'refresh_devices'):
# Panorama: set the device group.
if devicegroup == 'shared':
# Device group of None is "shared" scope for Panorama.
devicegroup = None
if devicegroup is not None:
dev_group = get_devicegroup(device, devicegroup)
if dev_group:
device.add(dev_group)
else:
module.fail_json(msg='\'%s\' device group not found in Panorama. Is the name correct?' % devicegroup)
else:
# Firewall: set the targetted vsys.
device.vsys = vsys
# What type of object are we talking about?
if addressobject:
obj_name = addressobject
obj_type = objects.AddressObject
elif addressgroup:
obj_name = addressgroup
obj_type = objects.AddressGroup
elif serviceobject:
obj_name = serviceobject
obj_type = objects.ServiceObject
elif servicegroup:
obj_name = servicegroup
obj_type = objects.ServiceGroup
elif tag_name:
obj_name = tag_name
obj_type = objects.Tag
else:
module.fail_json(msg='No object type defined!')
# Which operation shall we perform on the object?
msg = None
if operation == "find":
# Search for the object
match = find_object(device, dev_group, obj_name, obj_type)
# If found, format and return the result
if match:
match_dict = xmltodict.parse(match.element_str())
module.exit_json(
stdout_lines=json.dumps(match_dict, indent=2),
msg='Object matched'
)
else:
module.fail_json(msg='Object \'%s\' not found. Is the name correct?' % obj_name)
elif operation == "delete":
# Search for the object
match = find_object(device, dev_group, obj_name, obj_type)
# If found, delete it
if match:
try:
match.delete()
except PanXapiError:
exc = get_exception()
module.fail_json(msg=exc.message)
msg = "Object '{0}' successfully deleted".format(obj_name)
else:
module.fail_json(msg='Object \'%s\' not found. Is the name correct?' % obj_name)
elif operation == "add":
# Search for the object. Fail if found.
match = find_object(device, dev_group, obj_name, obj_type)
if match:
module.fail_json(msg='Object \'%s\' already exists. Use operation: \'update\' to change it.' % obj_name)
else:
try:
new_object = create_object(
addressobject=addressobject,
addressgroup=addressgroup,
serviceobject=serviceobject,
servicegroup=servicegroup,
address=address,
address_type=address_type,
static_value=static_value,
dynamic_value=dynamic_value,
protocol=protocol,
source_port=source_port,
destination_port=destination_port,
services=services,
description=description,
tag_name=tag_name,
color=color
)
changed = add_object(device, dev_group, new_object)
except PanXapiError:
exc = get_exception()
module.fail_json(msg=exc.message)
msg = "Object '{0}' successfully added".format(obj_name)
elif operation == "update":
# Search for the object. Update if found.
match = find_object(device, dev_group, obj_name, obj_type)
if match:
try:
new_object = create_object(
addressobject=addressobject,
addressgroup=addressgroup,
serviceobject=serviceobject,
servicegroup=servicegroup,
address=address,
address_type=address_type,
static_value=static_value,
dynamic_value=dynamic_value,
protocol=protocol,
source_port=source_port,
destination_port=destination_port,
services=services,
description=description,
tag_name=tag_name,
color=color
)
changed = add_object(device, dev_group, new_object)
except PanXapiError:
exc = get_exception()
module.fail_json(msg=exc.message)
msg = "Object '{0}' successfully updated.".format(obj_name)
else:
module.fail_json(msg='Object \'%s\' does not exist. Use operation: \'add\' to add it.' % obj_name)
# Optional: commit the change.
if commit:
try:
device.commit(sync=True)
except PanDeviceError as e:
module.fail_json(msg='Failed to commit: {0}'.format(e))
# Done.
module.exit_json(changed=True, msg=msg)
if __name__ == '__main__':
main()
| isc | 49,532,090,653,482,990 | 34.418511 | 118 | 0.589104 | false | 4.259134 | false | false | false |
linksuccess/linksuccess | parsingframework/heatmaps.py | 1 | 18035 | import numpy as np
import numpy.random
import matplotlib.pyplot as plt
import cPickle as pickle
import MySQLdb
from wsd.database import MySQLDatabase
import matplotlib.cm as cm
from matplotlib.colors import LogNorm, Normalize, BoundaryNorm, PowerNorm
from conf import *
from matplotlib import style
style.use('acm-3col')
import pylab
params = {
'font.family' : 'serif',
'font.serif' : ['Times New Roman'],
'font.size' : 7
}
pylab.rcParams.update(params)
def clicks_heatmap():
print 'loading'
db = MySQLDatabase(DATABASE_HOST, DATABASE_USER, DATABASE_PASSWORD, DATABASE_NAME)
db_worker_view = db.get_work_view()
coords = db_worker_view.retrieve_all_links_coords_clicks()
print 'coord loaded'
links = {}
x = []
y = []
values = []
confident_values = []
not_confident_values = []
x_conf = []
y_conf = []
x_not_conf = []
y_not_conf = []
number_of_not_confident_clicks=0
number_of_confident_clicks = 0
number_of_valid_normed_links=0
for coord in coords:
        # count duplicate occurrences of each link key (first occurrence maps to 0)
        if coord['key'] in links:
            links[coord['key']] += 1
        else:
            links[coord['key']] = 0
for coord in coords:
x_normed = float(coord['x'])/float(1920)
y_normed = float(coord['y'])/float(coord['page_length'])
if x_normed <=1.0 and y_normed <=1.0:
x.append(x_normed)
y.append(y_normed)
number_of_valid_normed_links+=1
if links[coord['key']]==0:
x_conf.append(x_normed)
y_conf.append(y_normed)
values.append(float(coord['counts']))
number_of_confident_clicks+=1
confident_values.append(coord['counts'])
else:
x_not_conf.append(x_normed)
y_not_conf.append(y_normed)
values.append(float(coord['counts'])/float(links[coord['key']])+1.0)
number_of_not_confident_clicks+=1
not_confident_values.append(float(coord['counts'])/float(links[coord['key']]))
print '###########'
print sum(values)
print sum(confident_values)
print number_of_confident_clicks
print sum(not_confident_values)
print number_of_not_confident_clicks
print number_of_valid_normed_links
print len(coords)
print '###########'
heatmap, xedges, yedges = np.histogram2d(x_conf, y_conf, bins=100, weights=confident_values)
extent = [xedges[0], xedges[-1], yedges[-1], yedges[0] ]
fig_size = (2.4, 2)
#fig_size = (3.5, 3)
plt.clf()
plt.figure(figsize=fig_size)
plt.grid(True)
plt.imshow(heatmap, extent=extent, origin='upper', norm=LogNorm(), cmap=plt.get_cmap('jet'))
plt.colorbar()
#plt.title("Clicks Heatmap Log Normalized")
plt.show()
plt.savefig('output/clicks_heatmap_lognormed_self_loop_confident.pdf')
plt.clf()
plt.figure(figsize=fig_size)
plt.grid(True)
plt.imshow(heatmap , extent=extent, origin='upper', norm=Normalize(), cmap=plt.get_cmap('jet'))
plt.colorbar()
#plt.title("Clicks Heatmap Normalized")
plt.show()
plt.savefig('output/clicks_heatmap_normed_self_loop_confident.pdf')
print "conf done"
heatmap, xedges, yedges = np.histogram2d(x_not_conf, y_not_conf, bins=100, weights=not_confident_values)
extent = [xedges[0], xedges[-1], yedges[-1], yedges[0] ]
fig_size = (2.4, 2)
#fig_size = (3.5, 3)
plt.clf()
plt.figure(figsize=fig_size)
plt.grid(True)
plt.imshow(heatmap, extent=extent, origin='upper', norm=LogNorm(), cmap=plt.get_cmap('jet'))
plt.colorbar()
#plt.title("Clicks Heatmap Log Normalized")
plt.show()
plt.savefig('output/clicks_heatmap_lognormed_self_loop_not_confident.pdf')
plt.clf()
plt.figure(figsize=fig_size)
plt.grid(True)
plt.imshow(heatmap , extent=extent, origin='upper', norm=Normalize(), cmap=plt.get_cmap('jet'))
plt.colorbar()
#plt.title("Clicks Heatmap Normalized")
plt.show()
plt.savefig('output/clicks_heatmap_normed_self_loop_not_confident.pdf')
print " not conf done"
heatmap, xedges, yedges = np.histogram2d(x, y, bins=100, weights=values)
extent = [xedges[0], xedges[-1], yedges[-1], yedges[0] ]
fig_size = (2.4, 2)
#fig_size = (3.5, 3)
plt.clf()
plt.figure(figsize=fig_size)
plt.grid(True)
plt.imshow(heatmap, extent=extent, origin='upper', norm=LogNorm(), cmap=plt.get_cmap('jet'))
plt.colorbar()
#plt.title("Clicks Heatmap Log Normalized")
plt.show()
plt.savefig('output/clicks_heatmap_lognormed_self_loop_1.pdf')
plt.clf()
plt.figure(figsize=fig_size)
plt.grid(True)
plt.imshow(heatmap , extent=extent, origin='upper', norm=Normalize(), cmap=plt.get_cmap('jet'))
plt.colorbar()
#plt.title("Clicks Heatmap Normalized")
plt.show()
plt.savefig('output/clicks_heatmap_normed_self_loop_1.pdf')
print "done"
def clicks_heatmap_first_occ():
print 'loading'
db = MySQLDatabase(DATABASE_HOST, DATABASE_USER, DATABASE_PASSWORD, DATABASE_NAME)
db_worker_view = db.get_work_view()
coords = db_worker_view.retrieve_all_links_coords_clicks_first_occ()
print 'coord loaded'
links = {}
x = []
y = []
values = []
for link in coords.values():
x_normed = float(link['x'])/float(1920)
y_normed = float(link['y'])/float(link['page_length'])
if x_normed <=1.0 and y_normed <=1.0:
x.append(x_normed)
y.append(y_normed)
values.append(float(link['counts']))
heatmap, xedges, yedges = np.histogram2d(x, y, bins=100, weights=values)
extent = [xedges[0], xedges[-1], yedges[-1], yedges[0] ]
fig_size = (2.4, 2)
plt.clf()
plt.figure(figsize=fig_size)
plt.grid(True)
plt.imshow(heatmap , extent=extent, origin='upper', norm=LogNorm(), cmap=plt.get_cmap('jet'))
plt.colorbar()
#plt.title("Clicks Heatmap Log Normalized")
plt.show()
plt.savefig('output/clicks_heatmap_lognormed_self_loop_first_occ.pdf')
plt.clf()
plt.figure(figsize=fig_size)
plt.grid(True)
plt.imshow(heatmap , extent=extent, origin='upper', norm=Normalize(), cmap=plt.get_cmap('jet'))
plt.colorbar()
#plt.title("Clicks Heatmap Normalized")
plt.show()
plt.savefig('output/clicks_heatmap_normed_self_loop_first_occ.pdf')
print "done"
def clicks_heatmap_total():
print 'loading'
db = MySQLDatabase(DATABASE_HOST, DATABASE_USER, DATABASE_PASSWORD, DATABASE_NAME)
db_worker_view = db.get_work_view()
coords = db_worker_view.retrieve_all_links_coords_clicks()
print 'coord loaded'
links = {}
x = []
y = []
values = []
for coord in coords:
x_normed = float(coord['x'])/float(1920)
y_normed = float(coord['y'])/float(coord['page_length'])
if x_normed <=1.0 and y_normed <=1.0:
x.append(x_normed)
y.append(y_normed)
values.append(float(coord['counts']))
heatmap, xedges, yedges = np.histogram2d(x, y, bins=100, weights=values)
extent = [xedges[0], xedges[-1], yedges[-1], yedges[0] ]
fig_size = (2.4, 2)
plt.clf()
plt.figure(figsize=fig_size)
plt.grid(True)
plt.imshow(heatmap , extent=extent, origin='upper', norm=LogNorm(), cmap=plt.get_cmap('jet'))
plt.colorbar()
#plt.title("Clicks Heatmap Log Normalized")
plt.show()
plt.savefig('output/clicks_heatmap_lognormed_self_loop_total.pdf')
plt.clf()
plt.figure(figsize=fig_size)
plt.grid(True)
plt.imshow(heatmap , extent=extent, origin='upper', norm=Normalize(), cmap=plt.get_cmap('jet'))
plt.colorbar()
#plt.title("Clicks Heatmap Normalized")
plt.show()
plt.savefig('output/clicks_heatmap_normed_self_loop_total.pdf')
print "done"
def links_heatmap():
#http://stackoverflow.com/questions/2369492/generate-a-heatmap-in-matplotlib-using-a-scatter-data-set
# Get URLs from a text file, remove white space.
print 'loading'
db = MySQLDatabase(DATABASE_HOST, DATABASE_USER, DATABASE_PASSWORD, DATABASE_NAME)
db_worker_view = db.get_work_view()
coords = db_worker_view.retrieve_all_links_coords()
print 'coord loaded'
x=[]
y=[]
page_lenghts = db_worker_view.retrieve_all_page_lengths()
    print 'lengths loaded'
for coord in coords:
x_normed = float(coord['x'])/float(1920)
y_normed = float(coord['y'])/float(page_lenghts[coord['source_article_id']])
if x_normed <=1.0 and y_normed <=1.0:
x.append(x_normed)
y.append(y_normed)
heatmap, xedges, yedges = np.histogram2d(x, y, bins=100)
extent = [xedges[0], xedges[-1], yedges[-1], yedges[0]]
fig_size = (2.4, 2)
#fig_size = (3.5, 3)
plt.clf()
plt.figure(figsize=fig_size)
plt.grid(True)
plt.imshow(heatmap, extent=extent, origin='upper', norm=LogNorm(), cmap=plt.get_cmap('jet'))
plt.colorbar()
#plt.title("Links Heatmap Log Normalized")
plt.show()
plt.savefig('output/links_heatmap_lognormed_self_loop.pdf')
plt.clf()
plt.figure(figsize=fig_size)
plt.grid(True)
plt.imshow(heatmap , extent=extent, origin='upper', norm=Normalize(),cmap=plt.get_cmap('jet'))
plt.colorbar()
#plt.title("Links Heatmap Normalized")
plt.show()
plt.savefig('output/links_heatmap_normed_self_loop.pdf')
print "done"
def multiple_links_heatmap():
print 'loading'
db = MySQLDatabase(DATABASE_HOST, DATABASE_USER, DATABASE_PASSWORD, DATABASE_NAME)
db_worker_view = db.get_work_view()
coords = db_worker_view.retrieve_all_links_multpile_occ()
print 'coord loaded'
page_lenghts = db_worker_view.retrieve_all_page_lengths()
    print 'lengths loaded'
links = {}
x = []
y = []
x_conf = []
y_conf = []
x_not_conf = []
y_not_conf = []
number_of_not_confident_clicks=0
number_of_confident_clicks = 0
number_of_valid_normed_links=0
for coord in coords:
try:
v = links[coord['key']]
links[coord['key']]+=1
except:
links[coord['key']]=0
for coord in coords:
x_normed = float(coord['x'])/float(1920)
y_normed = float(coord['y'])/float(page_lenghts[coord['key'][0]])
if x_normed <=1.0 and y_normed <=1.0:
x.append(x_normed)
y.append(y_normed)
number_of_valid_normed_links+=1
if links[coord['key']]==0:
x_conf.append(x_normed)
y_conf.append(y_normed)
number_of_confident_clicks+=1
else:
x_not_conf.append(x_normed)
y_not_conf.append(y_normed)
number_of_not_confident_clicks+=1
print '###########'
print number_of_confident_clicks
print number_of_not_confident_clicks
print number_of_valid_normed_links
print len(coords)
print '###########'
heatmap, xedges, yedges = np.histogram2d(x_conf, y_conf, bins=100)
extent = [xedges[0], xedges[-1], yedges[-1], yedges[0]]
fig_size = (2.4, 2)
#fig_size = (3.5, 3)
plt.clf()
plt.figure(figsize=fig_size)
plt.grid(True)
plt.imshow(heatmap, extent=extent, origin='upper', norm=LogNorm(), cmap=plt.get_cmap('jet'))
plt.colorbar()
#plt.title("Links Heatmap Log Normalized")
plt.show()
plt.savefig('output/links_heatmap_lognormed_self_loop_unique.pdf')
plt.clf()
plt.figure(figsize=fig_size)
plt.grid(True)
plt.imshow(heatmap , extent=extent, origin='upper', norm=Normalize(),cmap=plt.get_cmap('jet'))
plt.colorbar()
#plt.title("Links Heatmap Normalized")
plt.show()
plt.savefig('output/links_heatmap_normed_self_loop_unique.pdf')
print "unique done"
heatmap, xedges, yedges = np.histogram2d(x_not_conf, y_not_conf, bins=100)
extent = [xedges[0], xedges[-1], yedges[-1], yedges[0]]
fig_size = (2.4, 2)
#fig_size = (3.5, 3)
plt.clf()
plt.figure(figsize=fig_size)
plt.grid(True)
plt.imshow(heatmap, extent=extent, origin='upper', norm=LogNorm(), cmap=plt.get_cmap('jet'))
plt.colorbar()
#plt.title("Links Heatmap Log Normalized")
plt.show()
plt.savefig('output/links_heatmap_lognormed_self_loop_multiple.pdf')
plt.clf()
plt.figure(figsize=fig_size)
plt.grid(True)
plt.imshow(heatmap , extent=extent, origin='upper', norm=Normalize(),cmap=plt.get_cmap('jet'))
plt.colorbar()
#plt.title("Links Heatmap Normalized")
plt.show()
plt.savefig('output/links_heatmap_normed_self_loop_multiple.pdf')
print "done"
def links_heatmap_rel_prob():
#http://stackoverflow.com/questions/2369492/generate-a-heatmap-in-matplotlib-using-a-scatter-data-set
# Get URLs from a text file, remove white space.
print 'loading'
db = MySQLDatabase(DATABASE_HOST, DATABASE_USER, DATABASE_PASSWORD, DATABASE_NAME)
db_worker_view = db.get_work_view()
coords = db_worker_view.retrieve_all_links_coords()
x=[]
y=[]
page_lenghts = db_worker_view.retrieve_all_page_lengths()
for coord in coords:
x_normed = float(coord['x'])/float(1920)
y_normed = float(coord['y'])/float(page_lenghts[coord['source_article_id']])
if x_normed <=1.0 and y_normed <=1.0:
x.append(x_normed)
y.append(y_normed)
links_heatmap_hist, xedges, yedges = np.histogram2d(x, y, normed=True, bins=100)
links_extent = [xedges[0], xedges[-1], yedges[-1], yedges[0]]
coords = db_worker_view.retrieve_all_links_coords_clicks()
print 'coord loaded'
links = {}
x = []
y = []
values = []
for coord in coords:
try:
v = links[coord['key']]
links[coord['key']]+=1
except:
links[coord['key']]=0
for coord in coords:
x_normed = float(coord['x'])/float(1920)
y_normed = float(coord['y'])/float(coord['page_length'])
if x_normed <=1.0 and y_normed <=1.0:
x.append(x_normed)
y.append(y_normed)
if links[coord['key']]==0:
#x.append(x_normed)
#y.append(y_normed)
values.append(float(coord['counts']))
else:
values.append(float(coord['counts'])/float(links[coord['key']]))
clicks_heatmap_hist, xedges, yedges = np.histogram2d(x, y, bins=100, normed=True, weights=values)
clicks_extent = [xedges[0], xedges[-1], yedges[-1], yedges[0]]
substraction_hist = np.subtract(clicks_heatmap_hist,links_heatmap_hist)
#rel_prob_hist = np.divide(clicks_heatmap_hist, links_heatmap_hist)
with np.errstate(divide='ignore', invalid='ignore'):
rel_prob_hist = np.divide(clicks_heatmap_hist, links_heatmap_hist)
rel_prob_hist[rel_prob_hist == np.inf] = 0
rel_prob_hist = np.nan_to_num(rel_prob_hist)
fig_size = (2.4, 2)
plt.clf()
plt.figure(figsize=fig_size)
plt.grid(True)
plt.imshow(substraction_hist, extent=clicks_extent, origin='upper',norm=Normalize(), cmap=plt.get_cmap('jet'))
plt.colorbar()
plt.show()
plt.savefig('output/clicks-links_heatmap_normed_self_loop.pdf')
plt.clf()
plt.figure(figsize=fig_size)
plt.grid(True)
plt.imshow(rel_prob_hist , extent=clicks_extent, origin='upper', norm=Normalize(),cmap=plt.get_cmap('jet'))
plt.colorbar()
plt.show()
plt.savefig('output/clicks_over_links_heatmap_normed_self_loop.pdf')
plt.clf()
plt.figure(figsize=fig_size)
plt.grid(True)
plt.imshow(substraction_hist, extent=clicks_extent, origin='upper', norm=LogNorm(), cmap=plt.get_cmap('jet'))
plt.colorbar()
plt.show()
plt.savefig('output/clicks-links_heatmap_lognormed_self_loop.pdf')
plt.clf()
plt.figure(figsize=fig_size)
plt.grid(True)
plt.imshow(rel_prob_hist , extent=clicks_extent, origin='upper', norm=LogNorm(), cmap=plt.get_cmap('jet'))
plt.colorbar()
plt.show()
plt.savefig('output/clicks_over_links_heatmap_lognormed_self_loop.pdf')
substraction_hist = np.subtract(links_heatmap_hist, clicks_heatmap_hist)
#rel_prob_hist = np.divide(clicks_heatmap_hist, links_heatmap_hist)
with np.errstate(divide='ignore', invalid='ignore'):
rel_prob_hist = np.divide(links_heatmap_hist, clicks_heatmap_hist)
rel_prob_hist[rel_prob_hist == np.inf] = 0
rel_prob_hist = np.nan_to_num(rel_prob_hist)
plt.clf()
plt.figure(figsize=fig_size)
plt.grid(True)
plt.imshow(substraction_hist, extent=clicks_extent, origin='upper', norm=Normalize(), cmap=plt.get_cmap('jet'))
plt.colorbar()
#plt.title("Links Heatmap Normalized")
plt.show()
plt.savefig('output/links-clicks_heatmap_normed_self_loop.pdf')
plt.clf()
plt.figure(figsize=fig_size)
plt.grid(True)
plt.imshow(rel_prob_hist , extent=clicks_extent, origin='upper', norm=Normalize(), cmap=plt.get_cmap('jet'))
plt.colorbar()
#plt.title("Links Heatmap Normalized")
plt.show()
plt.savefig('output/links_over_clicks_heatmap_normed_self_loop.pdf')
plt.clf()
plt.figure(figsize=fig_size)
plt.grid(True)
plt.imshow(substraction_hist, extent=clicks_extent, origin='upper', norm=LogNorm(), cmap=plt.get_cmap('jet'))
plt.colorbar()
#plt.title("Links Heatmap Normalized")
plt.show()
plt.savefig('output/links-clicks_heatmap_lognormed_self_loop.pdf')
plt.clf()
plt.figure(figsize=fig_size)
plt.grid(True)
plt.imshow(rel_prob_hist , extent=clicks_extent, origin='upper', norm=LogNorm(), cmap=plt.get_cmap('jet'))
plt.colorbar()
#plt.title("Links Heatmap Normalized")
plt.show()
plt.savefig('output/links_over_clicks_heatmap_lognormed_self_loop.pdf')
print "done"
if __name__ == '__main__':
links_heatmap()
clicks_heatmap_first_occ()
clicks_heatmap_total()
clicks_heatmap()
multiple_links_heatmap()
links_heatmap_rel_prob()
| mit | -2,729,897,635,614,966,000 | 29.158863 | 115 | 0.623177 | false | 3.050059 | false | false | false |
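# A standalone sketch of the histogram2d + imshow pattern repeated throughout the
# script above, using synthetic click coordinates in place of the MySQL-backed
# data (the wsd.database and conf modules are assumed unavailable here).
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm

def render_heatmap(x, y, weights, outfile, bins=100):
    # Bin normalized (x, y) points into a 2D histogram, weighting each point.
    heatmap, xedges, yedges = np.histogram2d(x, y, bins=bins, weights=weights)
    extent = [xedges[0], xedges[-1], yedges[-1], yedges[0]]
    plt.clf()
    plt.figure(figsize=(2.4, 2))
    plt.grid(True)
    plt.imshow(heatmap, extent=extent, origin='upper', norm=LogNorm(),
               cmap=plt.get_cmap('jet'))
    plt.colorbar()
    plt.savefig(outfile)

if __name__ == '__main__':
    rng = np.random.RandomState(0)
    x = rng.rand(1000)                          # stands in for x / 1920
    y = rng.rand(1000)                          # stands in for y / page_length
    w = rng.randint(1, 50, 1000).astype(float)  # stands in for click counts
    render_heatmap(x, y, w, 'example_heatmap.pdf')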
google/dotty | efilter/stdlib/core.py | 1 | 10013 | # EFILTER Forensic Query Language
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
EFILTER stdlib - core module.
This module defines functions that are always included in every query, as well
as the base classes TypedFunction and LibraryModule, which are used to represent
stdlib functions and modules.
"""
__author__ = "Adam Sindelar <[email protected]>"
import itertools
import six
import threading
from efilter import protocol
from efilter.protocols import applicative
from efilter.protocols import counted
from efilter.protocols import reducer
from efilter.protocols import repeated
from efilter.protocols import structured
class TypedFunction(object):
"""Represents an EFILTER-callable function with reflection support.
Each function in the standard library is an instance of a subclass of
this class. Subclasses override __call__ and the reflection API.
"""
name = None
def apply(self, args, kwargs):
return self(*args, **kwargs)
def __call__(self):
raise NotImplementedError()
@classmethod
def reflect_static_args(cls):
return itertools.repeat(protocol.AnyType)
@classmethod
def reflect_static_return(cls):
return protocol.AnyType
applicative.IApplicative.implicit_dynamic(TypedFunction)
class TypedReducer(object):
"""Represents an EFILTER-callable reducer function.
TypedReducer supports the IReducer protocol, but also works as a function
(IApplicative), to allow it to reduce values inside rows in a query.
"""
name = None
# IApplicative
def apply(self, args, kwargs):
return self(*args, **kwargs)
def __call__(self, data, chunk_size=None):
return reducer.reduce(self, data, chunk_size)
@classmethod
def reflect_static_args(cls):
return (repeated.IRepeated,)
@classmethod
def reflect_static_return(cls):
return protocol.AnyType
# IReducer
def fold(self, chunk):
raise NotImplementedError()
def merge(self, left, right):
raise NotImplementedError()
def finalize(self, intermediate):
raise NotImplementedError()
applicative.IApplicative.implicit_dynamic(TypedReducer)
reducer.IReducer.implicit_dynamic(TypedReducer)
class SingletonReducer(object):
"""Preserves a literal value and ensures it's a singleton."""
name = "singleton"
def fold(self, chunk):
iterator = iter(chunk)
first = next(iterator)
for item in iterator:
if item != first:
raise ValueError("All values in a singleton reducer must be "
"equal to each other. Got %r != %r." % (
first, item))
return first
def merge(self, left, right):
if left != right:
raise ValueError("All values in a singleton reducer must be "
"equal to each other. Got %r != %r." % (
left, right))
return left
def finalize(self, intermediate):
return intermediate
class LibraryModule(object):
"""Represents a part of the standard library.
Each library module consists of a collection of vars, which are mostly
instances of TypedFunction. The stdcore module also contains basic types,
such as 'str' or 'int', in addition to functions.
"""
vars = None
name = None
# This is a class-level global storing all instances by their name.
ALL_MODULES = {}
_all_modules_lock = threading.Lock()
def __init__(self, vars, name):
self.vars = vars
self.name = name
self._all_modules_lock.acquire()
try:
if name in self.ALL_MODULES:
raise ValueError("Duplicate module name %r." % name)
self.ALL_MODULES[name] = self
finally:
self._all_modules_lock.release()
def __del__(self):
"""If modules are being used properly this will only happen on exit."""
self._all_modules_lock.acquire()
try:
del self.ALL_MODULES[self.name]
finally:
self._all_modules_lock.release()
def __repr__(self):
return "LibraryModule(name=%r, vars=%r)" % (self.name, self.vars)
def getmembers_runtime(self):
return self.vars.keys()
def resolve(self, name):
return self.vars[name]
def reflect_runtime_member(self, name):
return type(self.vars[name])
structured.IStructured.implicit_static(LibraryModule)
class First(TypedFunction):
"""Return the first value from an IRepeated."""
name = "first"
def __call__(self, x):
for value in repeated.getvalues(x):
return value
@classmethod
def reflect_static_args(cls):
return (repeated.IRepeated,)
@classmethod
def reflect_static_return(cls):
return protocol.AnyType
class Take(TypedFunction):
"""Take only the first 'count' elements from 'x' (tuple or IRepeated).
This implementation is lazy.
Example:
take(2, (1, 2, 3, 4)) -> (1, 2)
Arguments:
count: How many elements to return.
x: The tuple or IRepeated to take from.
Returns:
A lazy IRepeated.
"""
name = "take"
def __call__(self, count, x):
def _generator():
if isinstance(x, tuple):
values = x
else:
values = repeated.getvalues(x)
for idx, value in enumerate(values):
if idx == count:
break
yield value
return repeated.lazy(_generator)
@classmethod
def reflect_static_args(cls):
return (int, repeated.IRepeated)
@classmethod
def reflect_static_return(cls):
return repeated.IRepeated
class Drop(TypedFunction):
"""Drop the first 'count' elements from 'x' (tuple or IRepeated).
This implementation is lazy.
Example:
drop(2, (1, 2, 3, 4)) -> (3, 4)
Arguments:
count: How many elements to drop.
x: The tuple or IRepeated to drop from.
Returns:
A lazy IRepeated.
"""
name = "drop"
def __call__(self, count, x):
def _generator():
if isinstance(x, tuple):
values = x
else:
values = repeated.getvalues(x)
for idx, value in enumerate(values):
if idx < count:
continue
yield value
return repeated.lazy(_generator)
@classmethod
def reflect_static_args(cls):
return (int, repeated.IRepeated)
@classmethod
def reflect_static_return(cls):
return repeated.IRepeated
class Lower(TypedFunction):
"""Make a string lowercase."""
name = "lower"
def __call__(self, x):
return x.lower()
@classmethod
def reflect_static_args(cls):
return (six.string_types[0],)
@classmethod
def reflect_static_return(cls):
return six.string_types[0]
class Find(TypedFunction):
"""Returns the position of 'needle' in 'string', or -1 if not found."""
name = "find"
def __call__(self, string, needle):
return string.find(needle)
@classmethod
def reflect_static_args(cls):
return (six.string_types[0], six.string_types[0])
@classmethod
def reflect_static_return(cls):
return int
class Count(TypedReducer):
"""Counts the number of elements in a tuple or of values in a repeated."""
name = "count"
def fold(self, chunk):
return counted.count(chunk)
def merge(self, left, right):
return left + right
def finalize(self, intermediate):
return intermediate
@classmethod
def reflect_static_return(cls):
return int
class Reverse(TypedFunction):
"""Reverse a tuple of a repeated and maintains the type."""
name = "reverse"
def __call__(self, x):
if isinstance(x, tuple):
return tuple(reversed(x))
return repeated.meld(*reversed(repeated.getvalues(x)))
@classmethod
def reflect_static_args(cls):
return (repeated.IRepeated,)
@classmethod
def reflect_static_return(cls):
return repeated.IRepeated
class Materialize(TypedFunction):
"""Force a repeated value (e.g. output of map) to materialize in memory."""
name = "materialize"
def __call__(self, rv):
return repeated.repeated(*list(rv))
@classmethod
def reflect_static_args(cls):
return (repeated.IRepeated,)
@classmethod
def reflect_static_return(cls):
return repeated.IRepeated
MODULE = LibraryModule(name="stdcore",
vars={Take.name: Take(),
Drop.name: Drop(),
Count.name: Count(),
Reverse.name: Reverse(),
Lower.name: Lower(),
Find.name: Find(),
SingletonReducer.name: SingletonReducer(),
First.name: First(),
Materialize.name: Materialize(),
# Built-in types below:
"int": int,
"str": six.text_type,
"bytes": six.binary_type,
"float": float})
| apache-2.0 | 3,722,554,393,945,833,500 | 24.478372 | 80 | 0.602517 | false | 4.327139 | false | false | false |
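# A brief usage sketch for the stdcore module defined above. It assumes the
# efilter package is importable and that repeated.getvalues() yields the values
# of the lazy results, as the implementations shown suggest for tuple inputs.
from efilter.protocols import repeated
from efilter.stdlib import core

take = core.MODULE.resolve("take")        # Take() instance
drop = core.MODULE.resolve("drop")        # Drop() instance
lower = core.MODULE.resolve("lower")      # Lower() instance

first_two = take(2, (1, 2, 3, 4))         # lazy IRepeated over (1, 2)
rest = drop(2, (1, 2, 3, 4))              # lazy IRepeated over (3, 4)

print(list(repeated.getvalues(first_two)))   # [1, 2]
print(list(repeated.getvalues(rest)))        # [3, 4]
print(lower("EFILTER"))                      # 'efilter'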
endlessm/chromium-browser | third_party/chromite/lib/image_lib_unittest.py | 1 | 25414 | # -*- coding: utf-8 -*-
# Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test the image_lib module."""
from __future__ import print_function
import collections
import gc
import glob
import os
import stat
import mock
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.lib import git
from chromite.lib import image_lib
from chromite.lib import osutils
from chromite.lib import retry_util
from chromite.lib import partial_mock
# pylint: disable=protected-access
class FakeException(Exception):
"""Fake exception used for testing exception handling."""
FAKE_PATH = '/imaginary/file'
LOOP_DEV = '/dev/loop9999'
LOOP_PART_COUNT = 12
LOOP_PARTITION_INFO = [
image_lib.PartitionInfo(
1, 2928640, 2957311, 28672, 14680064, 'STATE', ''),
image_lib.PartitionInfo(
2, 20480, 53247, 32768, 16777216, 'KERN-A', ''),
image_lib.PartitionInfo(
3, 286720, 2928639, 2641920, 1352663040, 'ROOT-A', ''),
image_lib.PartitionInfo(
4, 53248, 86015, 32768, 16777216, 'KERN-B', ''),
image_lib.PartitionInfo(
5, 282624, 286719, 4096, 2097152, 'ROOT-B', ''),
image_lib.PartitionInfo(
6, 16448, 16448, 1, 512, 'KERN-C', ''),
image_lib.PartitionInfo(
7, 16449, 16449, 1, 512, 'ROOT-C', ''),
image_lib.PartitionInfo(
8, 86016, 118783, 32768, 16777216, 'OEM', ''),
image_lib.PartitionInfo(
9, 16450, 16450, 1, 512, 'reserved', ''),
image_lib.PartitionInfo(
10, 16451, 16451, 1, 512, 'reserved', ''),
image_lib.PartitionInfo(
11, 64, 16447, 16384, 8388608, 'RWFW', ''),
image_lib.PartitionInfo(
12, 249856, 282623, 32768, 16777216, 'EFI-SYSTEM', ''),
]
LOOP_PARTS_DICT = {
p.number: '%sp%d' % (LOOP_DEV, p.number) for p in LOOP_PARTITION_INFO}
LOOP_PARTS_LIST = LOOP_PARTS_DICT.values()
class LoopbackPartitionsMock(image_lib.LoopbackPartitions):
"""Mocked loopback partition class to use in unit tests."""
# pylint: disable=super-init-not-called
def __init__(self, path, destination=None, part_ids=None, mount_opts=None,
dev=LOOP_DEV, part_count=0):
"""Initialize.
Args:
(shared with LoopbackPartitions)
path: Path to the image file.
destination: destination directory.
part_ids: Mount these partitions at context manager entry.
mount_opts: Use these mount_opts for mounting |part_ids|.
(unique to LoopbackPartitionsMock)
dev: Path for the base loopback device.
part_count: How many partition device files to make up. Default: normal
partition table.
"""
self.path = path
self.dev = dev
self.part_ids = part_ids
self.mount_opts = mount_opts
if destination:
self.destination = destination
else:
self.destination = osutils.TempDir()
if part_count:
self._gpt_table = [
image_lib.PartitionInfo(num, 0, 0, 0, '', 'my-%d' % num, '')
for num in range(1, part_count + 1)]
else:
self._gpt_table = LOOP_PARTITION_INFO
self.parts = {p.number: '%sp%s' % (dev, p.number)
for p in self._gpt_table}
self.enable_rw_called = set()
self.disable_rw_called = set()
# pylint: enable=super-init-not-called
def EnableRwMount(self, part_id, offset=0):
"""Stub out enable rw mount."""
self.enable_rw_called.add((part_id, offset))
def DisableRwMount(self, part_id, offset=0):
"""Stub out disable rw mount."""
self.disable_rw_called.add((part_id, offset))
def _Mount(self, part, mount_opts):
"""Stub out mount operations."""
dest_number, _ = self._GetMountPointAndSymlink(part)
# Don't actually even try to mount it, let alone mark it mounted.
return dest_number
def _Unmount(self, part):
"""Stub out unmount operations."""
def close(self):
pass
class LoopbackPartitionsTest(cros_test_lib.MockTempDirTestCase):
"""Test the loopback partitions class"""
def setUp(self):
self.rc_mock = cros_test_lib.RunCommandMock()
self.StartPatcher(self.rc_mock)
self.rc_mock.SetDefaultCmdResult()
self.rc_mock.AddCmdResult(partial_mock.In('--show'), output=LOOP_DEV)
self.PatchObject(image_lib, 'GetImageDiskPartitionInfo',
return_value=LOOP_PARTITION_INFO)
self.PatchObject(glob, 'glob', return_value=LOOP_PARTS_LIST)
self.mount_mock = self.PatchObject(osutils, 'MountDir')
self.umount_mock = self.PatchObject(osutils, 'UmountDir')
self.retry_mock = self.PatchObject(retry_util, 'RetryException')
def fake_which(val, *_arg, **_kwargs):
return val
self.PatchObject(osutils, 'Which', side_effect=fake_which)
def testContextManager(self):
"""Test using the loopback class as a context manager."""
with image_lib.LoopbackPartitions(FAKE_PATH) as lb:
self.rc_mock.assertCommandContains(['losetup', '--show', '-f', FAKE_PATH])
self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV])
self.rc_mock.assertCommandContains(['partx', '-a', LOOP_DEV])
self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV],
expected=False)
self.assertEqual(lb.parts, LOOP_PARTS_DICT)
self.assertEqual(lb._gpt_table, LOOP_PARTITION_INFO)
self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV])
self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV])
def testContextManagerWithMounts(self):
"""Test using the loopback class as a context manager with mounts."""
syml = self.PatchObject(osutils, 'SafeSymlink')
part_ids = (1, 'ROOT-A')
with image_lib.LoopbackPartitions(
FAKE_PATH, part_ids=part_ids, mount_opts=('ro',)) as lb:
expected_mounts = set()
expected_calls = []
for part_id in part_ids:
for part in LOOP_PARTITION_INFO:
if part.name == part_id or part.number == part_id:
expected_mounts.add(part)
expected_calls.append(
mock.call('dir-%d' % part.number, os.path.join(
lb.destination, 'dir-%s' % part.name)))
break
self.rc_mock.assertCommandContains(['losetup', '--show', '-f', FAKE_PATH])
self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV])
self.rc_mock.assertCommandContains(['partx', '-a', LOOP_DEV])
self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV],
expected=False)
self.assertEqual(lb.parts, LOOP_PARTS_DICT)
self.assertEqual(lb._gpt_table, LOOP_PARTITION_INFO)
self.assertEqual(expected_calls, syml.call_args_list)
self.assertEqual(expected_mounts, lb._mounted)
self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV])
self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV])
def testManual(self):
"""Test using the loopback class closed manually."""
lb = image_lib.LoopbackPartitions(FAKE_PATH)
self.rc_mock.assertCommandContains(['losetup', '--show', '-f', FAKE_PATH])
self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV])
self.rc_mock.assertCommandContains(['partx', '-a', LOOP_DEV])
self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV],
expected=False)
self.assertEqual(lb.parts, LOOP_PARTS_DICT)
self.assertEqual(lb._gpt_table, LOOP_PARTITION_INFO)
lb.close()
self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV])
self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV])
def gcFunc(self):
"""This function isolates a local variable so it'll be garbage collected."""
lb = image_lib.LoopbackPartitions(FAKE_PATH)
self.rc_mock.assertCommandContains(['losetup', '--show', '-f', FAKE_PATH])
self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV])
self.rc_mock.assertCommandContains(['partx', '-a', LOOP_DEV])
self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV],
expected=False)
self.assertEqual(lb.parts, LOOP_PARTS_DICT)
self.assertEqual(lb._gpt_table, LOOP_PARTITION_INFO)
def testGarbageCollected(self):
"""Test using the loopback class closed by garbage collection."""
self.gcFunc()
# Force garbage collection in case python didn't already clean up the
# loopback object.
gc.collect()
self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV])
self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV])
def testMountUnmount(self):
"""Test Mount() and Unmount() entry points."""
lb = image_lib.LoopbackPartitions(FAKE_PATH, destination=self.tempdir)
# Mount four partitions.
lb.Mount((1, 3, 'ROOT-B', 'ROOT-C'))
for p in (1, 3, 5, 7):
self.mount_mock.assert_any_call(
'%sp%d' % (LOOP_DEV, p), '%s/dir-%d' % (self.tempdir, p),
makedirs=True, skip_mtab=False, sudo=True, mount_opts=('ro',))
linkname = '%s/dir-%s' % (self.tempdir, LOOP_PARTITION_INFO[p - 1].name)
self.assertTrue(stat.S_ISLNK(os.lstat(linkname).st_mode))
self.assertEqual(4, self.mount_mock.call_count)
self.umount_mock.assert_not_called()
# Unmount half of them, confirm that they were unmounted.
lb.Unmount((1, 'ROOT-B'))
for p in (1, 5):
self.umount_mock.assert_any_call('%s/dir-%d' % (self.tempdir, p),
cleanup=False)
self.assertEqual(2, self.umount_mock.call_count)
self.umount_mock.reset_mock()
# Close the object, so that we unmount the other half of them.
lb.close()
for p in (3, 7):
self.umount_mock.assert_any_call('%s/dir-%d' % (self.tempdir, p),
cleanup=False)
self.assertEqual(2, self.umount_mock.call_count)
# Verify that the directories were cleaned up.
for p in (1, 3):
self.retry_mock.assert_any_call(
cros_build_lib.RunCommandError, 60, osutils.RmDir,
'%s/dir-%d' % (self.tempdir, p), sudo=True, sleep=1)
def testMountingMountedPartReturnsName(self):
"""Test that Mount returns the directory name even when already mounted."""
lb = image_lib.LoopbackPartitions(FAKE_PATH, destination=self.tempdir)
dirname = '%s/dir-%d' % (self.tempdir, lb._gpt_table[0].number)
# First make sure we get the directory name when we actually mount.
self.assertEqual(dirname, lb._Mount(lb._gpt_table[0], ('ro',)))
# Then make sure we get it when we call it again.
self.assertEqual(dirname, lb._Mount(lb._gpt_table[0], ('ro',)))
lb.close()
def testRemountCallsMount(self):
"""Test that Mount returns the directory name even when already mounted."""
lb = image_lib.LoopbackPartitions(FAKE_PATH, destination=self.tempdir)
devname = '%sp%d' % (LOOP_DEV, lb._gpt_table[0].number)
dirname = '%s/dir-%d' % (self.tempdir, lb._gpt_table[0].number)
# First make sure we get the directory name when we actually mount.
self.assertEqual(dirname, lb._Mount(lb._gpt_table[0], ('ro',)))
self.mount_mock.assert_called_once_with(
devname, dirname,
makedirs=True, skip_mtab=False, sudo=True, mount_opts=('ro',))
# Then make sure we get it when we call it again.
self.assertEqual(dirname, lb._Mount(lb._gpt_table[0], ('remount', 'rw')))
self.assertEqual(
mock.call(devname, dirname, makedirs=True, skip_mtab=False,
sudo=True, mount_opts=('remount', 'rw')),
self.mount_mock.call_args)
lb.close()
def testGetPartitionDevName(self):
"""Test GetPartitionDevName()."""
lb = image_lib.LoopbackPartitions(FAKE_PATH)
for part in LOOP_PARTITION_INFO:
self.assertEqual('%sp%d' % (LOOP_DEV, part.number),
lb.GetPartitionDevName(part.number))
if part.name != 'reserved':
self.assertEqual('%sp%d' % (LOOP_DEV, part.number),
lb.GetPartitionDevName(part.name))
lb.close()
def test_GetMountPointAndSymlink(self):
"""Test _GetMountPointAndSymlink()."""
lb = image_lib.LoopbackPartitions(FAKE_PATH, destination=self.tempdir)
for part in LOOP_PARTITION_INFO:
expected = [os.path.join(lb.destination, 'dir-%s' % n)
for n in (part.number, part.name)]
self.assertEqual(expected, list(lb._GetMountPointAndSymlink(part)))
lb.close()
def testIsExt2OnVarious(self):
"""Test _IsExt2 works with the various partition types."""
FS_PARTITIONS = (1, 3, 8)
# STATE, ROOT-A, and OEM generally have ext2 filesystems.
for x in FS_PARTITIONS:
self.rc_mock.AddCmdResult(
partial_mock.In('if=%sp%d' % (LOOP_DEV, x)),
output=b'\x53\xef')
# Throw errors on all of the partitions that are < 1000 bytes.
for part in LOOP_PARTITION_INFO:
if part.size < 1000:
self.rc_mock.AddCmdResult(
partial_mock.In('if=%sp%d' % (LOOP_DEV, part.number)),
returncode=1, error='Seek failed\n')
lb = image_lib.LoopbackPartitions(FAKE_PATH, destination=self.tempdir)
# We expect that only the partitions in FS_PARTITIONS are ext2.
self.assertEqual(
[part.number in FS_PARTITIONS for part in LOOP_PARTITION_INFO],
[lb._IsExt2(part.name) for part in LOOP_PARTITION_INFO])
lb.close()
class LsbUtilsTest(cros_test_lib.MockTempDirTestCase):
"""Tests the various LSB utilities."""
def setUp(self):
# Patch os.getuid(..) to pretend running as root, so reading/writing the
# lsb-release file doesn't require escalated privileges and the test can
# clean itself up correctly.
self.PatchObject(os, 'getuid', return_value=0)
def testWriteLsbRelease(self):
"""Tests writing out the lsb_release file using WriteLsbRelease(..)."""
rc_mock = self.PatchObject(cros_build_lib, 'sudo_run')
fields = collections.OrderedDict((
('x', '1'), ('y', '2'), ('foo', 'bar'),
))
image_lib.WriteLsbRelease(self.tempdir, fields)
lsb_release_file = os.path.join(self.tempdir, 'etc', 'lsb-release')
expected_content = 'x=1\ny=2\nfoo=bar\n'
self.assertFileContents(lsb_release_file, expected_content)
rc_mock.assert_called_once_with([
'setfattr', '-n', 'security.selinux', '-v',
'u:object_r:cros_conf_file:s0',
os.path.join(self.tempdir, 'etc/lsb-release')])
# Test that WriteLsbRelease(..) correctly handles an existing file.
rc_mock = self.PatchObject(cros_build_lib, 'sudo_run')
fields = collections.OrderedDict((
('newkey1', 'value1'), ('newkey2', 'value2'), ('a', '3'), ('b', '4'),
))
image_lib.WriteLsbRelease(self.tempdir, fields)
expected_content = ('x=1\ny=2\nfoo=bar\nnewkey1=value1\nnewkey2=value2\n'
'a=3\nb=4\n')
self.assertFileContents(lsb_release_file, expected_content)
rc_mock.assert_called_once_with([
'setfattr', '-n', 'security.selinux', '-v',
'u:object_r:cros_conf_file:s0',
os.path.join(self.tempdir, 'etc/lsb-release')])
class BuildImagePathTest(cros_test_lib.MockTempDirTestCase):
"""BuildImagePath tests."""
def setUp(self):
self.board = 'board'
self.board_dir = os.path.join(self.tempdir, self.board)
D = cros_test_lib.Directory
filesystem = (
D(self.board, ('recovery_image.bin', 'other_image.bin')),
'full_path_image.bin',
)
cros_test_lib.CreateOnDiskHierarchy(self.tempdir, filesystem)
self.full_path = os.path.join(self.tempdir, 'full_path_image.bin')
def testBuildImagePath(self):
"""BuildImagePath tests."""
self.PatchObject(image_lib, 'GetLatestImageLink',
return_value=os.path.join(self.tempdir, self.board))
# Board and full image path provided.
result = image_lib.BuildImagePath(self.board, self.full_path)
self.assertEqual(self.full_path, result)
# Only full image path provided.
result = image_lib.BuildImagePath(None, self.full_path)
self.assertEqual(self.full_path, result)
# Full image path provided that does not exist.
with self.assertRaises(image_lib.ImageDoesNotExistError):
image_lib.BuildImagePath(self.board, '/does/not/exist')
with self.assertRaises(image_lib.ImageDoesNotExistError):
image_lib.BuildImagePath(None, '/does/not/exist')
# Default image is used.
result = image_lib.BuildImagePath(self.board, None)
self.assertEqual(os.path.join(self.board_dir, 'recovery_image.bin'), result)
# Image basename provided.
result = image_lib.BuildImagePath(self.board, 'other_image.bin')
self.assertEqual(os.path.join(self.board_dir, 'other_image.bin'), result)
# Image basename provided that does not exist.
with self.assertRaises(image_lib.ImageDoesNotExistError):
image_lib.BuildImagePath(self.board, 'does_not_exist.bin')
default_mock = self.PatchObject(cros_build_lib, 'GetDefaultBoard')
# Nothing provided, and no default.
default_mock.return_value = None
with self.assertRaises(image_lib.ImageDoesNotExistError):
image_lib.BuildImagePath(None, None)
# Nothing provided, with default.
default_mock.return_value = 'board'
result = image_lib.BuildImagePath(None, None)
self.assertEqual(os.path.join(self.board_dir, 'recovery_image.bin'), result)
class SecurityTestConfigTest(cros_test_lib.RunCommandTempDirTestCase):
"""SecurityTestConfig class tests."""
# pylint: disable=protected-access
def setUp(self):
self.image = '/path/to/image.bin'
self.baselines = '/path/to/baselines'
self.vboot_hash = 'abc123'
self.config = image_lib.SecurityTestConfig(self.image, self.baselines,
self.vboot_hash, self.tempdir)
def testVbootCheckout(self):
"""Test normal flow - clone and checkout."""
clone_patch = self.PatchObject(git, 'Clone')
self.config._VbootCheckout()
clone_patch.assert_called_once()
self.assertCommandContains(['git', 'checkout', self.vboot_hash])
# Make sure it doesn't try to clone & checkout again after already having
# done so successfully.
clone_patch = self.PatchObject(git, 'Clone')
self.config._VbootCheckout()
clone_patch.assert_not_called()
def testVbootCheckoutError(self):
"""Test exceptions in a git command."""
rce = cros_build_lib.RunCommandError('error')
self.PatchObject(git, 'Clone', side_effect=rce)
with self.assertRaises(image_lib.VbootCheckoutError):
self.config._VbootCheckout()
def testVbootCheckoutNoDirectory(self):
"""Test the error handling when the directory does not exist."""
# Test directory that does not exist.
self.config.directory = '/DOES/NOT/EXIST'
with self.assertRaises(image_lib.SecurityConfigDirectoryError):
self.config._VbootCheckout()
def testRunCheck(self):
"""RunCheck tests."""
# No config argument when running check.
self.config.RunCheck('check1', False)
check1 = os.path.join(self.config._checks_dir, 'ensure_check1.sh')
config1 = os.path.join(self.baselines, 'ensure_check1.config')
self.assertCommandContains([check1, self.image])
self.assertCommandContains([config1], expected=False)
# Include config argument when running check.
self.config.RunCheck('check2', True)
check2 = os.path.join(self.config._checks_dir, 'ensure_check2.sh')
config2 = os.path.join(self.baselines, 'ensure_check2.config')
self.assertCommandContains([check2, self.image, config2])
class GetImageDiskPartitionInfoTests(cros_test_lib.RunCommandTestCase):
"""Tests the GetImageDiskPartitionInfo function."""
SAMPLE_PARTED = """/foo/chromiumos_qemu_image.bin:\
2271240192B:file:512:512:gpt::;
11:32768B:8421375B:8388608B::RWFW:;
6:8421376B:8421887B:512B::KERN-C:;
7:8421888B:8422399B:512B::ROOT-C:;
9:8422400B:8422911B:512B::reserved:;
10:8422912B:8423423B:512B::reserved:;
2:10485760B:27262975B:16777216B::KERN-A:;
4:27262976B:44040191B:16777216B::KERN-B:;
8:44040192B:60817407B:16777216B:ext4:OEM:msftdata;
12:127926272B:161480703B:33554432B:fat16:EFI-SYSTEM:boot, esp;
5:161480704B:163577855B:2097152B::ROOT-B:;
3:163577856B:2260729855B:2097152000B:ext2:ROOT-A:;
1:2260729856B:2271215615B:10485760B:ext2:STATE:msftdata;
"""
SAMPLE_CGPT = """
start size part contents
0 1 PMBR (Boot GUID: 88FB7EB8-2B3F-B943-B933-\
EEC571FFB6E1)
1 1 Pri GPT header
2 32 Pri GPT table
1921024 2097152 1 Label: "STATE"
Type: Linux data
UUID: EEBD83BE-397E-BD44-878B-0DDDD5A5C510
20480 32768 2 Label: "KERN-A"
Type: ChromeOS kernel
UUID: 7007C2F3-08E5-AB40-A4BC-FF5B01F5460D
Attr: priority=15 tries=15 successful=1
1101824 819200 3 Label: "ROOT-A"
Type: ChromeOS rootfs
UUID: F4C5C3AD-027F-894B-80CD-3DEC57932948
53248 32768 4 Label: "KERN-B"
Type: ChromeOS kernel
UUID: C85FB478-404C-8741-ADB8-11312A35880D
Attr: priority=0 tries=0 successful=0
282624 819200 5 Label: "ROOT-B"
Type: ChromeOS rootfs
UUID: A99F4231-1EC3-C542-AC0C-DF3729F5DB07
16448 1 6 Label: "KERN-C"
Type: ChromeOS kernel
UUID: 81F0E336-FAC9-174D-A08C-864FE627B637
Attr: priority=0 tries=0 successful=0
16449 1 7 Label: "ROOT-C"
Type: ChromeOS rootfs
UUID: 9E127FCA-30C1-044E-A5F2-DF74E6932692
86016 32768 8 Label: "OEM"
Type: Linux data
UUID: 72986347-A37C-684F-9A19-4DBAF41C55A9
16450 1 9 Label: "reserved"
Type: ChromeOS reserved
UUID: BA85A0A7-1850-964D-8EF8-6707AC106C3A
16451 1 10 Label: "reserved"
Type: ChromeOS reserved
UUID: 16C9EC9B-50FA-DD46-98DC-F781360817B4
64 16384 11 Label: "RWFW"
Type: ChromeOS firmware
UUID: BE8AECB9-4F78-7C44-8F23-5A9273B7EC8F
249856 32768 12 Label: "EFI-SYSTEM"
Type: EFI System Partition
UUID: 88FB7EB8-2B3F-B943-B933-EEC571FFB6E1
4050847 32 Sec GPT table
4050879 1 Sec GPT header
"""
def testCgpt(self):
"""Tests that we can list all partitions with `cgpt` correctly."""
self.PatchObject(cros_build_lib, 'IsInsideChroot', return_value=True)
self.rc.AddCmdResult(partial_mock.Ignore(), output=self.SAMPLE_CGPT)
partitions = image_lib.GetImageDiskPartitionInfo('...')
part_dict = {p.name: p for p in partitions}
self.assertEqual(part_dict['STATE'].start, 983564288)
self.assertEqual(part_dict['STATE'].size, 1073741824)
self.assertEqual(part_dict['STATE'].number, 1)
self.assertEqual(part_dict['STATE'].name, 'STATE')
self.assertEqual(part_dict['EFI-SYSTEM'].start, 249856 * 512)
self.assertEqual(part_dict['EFI-SYSTEM'].size, 32768 * 512)
self.assertEqual(part_dict['EFI-SYSTEM'].number, 12)
self.assertEqual(part_dict['EFI-SYSTEM'].name, 'EFI-SYSTEM')
self.assertEqual(12, len(partitions))
def testNormalPath(self):
self.PatchObject(cros_build_lib, 'IsInsideChroot', return_value=False)
self.rc.AddCmdResult(partial_mock.Ignore(), output=self.SAMPLE_PARTED)
partitions = image_lib.GetImageDiskPartitionInfo('_ignored')
part_dict = {p.name: p for p in partitions}
self.assertEqual(12, len(partitions))
self.assertEqual(1, part_dict['STATE'].number)
self.assertEqual(2097152000, part_dict['ROOT-A'].size)
def testKeyedByNumber(self):
self.PatchObject(cros_build_lib, 'IsInsideChroot', return_value=False)
self.rc.AddCmdResult(partial_mock.Ignore(), output=self.SAMPLE_PARTED)
partitions = image_lib.GetImageDiskPartitionInfo(
'_ignored'
)
part_dict = {p.number: p for p in partitions}
self.assertEqual(12, len(part_dict))
self.assertEqual('STATE', part_dict[1].name)
self.assertEqual(2097152000, part_dict[3].size)
self.assertEqual('reserved', part_dict[9].name)
self.assertEqual('reserved', part_dict[10].name)
def testChangeUnitInsideChroot(self):
self.PatchObject(cros_build_lib, 'IsInsideChroot', return_value=True)
self.rc.AddCmdResult(partial_mock.Ignore(), output=self.SAMPLE_CGPT)
partitions = image_lib.GetImageDiskPartitionInfo('_ignored')
part_dict = {p.name: p for p in partitions}
self.assertEqual(part_dict['STATE'].start, 983564288)
self.assertEqual(part_dict['STATE'].size, 1073741824)
| bsd-3-clause | -1,855,077,315,737,941,500 | 42.074576 | 80 | 0.638939 | false | 3.367877 | true | false | false |
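# A stripped-down illustration of the patch-and-assert style used in the tests
# above, written against the standard library's unittest.mock rather than
# chromite's cros_test_lib / partial_mock helpers (not shown here).
import subprocess
import unittest
from unittest import mock

def detach_loopback(dev):
    return subprocess.call(['losetup', '--detach', dev])

class DetachTest(unittest.TestCase):
    def test_detach_invokes_losetup(self):
        with mock.patch.object(subprocess, 'call', return_value=0) as call_mock:
            self.assertEqual(0, detach_loopback('/dev/loop9999'))
            call_mock.assert_called_once_with(['losetup', '--detach', '/dev/loop9999'])

if __name__ == '__main__':
    unittest.main()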
pygeek/django | django/contrib/gis/db/backends/postgis/creation.py | 1 | 3881 | from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.postgresql_psycopg2.creation import DatabaseCreation
class PostGISCreation(DatabaseCreation):
geom_index_type = 'GIST'
geom_index_opts = 'GIST_GEOMETRY_OPS'
def sql_indexes_for_field(self, model, f, style):
"Return any spatial index creation SQL for the field."
from django.contrib.gis.db.models.fields import GeometryField
output = super(PostGISCreation, self).sql_indexes_for_field(model, f, style)
if isinstance(f, GeometryField):
gqn = self.connection.ops.geo_quote_name
qn = self.connection.ops.quote_name
db_table = model._meta.db_table
if f.geography:
                # Geography columns are created normally.
pass
else:
# Geometry columns are created by `AddGeometryColumn`
# stored procedure.
output.append(style.SQL_KEYWORD('SELECT ') +
style.SQL_TABLE('AddGeometryColumn') + '(' +
style.SQL_TABLE(gqn(db_table)) + ', ' +
style.SQL_FIELD(gqn(f.column)) + ', ' +
style.SQL_FIELD(str(f.srid)) + ', ' +
style.SQL_COLTYPE(gqn(f.geom_type)) + ', ' +
style.SQL_KEYWORD(str(f.dim)) + ');')
if not f.null:
# Add a NOT NULL constraint to the field
output.append(style.SQL_KEYWORD('ALTER TABLE ') +
style.SQL_TABLE(qn(db_table)) +
style.SQL_KEYWORD(' ALTER ') +
style.SQL_FIELD(qn(f.column)) +
style.SQL_KEYWORD(' SET NOT NULL') + ';')
if f.spatial_index:
# Spatial indexes created the same way for both Geometry and
# Geography columns.
# PostGIS 2.0 does not support GIST_GEOMETRY_OPS. So, on 1.5
# we use GIST_GEOMETRY_OPS, on 2.0 we use either "nd" ops
# which are fast on multidimensional cases, or just plain
# gist index for the 2d case.
if f.geography:
index_opts = ''
elif self.connection.ops.spatial_version >= (2, 0):
if f.dim > 2:
index_opts = ' ' + style.SQL_KEYWORD('gist_geometry_ops_nd')
else:
index_opts = ''
else:
index_opts = ' ' + style.SQL_KEYWORD(self.geom_index_opts)
output.append(style.SQL_KEYWORD('CREATE INDEX ') +
style.SQL_TABLE(qn('%s_%s_id' % (db_table, f.column))) +
style.SQL_KEYWORD(' ON ') +
style.SQL_TABLE(qn(db_table)) +
style.SQL_KEYWORD(' USING ') +
style.SQL_COLTYPE(self.geom_index_type) + ' ( ' +
style.SQL_FIELD(qn(f.column)) + index_opts + ' );')
return output
def sql_table_creation_suffix(self):
cursor = self.connection.cursor()
cursor.execute('SELECT datname FROM pg_database;')
db_names = [row[0] for row in cursor.fetchall()]
postgis_template = getattr(settings, 'POSTGIS_TEMPLATE', 'template_postgis')
if postgis_template in db_names:
qn = self.connection.ops.quote_name
return ' TEMPLATE %s' % qn(postgis_template)
elif self.connection.ops.spatial_version < (2, 0):
raise ImproperlyConfigured("Template database '%s' does not exist." % postgis_template)
else:
return ''
| bsd-3-clause | -6,418,817,060,973,817,000 | 48.126582 | 99 | 0.509147 | false | 4.35578 | false | false | false |
sciencewiz1/datahub | src/datahub/browser/views.py | 1 | 1691 | import json, sys, re, hashlib, smtplib, base64, urllib, os
from auth import *
from core.account import manager
from django.http import *
from django.shortcuts import render_to_response
from django.views.decorators.csrf import csrf_exempt
from django.core.context_processors import csrf
from django.core.validators import email_re
from django.db.utils import IntegrityError
from django.utils.http import urlquote_plus
'''
@author: Anant Bhardwaj
@date: Mar 21, 2013
Datahub Web Handler
'''
@login_required
def user(request, username=None):
try:
if(username):
res = manager.list_databases(username)
return render_to_response("user.html", {'username': username, 'db_names':res['db_names']})
else:
user = request.session[kLogIn]
return HttpResponseRedirect(user)
except KeyError:
return HttpResponseRedirect('/login')
def new_database_form(request, username):
return render_to_response("new_database.html", {'username': username})
@login_required
def new_database(request, username, db_name):
manager.create_database(username, db_name)
return HttpResponseRedirect("/"+username)
@login_required
def database(request, username, db_name):
try:
res = manager.list_tables(db_name)
return render_to_response("database.html", {'username': username, 'db_name':db_name, 'table_names':res['table_names']})
except Exception, e:
return HttpResponse(request_error, mimetype="application/json")
@login_required
def table(request, username, db_name, table_name):
try:
return render_to_response("table.html", {'username': username, 'db_name':db_name, 'table_name':table_name})
except Exception, e:
return HttpResponse(request_error, mimetype="application/json")
| mit | -9,209,395,502,883,774,000 | 28.666667 | 121 | 0.752218 | false | 3.443992 | false | false | false |
downquark/algorithms | square_root.py | 1 | 1141 | from math import floor
def sqrt(S):
"""Given an integer, return the square root.
A continued fraction expansion implementation.
https://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Continued_fraction_expansion
Args:
S: Any natural number
"""
i = 0
s = 1
if S == 0 or S == 1: return S
while s ** 2 < S:
if i ** 2 == S:
return i
s = s * 2
i += 1
return __search((s / 2), s, S)
def __search(i, k, S):
j = i + ((k - i) / 2)
s = j ** 2
if s == S:
return j
elif k == i + 1:
return __continued_fraction(S, [j], 1, 0)
elif s > S:
return __search(i, j, S)
elif s < S:
return __search(j, k, S)
def __continued_fraction(S, a, d_n, m_n):
n = len(a) - 1
m_1 = (d_n * a[n]) - m_n
d_1 = (S - m_1 ** 2) / d_n
a_1 = int(floor((a[0] + m_1) / d_1))
a.append(a_1)
if a_1 != 2 * a[0] and len(a) < 11:
return __continued_fraction(S, a, d_1, m_1)
else:
result = 1.0
while len(a):
result = a.pop() + (1 / result)
return result | gpl-2.0 | 7,549,270,143,950,171,000 | 24.377778 | 100 | 0.468887 | false | 2.874055 | false | false | false |
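# A quick check of sqrt() above against math.sqrt, assuming the file is saved as
# square_root.py and run under Python 2 (the helpers rely on integer division).
# Perfect squares come back as their exact root; other inputs only get a rough
# truncated continued-fraction approximation.
import math
from square_root import sqrt

for n in (4, 9, 16, 2, 10):
    print("%d: %s (math.sqrt: %f)" % (n, sqrt(n), math.sqrt(n)))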
brucework/demo | src/python/hello/objvar.py | 1 | 1332 | #!/usr/bin/python
#Filename: objvar.py
class Person:
"'Represents a person.'"
population = 0
def __init__(self, name):
        '''Initializes the person's data.'''
self.name = name
print '(Initializing %s)'% self.name
#When this person is created, he/she
#adds to the population
Person.population += 1
def __del__(self):
"I am dying."
print '%s says bye.' %self.name
Person.population -= 1
if Person.population == 0:
print 'I am the last one.'
else:
print 'There are still %d people left.'%Person.population
def sayHi(self):
'''Greeting by the person.
Really, that's all it does.'''
print 'Hi, my name is %s.'%self.name
def howMany(self):
'''Prints the current population.'''
if Person.population == 1:
print 'I am the only person here.'
else:
            print 'We have %d persons here.' % Person.population
swaroop = Person('Swaroop')
swaroop.sayHi()
swaroop.howMany()
bruce = Person('bruce A')
bruce.sayHi()
bruce.howMany()
test = Person('test 120')
test.sayHi()
test.howMany()
del test
kalam = Person('A bdul K alam')
kalam.sayHi()
kalam.howMany()
del swaroop
del bruce
del kalam
#swaroop.sayHi()
#swaroop.howMany()
| gpl-3.0 | 8,220,158,943,830,193,000 | 19.492308 | 69 | 0.583333 | false | 3.380711 | false | false | false |
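# A minimal sketch of the class-variable counting idea demonstrated above,
# without the __del__ side effects (names here are illustrative only).
class Counter(object):
    population = 0               # shared across all instances

    def __init__(self):
        Counter.population += 1  # bump the class attribute, not an instance one

a, b = Counter(), Counter()
print(Counter.population)        # 2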
Southpaw-TACTIC/Team | src/python/Lib/site-packages/PySide/examples/tutorials/addressbook/part4.py | 1 | 9061 | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2009 Nokia Corporation and/or its subsidiary(-ies).
## Contact: Qt Software Information ([email protected])
##
## This file is part of the example classes of the Qt Toolkit.
##
#############################################################################
from PySide import QtCore, QtGui
class SortedDict(dict):
class Iterator(object):
def __init__(self, sorted_dict):
self._dict = sorted_dict
self._keys = sorted(self._dict.keys())
self._nr_items = len(self._keys)
self._idx = 0
def __iter__(self):
return self
def next(self):
if self._idx >= self._nr_items:
raise StopIteration
key = self._keys[self._idx]
value = self._dict[key]
self._idx += 1
return key, value
__next__ = next
def __iter__(self):
return SortedDict.Iterator(self)
iterkeys = __iter__
class AddressBook(QtGui.QWidget):
NavigationMode, AddingMode, EditingMode = range(3)
def __init__(self, parent=None):
super(AddressBook, self).__init__(parent)
self.contacts = SortedDict()
self.oldName = ''
self.oldAddress = ''
self.currentMode = self.NavigationMode
nameLabel = QtGui.QLabel("Name:")
self.nameLine = QtGui.QLineEdit()
self.nameLine.setReadOnly(True)
addressLabel = QtGui.QLabel("Address:")
self.addressText = QtGui.QTextEdit()
self.addressText.setReadOnly(True)
self.addButton = QtGui.QPushButton("&Add")
self.addButton.show()
self.editButton = QtGui.QPushButton("&Edit")
self.editButton.setEnabled(False)
self.removeButton = QtGui.QPushButton("&Remove")
self.removeButton.setEnabled(False)
self.submitButton = QtGui.QPushButton("&Submit")
self.submitButton.hide()
self.cancelButton = QtGui.QPushButton("&Cancel")
self.cancelButton.hide()
self.nextButton = QtGui.QPushButton("&Next")
self.nextButton.setEnabled(False)
self.previousButton = QtGui.QPushButton("&Previous")
self.previousButton.setEnabled(False)
self.addButton.clicked.connect(self.addContact)
self.submitButton.clicked.connect(self.submitContact)
self.editButton.clicked.connect(self.editContact)
self.removeButton.clicked.connect(self.removeContact)
self.cancelButton.clicked.connect(self.cancel)
self.nextButton.clicked.connect(self.next)
self.previousButton.clicked.connect(self.previous)
buttonLayout1 = QtGui.QVBoxLayout()
buttonLayout1.addWidget(self.addButton)
buttonLayout1.addWidget(self.editButton)
buttonLayout1.addWidget(self.removeButton)
buttonLayout1.addWidget(self.submitButton)
buttonLayout1.addWidget(self.cancelButton)
buttonLayout1.addStretch()
buttonLayout2 = QtGui.QHBoxLayout()
buttonLayout2.addWidget(self.previousButton)
buttonLayout2.addWidget(self.nextButton)
mainLayout = QtGui.QGridLayout()
mainLayout.addWidget(nameLabel, 0, 0)
mainLayout.addWidget(self.nameLine, 0, 1)
mainLayout.addWidget(addressLabel, 1, 0, QtCore.Qt.AlignTop)
mainLayout.addWidget(self.addressText, 1, 1)
mainLayout.addLayout(buttonLayout1, 1, 2)
mainLayout.addLayout(buttonLayout2, 3, 1)
self.setLayout(mainLayout)
self.setWindowTitle("Simple Address Book")
def addContact(self):
self.oldName = self.nameLine.text()
self.oldAddress = self.addressText.toPlainText()
self.nameLine.clear()
self.addressText.clear()
self.updateInterface(self.AddingMode)
def editContact(self):
self.oldName = self.nameLine.text()
self.oldAddress = self.addressText.toPlainText()
self.updateInterface(self.EditingMode)
def submitContact(self):
name = self.nameLine.text()
address = self.addressText.toPlainText()
if name == "" or address == "":
QtGui.QMessageBox.information(self, "Empty Field",
"Please enter a name and address.")
return
if self.currentMode == self.AddingMode:
if name not in self.contacts:
self.contacts[name] = address
QtGui.QMessageBox.information(self, "Add Successful",
"\"%s\" has been added to your address book." % name)
else:
QtGui.QMessageBox.information(self, "Add Unsuccessful",
"Sorry, \"%s\" is already in your address book." % name)
return
elif self.currentMode == self.EditingMode:
if self.oldName != name:
if name not in self.contacts:
QtGui.QMessageBox.information(self, "Edit Successful",
"\"%s\" has been edited in your address book." % self.oldName)
del self.contacts[self.oldName]
self.contacts[name] = address
else:
QtGui.QMessageBox.information(self, "Edit Unsuccessful",
"Sorry, \"%s\" is already in your address book." % name)
return
elif self.oldAddress != address:
QtGui.QMessageBox.information(self, "Edit Successful",
"\"%s\" has been edited in your address book." % name)
self.contacts[name] = address
self.updateInterface(self.NavigationMode)
def cancel(self):
self.nameLine.setText(self.oldName)
self.addressText.setText(self.oldAddress)
self.updateInterface(self.NavigationMode)
def removeContact(self):
name = self.nameLine.text()
address = self.addressText.toPlainText()
if name in self.contacts:
button = QtGui.QMessageBox.question(self, "Confirm Remove",
"Are you sure you want to remove \"%s\"?" % name,
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
if button == QtGui.QMessageBox.Yes:
self.previous()
del self.contacts[name]
QtGui.QMessageBox.information(self, "Remove Successful",
"\"%s\" has been removed from your address book." % name)
self.updateInterface(self.NavigationMode)
def next(self):
name = self.nameLine.text()
it = iter(self.contacts)
try:
while True:
this_name, _ = it.next()
if this_name == name:
next_name, next_address = it.next()
break
except StopIteration:
next_name, next_address = iter(self.contacts).next()
self.nameLine.setText(next_name)
self.addressText.setText(next_address)
def previous(self):
name = self.nameLine.text()
prev_name = prev_address = None
        for this_name, this_address in self.contacts.items():
if this_name == name:
break
prev_name = this_name
prev_address = this_address
else:
self.nameLine.clear()
self.addressText.clear()
return
if prev_name is None:
            for prev_name, prev_address in self.contacts.items():
pass
self.nameLine.setText(prev_name)
self.addressText.setText(prev_address)
def updateInterface(self, mode):
self.currentMode = mode
if self.currentMode in (self.AddingMode, self.EditingMode):
self.nameLine.setReadOnly(False)
self.nameLine.setFocus(QtCore.Qt.OtherFocusReason)
self.addressText.setReadOnly(False)
self.addButton.setEnabled(False)
self.editButton.setEnabled(False)
self.removeButton.setEnabled(False)
self.nextButton.setEnabled(False)
self.previousButton.setEnabled(False)
self.submitButton.show()
self.cancelButton.show()
elif self.currentMode == self.NavigationMode:
if not self.contacts:
self.nameLine.clear()
self.addressText.clear()
self.nameLine.setReadOnly(True)
self.addressText.setReadOnly(True)
self.addButton.setEnabled(True)
number = len(self.contacts)
self.editButton.setEnabled(number >= 1)
self.removeButton.setEnabled(number >= 1)
self.nextButton.setEnabled(number > 1)
            self.previousButton.setEnabled(number > 1)
self.submitButton.hide()
self.cancelButton.hide()
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
addressBook = AddressBook()
addressBook.show()
sys.exit(app.exec_())
| epl-1.0 | 8,617,441,876,815,813,000 | 32.684015 | 90 | 0.584152 | false | 4.354157 | false | false | false |
kret0s/gnuhealth-live | tryton/server/trytond-3.8.3/trytond/ir/rule.py | 1 | 10814 | # This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
from ..model import ModelView, ModelSQL, fields, EvalEnvironment, Check
from ..transaction import Transaction
from ..cache import Cache
from ..pool import Pool
from .. import backend
from ..pyson import PYSONDecoder
__all__ = [
'RuleGroup', 'Rule',
]
class RuleGroup(ModelSQL, ModelView):
"Rule group"
__name__ = 'ir.rule.group'
name = fields.Char('Name', select=True)
model = fields.Many2One('ir.model', 'Model', select=True,
required=True)
global_p = fields.Boolean('Global', select=True,
help="Make the rule global \nso every users must follow this rule")
default_p = fields.Boolean('Default', select=True,
help="Add this rule to all users by default")
rules = fields.One2Many('ir.rule', 'rule_group', 'Tests',
help="The rule is satisfied if at least one test is True")
groups = fields.Many2Many('ir.rule.group-res.group',
'rule_group', 'group', 'Groups')
# TODO remove to only use groups
users = fields.Many2Many('ir.rule.group-res.user',
'rule_group', 'user', 'Users')
perm_read = fields.Boolean('Read Access')
perm_write = fields.Boolean('Write Access')
perm_create = fields.Boolean('Create Access')
perm_delete = fields.Boolean('Delete Access')
@classmethod
def __setup__(cls):
super(RuleGroup, cls).__setup__()
cls._order.insert(0, ('model', 'ASC'))
cls._order.insert(1, ('global_p', 'ASC'))
cls._order.insert(2, ('default_p', 'ASC'))
t = cls.__table__()
cls._sql_constraints += [
('global_default_exclusive',
Check(t, (t.global_p == False) | (t.default_p == False)),
'Global and Default are mutually exclusive!'),
]
@staticmethod
def default_global_p():
return True
@staticmethod
def default_default_p():
return False
@staticmethod
def default_perm_read():
return True
@staticmethod
def default_perm_write():
return True
@staticmethod
def default_perm_create():
return True
@staticmethod
def default_perm_delete():
return True
@classmethod
def delete(cls, groups):
super(RuleGroup, cls).delete(groups)
# Restart the cache on the domain_get method of ir.rule
Pool().get('ir.rule')._domain_get_cache.clear()
@classmethod
def create(cls, vlist):
res = super(RuleGroup, cls).create(vlist)
# Restart the cache on the domain_get method of ir.rule
Pool().get('ir.rule')._domain_get_cache.clear()
return res
@classmethod
def write(cls, groups, vals, *args):
super(RuleGroup, cls).write(groups, vals, *args)
# Restart the cache on the domain_get method of ir.rule
Pool().get('ir.rule')._domain_get_cache.clear()
class Rule(ModelSQL, ModelView):
"Rule"
__name__ = 'ir.rule'
rule_group = fields.Many2One('ir.rule.group', 'Group', select=True,
required=True, ondelete="CASCADE")
domain = fields.Char('Domain', required=True,
help='Domain is evaluated with a PYSON context containing:\n'
'- "user" as the current user')
_domain_get_cache = Cache('ir_rule.domain_get', context=False)
@classmethod
def __setup__(cls):
super(Rule, cls).__setup__()
cls._error_messages.update({
'invalid_domain': 'Invalid domain in rule "%s".',
})
@classmethod
def __register__(cls, module_name):
TableHandler = backend.get('TableHandler')
super(Rule, cls).__register__(module_name)
table = TableHandler(Transaction().cursor, cls, module_name)
# Migration from 2.6: replace field, operator and operand by domain
table.not_null_action('field', action='remove')
table.not_null_action('operator', action='remove')
table.not_null_action('operand', action='remove')
@classmethod
def validate(cls, rules):
super(Rule, cls).validate(rules)
cls.check_domain(rules)
@classmethod
def check_domain(cls, rules):
ctx = cls._get_context()
for rule in rules:
try:
value = PYSONDecoder(ctx).decode(rule.domain)
except Exception:
cls.raise_user_error('invalid_domain', (rule.rec_name,))
if not isinstance(value, list):
cls.raise_user_error('invalid_domain', (rule.rec_name,))
else:
try:
fields.domain_validate(value)
except Exception:
cls.raise_user_error('invalid_domain', (rule.rec_name,))
@staticmethod
def _get_context():
User = Pool().get('res.user')
user_id = Transaction().user
with Transaction().set_context(_check_access=False, _datetime=None):
user = EvalEnvironment(User(user_id), User)
return {
'user': user,
}
@staticmethod
def _get_cache_key():
# _datetime value will be added to the domain
return (Transaction().user, Transaction().context.get('_datetime'))
@classmethod
def domain_get(cls, model_name, mode='read'):
assert mode in ['read', 'write', 'create', 'delete'], \
'Invalid domain mode for security'
# root user above constraint
if Transaction().user == 0:
if not Transaction().context.get('user'):
return
with Transaction().set_user(Transaction().context['user']):
return cls.domain_get(model_name, mode=mode)
key = (model_name, mode) + cls._get_cache_key()
domain = cls._domain_get_cache.get(key, False)
if domain is not False:
return domain
pool = Pool()
RuleGroup = pool.get('ir.rule.group')
Model = pool.get('ir.model')
RuleGroup_User = pool.get('ir.rule.group-res.user')
RuleGroup_Group = pool.get('ir.rule.group-res.group')
User_Group = pool.get('res.user-res.group')
cursor = Transaction().cursor
rule_table = cls.__table__()
rule_group = RuleGroup.__table__()
rule_group_user = RuleGroup_User.__table__()
rule_group_group = RuleGroup_Group.__table__()
user_group = User_Group.__table__()
model = Model.__table__()
user_id = Transaction().user
cursor.execute(*rule_table.join(rule_group,
condition=rule_group.id == rule_table.rule_group
).join(model,
condition=rule_group.model == model.id
).select(rule_table.id,
where=(model.model == model_name)
& (getattr(rule_group, 'perm_%s' % mode) == True)
& (rule_group.id.in_(
rule_group_user.select(rule_group_user.rule_group,
where=rule_group_user.user == user_id)
| rule_group_group.join(
user_group,
condition=(rule_group_group.group
== user_group.group)
).select(rule_group_group.rule_group,
where=user_group.user == user_id)
)
| (rule_group.default_p == True)
| (rule_group.global_p == True)
)))
ids = [x[0] for x in cursor.fetchall()]
if not ids:
cls._domain_get_cache.set(key, None)
return
clause = {}
clause_global = {}
ctx = cls._get_context()
# Use root user without context to prevent recursion
with Transaction().set_user(0), \
Transaction().set_context(user=0):
for rule in cls.browse(ids):
assert rule.domain, ('Rule domain empty,'
'check if migration was done')
dom = PYSONDecoder(ctx).decode(rule.domain)
if rule.rule_group.global_p:
clause_global.setdefault(rule.rule_group.id, ['OR'])
clause_global[rule.rule_group.id].append(dom)
else:
clause.setdefault(rule.rule_group.id, ['OR'])
clause[rule.rule_group.id].append(dom)
        # Check whether there is a rule_group that has no rule at all
cursor.execute(*rule_group.join(model,
condition=rule_group.model == model.id
).select(rule_group.id,
where=(model.model == model_name)
& ~rule_group.id.in_(rule_table.select(rule_table.rule_group))
& rule_group.id.in_(rule_group_user.select(
rule_group_user.rule_group,
where=rule_group_user.user == user_id)
| rule_group_group.join(user_group,
condition=rule_group_group.group == user_group.group
).select(rule_group_group.rule_group,
where=user_group.user == user_id))))
fetchone = cursor.fetchone()
if fetchone:
group_id = fetchone[0]
clause[group_id] = []
clause = clause.values()
if clause:
clause.insert(0, 'OR')
clause_global = clause_global.values()
if clause_global:
clause_global.insert(0, 'AND')
if clause and clause_global:
clause = ['AND', clause_global, clause]
elif clause_global:
clause = clause_global
cls._domain_get_cache.set(key, clause)
return clause
@classmethod
def query_get(cls, model_name, mode='read'):
pool = Pool()
Model = pool.get(model_name)
domain = cls.domain_get(model_name, mode=mode)
# Use root to prevent infinite recursion
with Transaction().set_user(0), \
Transaction().set_context(active_test=False, user=0):
return Model.search(domain, order=[], query=True)
@classmethod
def delete(cls, rules):
super(Rule, cls).delete(rules)
# Restart the cache on the domain_get method of ir.rule
cls._domain_get_cache.clear()
@classmethod
def create(cls, vlist):
res = super(Rule, cls).create(vlist)
# Restart the cache on the domain_get method of ir.rule
cls._domain_get_cache.clear()
return res
@classmethod
def write(cls, rules, vals, *args):
super(Rule, cls).write(rules, vals, *args)
# Restart the cache on the domain_get method
cls._domain_get_cache.clear()
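# --- Illustrative note added by the editor; not part of the original module. ---
# A rule's `domain` field stores a PYSON-encoded domain that is decoded with a context
# containing `user` (see Rule._get_context above). A hypothetical rule restricting
# records to the user's own company could be stored roughly like this:
#
#     from trytond.pyson import Eval, PYSONEncoder
#     domain = PYSONEncoder().encode(
#         [('company', '=', Eval('user', {}).get('company', None))])
#
# Rule.domain_get() then merges all applicable rule domains into a clause such as
# ['AND', <global rules>, ['OR', <group/default rules>]], which Rule.query_get()
# passes on to Model.search().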
| gpl-3.0 | 1,320,602,009,460,516,000 | 35.657627 | 78 | 0.561309 | false | 4.059309 | false | false | false |
opnsense/core | src/opnsense/scripts/ipsec/list_status.py | 1 | 3842 | #!/usr/local/bin/python3
"""
Copyright (c) 2015-2019 Ad Schellevis <[email protected]>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------------
list ipsec status, using vici interface
"""
import sys
import socket
import ujson
import vici
try:
s = vici.Session()
except socket.error:
# cannot connect to session, strongswan not running?
print ('ipsec not active')
sys.exit(0)
def parse_sa(in_conn):
result = {'local-addrs': '', 'remote-addrs': '', 'children': '', 'local-id': '', 'remote-id': ''}
result['version'] = in_conn['version']
if 'local_addrs' in in_conn:
result['local-addrs'] = b','.join(in_conn['local_addrs'])
elif 'local-host' in in_conn:
result['local-addrs'] = in_conn['local-host']
if 'remote_addrs' in in_conn:
result['remote-addrs'] = b','.join(in_conn['remote_addrs'])
elif 'remote-host' in in_conn:
result['remote-addrs'] = in_conn['remote-host']
if 'children' in in_conn:
result['children'] = in_conn['children']
result['sas'] = []
return result
result = dict()
# parse connections
for conns in s.list_conns():
for connection_id in conns:
result[connection_id] = parse_sa(conns[connection_id])
result[connection_id]['routed'] = True
result[connection_id]['local-class'] = []
result[connection_id]['remote-class'] = []
# parse local-% and remote-% keys
for connKey in conns[connection_id].keys():
if connKey.find('local-') == 0:
if 'id' in conns[connection_id][connKey]:
result[connection_id]['local-id'] = conns[connection_id][connKey]['id']
result[connection_id]['local-class'].append(conns[connection_id][connKey]['class'])
elif connKey.find('remote-') == 0:
if 'id' in conns[connection_id][connKey]:
result[connection_id]['remote-id'] = conns[connection_id][connKey]['id']
result[connection_id]['remote-class'].append(conns[connection_id][connKey]['class'])
result[connection_id]['local-class'] = b'+'.join(result[connection_id]['local-class'])
result[connection_id]['remote-class'] = b'+'.join(result[connection_id]['remote-class'])
# attach Security Associations
for sas in s.list_sas():
for sa in sas:
if sa not in result:
result[sa] = parse_sa(sas[sa])
result[sa]['routed'] = False
result[sa]['sas'].append(sas[sa])
print (ujson.dumps(result, reject_bytes=False))
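# --- Illustrative note added by the editor; not part of the original script. ---
# The printed JSON maps each connection id to a dict shaped roughly like
#     {"version": 2, "local-addrs": "...", "remote-addrs": "...",
#      "local-id": "...", "remote-id": "...", "local-class": "...",
#      "remote-class": "...", "children": {...}, "routed": true, "sas": [...]}
# The exact keys and values depend on the strongSwan/vici version in use.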
| bsd-2-clause | -7,422,858,684,513,032,000 | 41.688889 | 101 | 0.640552 | false | 4.027254 | false | false | false |
portnov/sverchok | nodes/network/udp_client.py | 1 | 3820 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import socket
import bpy
from bpy.props import IntProperty, FloatProperty, EnumProperty, StringProperty, BoolProperty
from sverchok.node_tree import SverchCustomTreeNode, StringsSocket
from sverchok.utils.profile import profile
from sverchok.data_structure import updateNode
class UdpClientNode(bpy.types.Node, SverchCustomTreeNode):
bl_idname = 'UdpClientNode'
bl_label = 'UDP Client'
def send_msg(self, context):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setblocking(0)
sock.sendto(bytes(self.send, 'UTF-8'), (self.ip, self.port))
def recv_msg(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setblocking(0)
sock.settimeout(self.timeout)
try:
data, _ = sock.recvfrom(self.buffer_size)
self.receive = data.decode('UTF-8')
except socket.timeout:
print('Timeout')
send = StringProperty(name='send',
description='Message to send',
default='message',
update=send_msg)
receive = StringProperty(name='receive',
description='Received message',
default='',
update=updateNode)
ip = StringProperty(name='ip',
description='IP address of server',
default='127.0.0.1')
port = IntProperty(name='port',
description='Port number to send message',
default=9250)
buffer_size = IntProperty(name='buffer_size',
description='Size of buffer',
default=8192)
timeout = FloatProperty(name='timeout',
description='Timeout (sec)',
default=0.5)
active = BoolProperty(default=False, name='Active')
def draw_buttons(self, context, layout):
layout.prop(self, 'active', text='Active')
layout.prop(self, 'ip', text='IP')
layout.prop(self, 'port', text='Port')
layout.prop(self, 'buffer_size', text='Buffer')
layout.prop(self, 'timeout', text='Timeout')
def sv_init(self, context):
self.inputs.new('StringsSocket', 'send', 'send').prop_name = 'send'
self.outputs.new('StringsSocket', 'receive', 'receive')
@profile
def process(self):
if not self.active:
return
print(type(self.send),type(self.ip),type(self.port))
input_value = self.inputs[0].sv_get()
if self.send != str(input_value):
self.send = str(input_value)
#self.send_msg(bpy.context)
if self.outputs['receive'].is_linked:
self.recv_msg()
self.outputs['receive'].sv_set(self.receive)
def register():
bpy.utils.register_class(UdpClientNode)
def unregister():
bpy.utils.unregister_class(UdpClientNode)
if __name__ == '__main__':
register() | gpl-3.0 | 7,752,315,496,420,771,000 | 32.517544 | 92 | 0.605236 | false | 4.094319 | false | false | false |
rlowrance/mlpack | minimize_1d_golden_section.py | 1 | 1783 | '''golden section search to minimize a function of one variable in [low,high]
NOTE: the function fun is assumed to be unimodal
RETURN
low
high, such that the minimizer is in [low,high]
 dict, dictionary with function evaluations dict[x] = f(x)
ARGS
fun(x) -> number
low -> number
high -> number
tolerance -> number, required absolute precision of fun(x)
ref: heath-02 p.270 golden section search
'''
import math
import pdb
import unittest
def golden_section(fun, low, high, tolerance, verbose=False):
assert low < high
d = {}
def func(x):
result = fun(x)
d[x] = result
return result
tau = (math.sqrt(5.0) - 1) / 2.0
x1 = low + (1 - tau) * (high - low)
f1 = func(x1)
x2 = low + tau * (high - low)
f2 = func(x2)
while (high - low) > tolerance:
if verbose:
print x1, f1, x2, f2
if f1 > f2:
low = x1
x1 = x2
f1 = f2
x2 = low + tau * (high - low)
f2 = func(x2)
else:
high = x2
x2 = x1
f2 = f1
x1 = low + (1 - tau) * (high - low)
f1 = func(x1)
return low, high, d
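# --- Illustrative usage added by the editor; not part of the original module. ---
def _demo_golden_section():
    '''Minimize f(x) = (x - 2)^2 on [0, 5]; returns an estimate of the minimizer (~2.0).'''
    low, high, evals = golden_section(lambda x: (x - 2.0) ** 2, 0.0, 5.0, 1e-4)
    # evals maps every probed x to f(x); the midpoint of the final bracket is the estimate
    return 0.5 * (low + high)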
class Test(unittest.TestCase):
def setUp(self):
self.verbose = False
def test(self):
# from heath-02 p. 272
def fun(x):
return 0.5 - x * math.exp(- x * x)
low_star, high_star, d = \
golden_section(fun, 0.0, 2.0, .001, verbose=self.verbose)
if self.verbose:
print 'low_star', low_star, 'high_star', high_star
self.assertLess(abs(low_star - .706565), 1e-3)
self.assertLess(abs(high_star - .707471), 1e-3)
if __name__ == '__main__':
if False:
pdb.set_trace()
unittest.main()
| mit | 3,258,253,841,374,939,000 | 22.773333 | 77 | 0.53281 | false | 3.178253 | true | false | false |
jdepoix/goto_cloud | goto_cloud/status_model/models.py | 1 | 2281 | from abc import abstractmethod
from django.db import models
from enums.public import StringEnum
from tracked_model.public import TrackedModel
from .lifecycle_management import ObjectStatusLifecycleManager, StatusLifecycle
class StatusModel(TrackedModel):
"""
This Model can be inherited by models which have a status in a lifecycle. The property model.lifecycle_manager
    returns an ObjectStatusLifecycleManager containing the relevant lifecycle.
"""
class InvalidStatusException(Exception):
"""
raise if a status is invalid
"""
pass
class Status(StringEnum):
pass
@property
@abstractmethod
def lifecycle(self):
"""
:return: the lifecycle of statuses this StatusModel relies on
:rtype: tuple
"""
raise NotImplementedError('implement abstractproperty lifecycle!')
def __init__(self, *args, **kwargs):
self._status_lifecycle = StatusLifecycle(*self.lifecycle)
self._lifecycle_manager = ObjectStatusLifecycleManager(self._status_lifecycle, self, 'status')
self._meta.get_field('status').default = self._status_lifecycle.statuses[0]
self._meta.get_field('status').choices = self.Status.get_django_choices()
super().__init__(*args, **kwargs)
def save(self, *args, **kwargs):
if not self._lifecycle_manager.is_status_valid(self.status):
raise StatusModel.InvalidStatusException('status: {status} is not valid'.format(
status=self.status
))
return super().save(*args, **kwargs)
status = models.CharField(max_length=255)
def increment_status(self):
"""
increments the status of this StatusModel
:raises: ObjectStatusLifecycleManager.InvalidStatusException in case there is no next status
"""
self.status = self._lifecycle_manager.get_next_status()
self.save()
def decrement_status(self):
"""
decrements the status of this StatusModel
:raises: ObjectStatusLifecycleManager.InvalidStatusException in case there is no previous status
"""
self.status = self._lifecycle_manager.get_previous_status()
self.save()
class Meta:
abstract = True
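# --- Illustrative sketch added by the editor; not part of the original module. ---
# A concrete model only needs to provide `lifecycle`; the names below are hypothetical:
#
#     class MigrationRun(StatusModel):
#         lifecycle = ('DRAFT', 'RUNNING', 'DONE')
#
#     run = MigrationRun.objects.create()   # starts at 'DRAFT', the first lifecycle status
#     run.increment_status()                # 'DRAFT' -> 'RUNNING'
#     run.decrement_status()                # back to 'DRAFT'
#
# Saving with a status outside the lifecycle raises StatusModel.InvalidStatusException.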
| mit | 7,181,498,220,778,321,000 | 31.126761 | 114 | 0.665498 | false | 4.378119 | false | false | false |
GorillaNation/pipestash | pipestash/__init__.py | 1 | 3148 | import socket
import optparse
import sys
def parseargs():
def parse_field_args(option, opt_str, value, parser):
args=[]
for arg in parser.rargs:
if arg[0] != "-":
args.append(arg)
else:
del parser.rargs[:len(args)]
break
if getattr(parser.values, option.dest):
args.extend(getattr(parser.values, option.dest))
setattr(parser.values, option.dest, args)
# parse command line
parser = optparse.OptionParser()
parser.add_option('-t', '--type', dest='type', help='the event type (required)')
parser.add_option('-r','--redis-url', dest='redis_url', help="specify the URL of the redis database to use, defaults to redis://localhost:6379/0", default='redis://localhost:6379/0')
parser.add_option('-R', '--redis-key', dest='redis_key', help="redis key to add events to, defaults to logstash", default='logstash')
parser.add_option('-T','--tags', dest='tags', action='callback', callback=parse_field_args, help="tags to add to the event", default=[])
parser.add_option('-f', '--fields', dest='fields', action='callback', callback=parse_field_args, metavar='field=value', help="fields to add to the event, FIELD=VALUE, separated by spaces", default=[])
parser.add_option('-s', '--source-path', dest='source_path', help="specify the @source_path field, defaults to 'stdin'", default='stdin')
parser.add_option('-S', '--source-host', dest='source_host', help="specify the @source_host field, defaults to the machine's FQDN", default=socket.getfqdn())
parser.add_option('-O', '--stdout', dest='stdout', help="print read lines to stdout as well as to redis", action="store_true")
parser.add_option('-v', '--verbose', dest='verbose', help="enable verbose mode", action="store_true")
parser.add_option('-q', '--queue-size', dest='queue_size', help="set the maximum size for the internal queue in number of messages, defaults to 10000", default=10000, type="int")
parser.add_option('-B', '--block', dest='block', help="block reads if the queue is full. defaults to False", default=False, action='store_true')
parser.add_option('-w', '--timeout', dest='timeout', help="if pipestash is unable to connect to redis or redis runs OOM, put the consumer thread to sleep a random amount of time between `-w seconds` and +0 seconds. defaults to 20 seconds", default=20, type="float")
parser.add_option('-n', '--nice', dest='nice', help="sets the niceness value of the process", default=5, type="int")
options, _ = parser.parse_args()
# required fields validation
if not options.type:
parser.error('-t|--type is a required argument')
# set source
options.source = "file:///{0}/{1}".format(options.source_host, options.source_path)
# parse out fields
fields = {}
for fieldargs in options.fields:
a,_,b = fieldargs.partition("=")
fields[a] = b
options.fields = fields
# verbose output
if options.verbose:
def verbose(s):
print >> sys.stderr, s
else:
def verbose(s):
pass
return options
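# --- Illustrative invocation added by the editor; option names come from parseargs above,
# while the executable name and values are hypothetical. ---
#
#     tail -F /var/log/app.log | pipestash -t applog \
#         -r redis://localhost:6379/0 -R logstash \
#         -T web production -f env=prod role=frontend
#
# With "-f env=prod role=frontend", options.fields becomes {'env': 'prod', 'role': 'frontend'},
# and options.source is built as file:///<source-host>/<source-path>.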
| bsd-3-clause | 7,890,449,419,665,262,000 | 55.214286 | 269 | 0.645172 | false | 3.839024 | false | false | false |
sfriesel/suds | suds/xsd/sxbuiltin.py | 1 | 6653 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
The I{sxbuiltin} module provides classes that represent
XSD I{builtin} schema objects.
"""
from suds import *
from suds.xsd import *
from suds.sax.date import *
from suds.xsd.sxbase import XBuiltin
import datetime as dt
from logging import getLogger
log = getLogger(__name__)
class XString(XBuiltin):
"""
Represents an (xsd) <xs:string/> node
"""
pass
class XAny(XBuiltin):
"""
Represents an (xsd) <any/> node
"""
def __init__(self, schema, name):
XBuiltin.__init__(self, schema, name)
self.nillable = False
def get_child(self, name):
child = XAny(self.schema, name)
return child, []
def any(self):
return True
class XBoolean(XBuiltin):
"""
Represents an (xsd) boolean builtin type.
"""
translation = ({'1':True, 'true':True, '0':False, 'false':False},
{True:'true', 1:'true', False:'false', 0:'false'})
def translate(self, value, topython=True):
if topython:
if isinstance(value, basestring):
return XBoolean.translation[0].get(value)
else:
if isinstance(value, (bool, int)):
return XBoolean.translation[1].get(value)
return value
class XInteger(XBuiltin):
"""
Represents an (xsd) xs:int builtin type.
"""
def translate(self, value, topython=True):
if topython:
if isinstance(value, basestring) and len(value):
return int(value)
else:
if isinstance(value, int):
return str(value)
return value
class XLong(XBuiltin):
"""
Represents an (xsd) xs:long builtin type.
"""
def translate(self, value, topython=True):
if topython:
if isinstance(value, basestring) and len(value):
return long(value)
else:
if isinstance(value, (int, long)):
return str(value)
return value
class XFloat(XBuiltin):
"""
Represents an (xsd) xs:float builtin type.
"""
def translate(self, value, topython=True):
if topython:
if isinstance(value, basestring) and len(value):
return float(value)
else:
if isinstance(value, float):
return str(value)
return value
class XDate(XBuiltin):
"""
Represents an (xsd) xs:date builtin type.
"""
def translate(self, value, topython=True):
if topython:
if isinstance(value, basestring) and len(value):
return Date(value).date
else:
if isinstance(value, dt.date):
return str(Date(value))
return value
class XTime(XBuiltin):
"""
Represents an (xsd) xs:time builtin type.
"""
def translate(self, value, topython=True):
if topython:
if isinstance(value, basestring) and len(value):
return Time(value).time
else:
if isinstance(value, dt.date):
return str(Time(value))
return value
class XDateTime(XBuiltin):
"""
Represents an (xsd) xs:datetime builtin type.
"""
def translate(self, value, topython=True):
if topython:
if isinstance(value, basestring) and len(value):
return DateTime(value).datetime
else:
if isinstance(value, dt.date):
return str(DateTime(value))
return value
class Factory:
tags =\
{
# any
'anyType' : XAny,
# strings
'string' : XString,
'normalizedString' : XString,
'ID' : XString,
'Name' : XString,
'QName' : XString,
'NCName' : XString,
'anySimpleType' : XString,
'anyURI' : XString,
'NOTATION' : XString,
'token' : XString,
'language' : XString,
'IDREFS' : XString,
'ENTITIES' : XString,
'IDREF' : XString,
'ENTITY' : XString,
'NMTOKEN' : XString,
'NMTOKENS' : XString,
# binary
'hexBinary' : XString,
'base64Binary' : XString,
# integers
'int' : XInteger,
'integer' : XInteger,
'unsignedInt' : XInteger,
'positiveInteger' : XInteger,
'negativeInteger' : XInteger,
'nonPositiveInteger' : XInteger,
'nonNegativeInteger' : XInteger,
# longs
'long' : XLong,
'unsignedLong' : XLong,
# shorts
'short' : XInteger,
'unsignedShort' : XInteger,
'byte' : XInteger,
'unsignedByte' : XInteger,
# floats
'float' : XFloat,
'double' : XFloat,
'decimal' : XFloat,
# dates & times
'date' : XDate,
'time' : XTime,
'dateTime': XDateTime,
'duration': XString,
'gYearMonth' : XString,
'gYear' : XString,
'gMonthDay' : XString,
'gDay' : XString,
'gMonth' : XString,
# boolean
'boolean' : XBoolean,
}
@classmethod
def maptag(cls, tag, fn):
"""
Map (override) tag => I{class} mapping.
@param tag: An xsd tag name.
@type tag: str
@param fn: A function or class.
@type fn: fn|class.
"""
cls.tags[tag] = fn
@classmethod
def create(cls, schema, name):
"""
Create an object based on the root tag name.
@param schema: A schema object.
@type schema: L{schema.Schema}
@param name: The name.
@type name: str
@return: The created object.
@rtype: L{XBuiltin}
"""
fn = cls.tags.get(name)
if fn is not None:
return fn(schema, name)
return XBuiltin(schema, name)
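# --- Illustrative sketch added by the editor; not part of the original module. ---
# Factory.maptag() lets callers override how a builtin XSD tag is modelled. A hypothetical
# subclass replacing the stock dateTime handling could be wired up like this:
#
#     class XCustomDateTime(XDateTime):
#         pass
#
#     Factory.maptag('dateTime', XCustomDateTime)
#     obj = Factory.create(schema, 'dateTime')   # now an XCustomDateTime instance
#
# Tags without an entry in Factory.tags fall back to the plain XBuiltin class.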
| lgpl-3.0 | -2,954,499,898,351,151,600 | 25.505976 | 76 | 0.563655 | false | 3.962478 | false | false | false |
datalyze-solutions/pandas-qt | pandasqt/views/DataTableView.py | 1 | 13381 | # -*- coding: utf-8 -*-
from pandasqt.compat import QtCore, QtGui, Qt, Slot, Signal
from pandasqt.models.DataFrameModel import DataFrameModel
from pandasqt.views.EditDialogs import AddAttributesDialog, RemoveAttributesDialog
from pandasqt.views.CustomDelegates import createDelegate
from pandasqt.models.mime import PandasCellPayload, MimeData
from pandasqt.models.SupportedDtypes import SupportedDtypes
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
class DragTable(QtGui.QTableView):
def __init__(self, parent=None):
"""create a table view with the ability to start drag operations"""
super(DragTable, self).__init__(parent)
self.setDragEnabled(True)
def startDrag(self, index):
"""start a drag operation with a PandasCellPayload on defined index.
Args:
index (QModelIndex): model index you want to start the drag operation.
"""
if not index.isValid():
return
dataFrame = self.model().dataFrame()
        # get all info from the dataFrame
dfindex = dataFrame.iloc[[index.row()]].index
columnName = dataFrame.columns[index.column()]
dtype = dataFrame[columnName].dtype
value = dataFrame[columnName][dfindex]
# create the mime data
mimePayload = PandasCellPayload(
dfindex,
columnName,
value,
dtype,
hex(id(self.model()))
)
mimeData = MimeData()
mimeData.setData(mimePayload)
# create the drag icon and start drag operation
drag = QtGui.QDrag(self)
drag.setMimeData(mimeData)
pixmap = QtGui.QPixmap(":/icons/insert-table.png")
drag.setHotSpot(QtCore.QPoint(pixmap.width()/3, pixmap.height()/3))
drag.setPixmap(pixmap)
result = drag.start(Qt.MoveAction)
def mouseMoveEvent(self, event):
super(DragTable, self).mouseMoveEvent(event)
self.startDrag(self.indexAt(event.pos()))
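# --- Illustrative wiring added by the editor; not part of the original module. ---
# A minimal sketch (assuming pandasqt's DataFrameModel accepts a DataFrame) of how the
# widgets in this module are typically combined:
#
#     import pandas
#     model = DataFrameModel(pandas.DataFrame({'a': [1, 2, 3]}))
#     widget = DataTableWidget()
#     widget.setViewModel(model)
#     widget.show()
#
# Dragging a cell of the embedded DragTable packages its row index, column name, value and
# dtype into a PandasCellPayload (see DragTable.startDrag above).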
class DataTableWidget(QtGui.QWidget):
"""A Custom widget with a TableView and a toolbar.
This widget shall display all `DataFrameModels` and
    enable editing of the data (editing values, adding/removing
    rows/columns).
"""
def __init__(self, parent=None, iconSize=QtCore.QSize(36, 36)):
"""Constructs the object with the given parent.
Args:
            parent (QObject, optional): Causes the object to be owned
by `parent` instead of Qt. Defaults to `None`.
iconSize (QSize, optional): Size of edit buttons. Defaults to QSize(36, 36).
"""
super(DataTableWidget, self).__init__(parent)
self._iconSize = iconSize
self.initUi()
def initUi(self):
"""Initalizes the Uuser Interface with all sub widgets.
"""
self.gridLayout = QtGui.QGridLayout(self)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.buttonFrame = QtGui.QFrame(self)
#self.buttonFrame.setMinimumSize(QtCore.QSize(250, 50))
#self.buttonFrame.setMaximumSize(QtCore.QSize(250, 50))
self.buttonFrame.setFrameShape(QtGui.QFrame.NoFrame)
spacerItemButton = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.buttonFrameLayout = QtGui.QGridLayout(self.buttonFrame)
self.buttonFrameLayout.setContentsMargins(0, 0, 0, 0)
self.editButton = QtGui.QToolButton(self.buttonFrame)
self.editButton.setObjectName('editbutton')
self.editButton.setText(self.tr(u'edit'))
self.editButton.setToolTip(self.tr(u'toggle editing mode'))
icon = QtGui.QIcon(QtGui.QPixmap(_fromUtf8(':/icons/document-edit.png')))
self.editButton.setIcon(icon)
self.addColumnButton = QtGui.QToolButton(self.buttonFrame)
self.addColumnButton.setObjectName('addcolumnbutton')
self.addColumnButton.setText(self.tr(u'+col'))
self.addColumnButton.setToolTip(self.tr(u'add new column'))
icon = QtGui.QIcon(QtGui.QPixmap(_fromUtf8(':/icons/edit-table-insert-column-right.png')))
self.addColumnButton.setIcon(icon)
self.addRowButton = QtGui.QToolButton(self.buttonFrame)
self.addRowButton.setObjectName('addrowbutton')
self.addRowButton.setText(self.tr(u'+row'))
self.addRowButton.setToolTip(self.tr(u'add new row'))
icon = QtGui.QIcon(QtGui.QPixmap(_fromUtf8(':/icons/edit-table-insert-row-below.png')))
self.addRowButton.setIcon(icon)
self.removeColumnButton = QtGui.QToolButton(self.buttonFrame)
self.removeColumnButton.setObjectName('removecolumnbutton')
self.removeColumnButton.setText(self.tr(u'-col'))
self.removeColumnButton.setToolTip(self.tr(u'remove a column'))
icon = QtGui.QIcon(QtGui.QPixmap(_fromUtf8(':/icons/edit-table-delete-column.png')))
self.removeColumnButton.setIcon(icon)
self.removeRowButton = QtGui.QToolButton(self.buttonFrame)
self.removeRowButton.setObjectName('removerowbutton')
self.removeRowButton.setText(self.tr(u'-row'))
self.removeRowButton.setToolTip(self.tr(u'remove selected rows'))
icon = QtGui.QIcon(QtGui.QPixmap(_fromUtf8(':/icons/edit-table-delete-row.png')))
self.removeRowButton.setIcon(icon)
self.buttons = [self.editButton, self.addColumnButton, self.addRowButton, self.removeColumnButton, self.removeRowButton]
for index, button in enumerate(self.buttons):
button.setMinimumSize(self._iconSize)
button.setMaximumSize(self._iconSize)
button.setIconSize(self._iconSize)
button.setCheckable(True)
self.buttonFrameLayout.addWidget(button, 0, index, 1, 1)
self.buttonFrameLayout.addItem(spacerItemButton, 0, index+1, 1, 1)
for button in self.buttons[1:]:
button.setEnabled(False)
#self.tableView = QtGui.QTableView(self)
self.tableView = DragTable(self)
self.tableView.setAlternatingRowColors(True)
self.tableView.setSortingEnabled(True)
self.gridLayout.addWidget(self.buttonFrame, 0, 0, 1, 1)
self.gridLayout.addWidget(self.tableView, 1, 0, 1, 1)
self.editButton.toggled.connect(self.enableEditing)
self.addColumnButton.toggled.connect(self.showAddColumnDialog)
self.addRowButton.toggled.connect(self.addRow)
self.removeRowButton.toggled.connect(self.removeRow)
self.removeColumnButton.toggled.connect(self.showRemoveColumnDialog)
def setButtonsVisible(self, visible):
"""hide/show the edit buttons"""
self.buttonFrame.setVisible(visible)
@Slot(bool)
def enableEditing(self, enabled):
"""Enable the editing buttons to add/remove rows/columns and to edit the data.
This method is also a slot.
        In addition, the data of the model will be made editable
        if the `enabled` parameter is true.
Args:
enabled (bool): This flag indicates, if the buttons
shall be activated.
"""
for button in self.buttons[1:]:
button.setEnabled(enabled)
if button.isChecked():
button.setChecked(False)
model = self.tableView.model()
if model is not None:
model.enableEditing(enabled)
@Slot()
def uncheckButton(self):
"""Removes the checked stated of all buttons in this widget.
This method is also a slot.
"""
#for button in self.buttons[1:]:
for button in self.buttons:
            # suppress the editButton's toggled event
button.blockSignals(True)
if button.isChecked():
button.setChecked(False)
button.blockSignals(False)
@Slot(str, object, object)
def addColumn(self, columnName, dtype, defaultValue):
"""Adds a column with the given parameters to the underlying model
This method is also a slot.
If no model is set, nothing happens.
Args:
columnName (str): The name of the new column.
dtype (numpy.dtype): The datatype of the new column.
defaultValue (object): Fill the column with this value.
"""
model = self.tableView.model()
if model is not None:
model.addDataFrameColumn(columnName, dtype, defaultValue)
self.addColumnButton.setChecked(False)
@Slot(bool)
def showAddColumnDialog(self, triggered):
"""Display the dialog to add a column to the model.
This method is also a slot.
Args:
triggered (bool): If the corresponding button was
activated, the dialog will be created and shown.
"""
if triggered:
dialog = AddAttributesDialog(self)
dialog.accepted.connect(self.addColumn)
dialog.rejected.connect(self.uncheckButton)
dialog.show()
@Slot(bool)
def addRow(self, triggered):
"""Adds a row to the model.
This method is also a slot.
Args:
triggered (bool): If the corresponding button was
activated, the row will be appended to the end.
"""
if triggered:
model = self.tableView.model()
model.addDataFrameRows()
self.sender().setChecked(False)
@Slot(bool)
def removeRow(self, triggered):
"""Removes a row to the model.
This method is also a slot.
Args:
triggered (bool): If the corresponding button was
activated, the selected row will be removed
from the model.
"""
if triggered:
model = self.tableView.model()
selection = self.tableView.selectedIndexes()
rows = [index.row() for index in selection]
model.removeDataFrameRows(set(rows))
self.sender().setChecked(False)
@Slot(list)
def removeColumns(self, columnNames):
"""Removes one or multiple columns from the model.
This method is also a slot.
Args:
columnNames (list): A list of columns, which shall
be removed from the model.
"""
model = self.tableView.model()
if model is not None:
model.removeDataFrameColumns(columnNames)
self.removeColumnButton.setChecked(False)
@Slot(bool)
def showRemoveColumnDialog(self, triggered):
"""Display the dialog to remove column(s) from the model.
This method is also a slot.
Args:
triggered (bool): If the corresponding button was
activated, the dialog will be created and shown.
"""
if triggered:
model = self.tableView.model()
if model is not None:
columns = model.dataFrameColumns()
dialog = RemoveAttributesDialog(columns, self)
dialog.accepted.connect(self.removeColumns)
dialog.rejected.connect(self.uncheckButton)
dialog.show()
def setViewModel(self, model):
"""Sets the model for the enclosed TableView in this widget.
Args:
model (DataFrameModel): The model to be displayed by
the Table View.
"""
if isinstance(model, DataFrameModel):
self.enableEditing(False)
self.uncheckButton()
selectionModel = self.tableView.selectionModel()
self.tableView.setModel(model)
model.dtypeChanged.connect(self.updateDelegate)
model.dataChanged.connect(self.updateDelegates)
del selectionModel
def setModel(self, model):
"""Sets the model for the enclosed TableView in this widget.
Args:
model (DataFrameModel): The model to be displayed by
the Table View.
"""
self.setViewModel(model)
def model(self):
"""Gets the viewModel"""
return self.view().model()
def viewModel(self):
"""Gets the viewModel"""
return self.view().model()
def view(self):
"""Gets the enclosed TableView
Returns:
QtGui.QTableView: A Qt TableView object.
"""
return self.tableView
def updateDelegate(self, column, dtype):
"""update the delegates for a specific column
Args:
column (int): column index.
dtype (str): data type of column.
"""
# as documented in the setDelegatesFromDtype function
# we need to store all delegates, so going from
# type A -> type B -> type A
# would cause a segfault if not stored.
createDelegate(dtype, column, self.tableView)
def updateDelegates(self):
"""reset all delegates"""
for index, column in enumerate(self.tableView.model().dataFrame().columns):
dtype = self.tableView.model().dataFrame()[column].dtype
self.updateDelegate(index, dtype)
def selectionModel(self):
"""return the table views selectionModel"""
return self.view().selectionModel() | mit | 1,387,420,655,620,008,400 | 33.312821 | 128 | 0.626112 | false | 4.270986 | false | false | false |
PeteAndersen/swarfarm | bestiary/models/items.py | 1 | 6510 | from django.contrib.postgres.fields import ArrayField
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.db import models
from django.utils.safestring import mark_safe
from django.utils.text import slugify
from . import base
ESSENCE_MAP = {
'magic': {
'low': 11006,
'mid': 12006,
'high': 13006,
},
'water': {
'low': 11001,
'mid': 12001,
'high': 13001,
},
'fire': {
'low': 11002,
'mid': 12002,
'high': 13002,
},
'wind': {
'low': 11003,
'mid': 12003,
'high': 13003,
},
'light': {
'low': 11004,
'mid': 12004,
'high': 13004,
},
'dark': {
'low': 11005,
'mid': 12005,
'high': 13005,
},
}
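# Editor's note (illustrative): ESSENCE_MAP resolves an element and grade to the in-game
# com2us item id, e.g. ESSENCE_MAP['fire']['high'] == 13002 and ESSENCE_MAP['magic']['low'] == 11006.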
class GameItem(models.Model):
CATEGORY_MONSTER = 1
CATEGORY_CURRENCY = 6
CATEGORY_RUNE = 8
CATEGORY_SUMMON_SCROLL = 9
CATEGORY_BOOSTER = 10
CATEGORY_ESSENCE = 11
CATEGORY_MONSTER_PIECE = 12
CATEOGRY_GUILD_MONSTER_PIECE = 19
CATEGORY_RAINBOWMON = 25
CATEGORY_RUNE_CRAFT = 27
CATEGORY_CRAFT_STUFF = 29
CATEGORY_SECRET_DUNGEON = 30
CATEGORY_MATERIAL_MONSTER = 61
CATEGORY_ARTIFACT = 73
CATEGORY_ARTIFACT_CRAFT = 75
CATEGORY_CHOICES = (
(CATEGORY_MONSTER, 'Monster'),
(CATEGORY_CURRENCY, 'Currency'),
(CATEGORY_SUMMON_SCROLL, 'Summoning Scroll'),
(CATEGORY_BOOSTER, 'Booster'),
(CATEGORY_ESSENCE, 'Essence'),
(CATEGORY_MONSTER_PIECE, 'Monster Piece'),
(CATEOGRY_GUILD_MONSTER_PIECE, 'Guild Monster Piece'),
(CATEGORY_RAINBOWMON, 'Rainbowmon'),
(CATEGORY_RUNE_CRAFT, 'Rune Craft'),
(CATEGORY_CRAFT_STUFF, 'Craft Material'),
(CATEGORY_SECRET_DUNGEON, 'Secret Dungeon'),
(CATEGORY_MATERIAL_MONSTER, 'Enhancing Monster'),
(CATEGORY_ARTIFACT, 'Artifact'),
(CATEGORY_ARTIFACT_CRAFT, 'Artifact Craft Material'),
)
com2us_id = models.IntegerField()
category = models.IntegerField(choices=CATEGORY_CHOICES, help_text='Typically corresponds to `item_master_id` field')
name = models.CharField(max_length=200)
icon = models.CharField(max_length=200, blank=True, null=True)
description = models.TextField(blank=True, default='')
slug = models.CharField(max_length=200)
sell_value = models.IntegerField(blank=True, null=True)
class Meta:
unique_together = (
'com2us_id',
'category',
)
ordering = (
'category',
'com2us_id',
)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
super().save(*args, **kwargs)
def image_tag(self):
if self.icon:
path = static('herders/images/items/' + self.icon)
return mark_safe(f'<img src="{path}" height="42" width="42" loading="lazy" />')
else:
return 'No Image'
class ItemQuantity(models.Model):
# Abstract model for representing quantities of items for various purposes
item = models.ForeignKey(GameItem, on_delete=models.CASCADE)
quantity = models.IntegerField()
def __str__(self):
return f'{self.item.name} - qty. {self.quantity}'
class Meta:
abstract = True
class Building(models.Model, base.Elements):
AREA_GENERAL = 0
AREA_GUILD = 1
AREA_CHOICES = [
(AREA_GENERAL, 'Everywhere'),
(AREA_GUILD, 'Guild Content'),
]
# TODO: Replace these with base.Stats model
STAT_HP = 0
STAT_ATK = 1
STAT_DEF = 2
STAT_SPD = 3
STAT_CRIT_RATE_PCT = 4
STAT_CRIT_DMG_PCT = 5
STAT_RESIST_PCT = 6
STAT_ACCURACY_PCT = 7
MAX_ENERGY = 8
MANA_STONE_STORAGE = 9
MANA_STONE_PRODUCTION = 10
ENERGY_PRODUCTION = 11
ARCANE_TOWER_ATK = 12
ARCANE_TOWER_SPD = 13
STAT_CHOICES = [
(STAT_HP, 'HP'),
(STAT_ATK, 'ATK'),
(STAT_DEF, 'DEF'),
(STAT_SPD, 'SPD'),
(STAT_CRIT_RATE_PCT, 'CRI Rate'),
(STAT_CRIT_DMG_PCT, 'CRI Dmg'),
(STAT_RESIST_PCT, 'Resistance'),
(STAT_ACCURACY_PCT, 'Accuracy'),
(MAX_ENERGY, 'Max. Energy'),
(MANA_STONE_STORAGE, 'Mana Stone Storage'),
(MANA_STONE_PRODUCTION, 'Mana Stone Production Rate'),
(ENERGY_PRODUCTION, 'Energy Production Rate'),
(ARCANE_TOWER_ATK, 'Arcane Tower ATK'),
(ARCANE_TOWER_SPD, 'Arcane Tower SPD'),
]
PERCENT_STATS = [
STAT_HP,
STAT_ATK,
STAT_DEF,
STAT_SPD,
STAT_CRIT_RATE_PCT,
STAT_CRIT_DMG_PCT,
STAT_RESIST_PCT,
STAT_ACCURACY_PCT,
MANA_STONE_PRODUCTION,
ENERGY_PRODUCTION,
ARCANE_TOWER_ATK,
ARCANE_TOWER_SPD,
]
com2us_id = models.IntegerField()
name = models.CharField(max_length=30)
max_level = models.IntegerField()
area = models.IntegerField(choices=AREA_CHOICES, null=True, blank=True)
affected_stat = models.IntegerField(choices=STAT_CHOICES, null=True, blank=True)
element = models.CharField(max_length=6, choices=base.Elements.ELEMENT_CHOICES, blank=True, null=True)
stat_bonus = ArrayField(models.IntegerField(blank=True, null=True))
upgrade_cost = ArrayField(models.IntegerField(blank=True, null=True))
description = models.TextField(null=True, blank=True)
icon_filename = models.CharField(max_length=100, null=True, blank=True)
def image_url(self):
if self.icon_filename:
return mark_safe('<img src="%s" height="42" width="42" loading="lazy" />' % static('herders/images/buildings/' + self.icon_filename))
else:
return 'No Image'
def __str__(self):
return self.name
class Source(models.Model):
name = models.CharField(max_length=100)
description = models.TextField(null=True, blank=True)
icon_filename = models.CharField(max_length=100, null=True, blank=True)
farmable_source = models.BooleanField(default=False)
meta_order = models.IntegerField(db_index=True, default=0)
def image_url(self):
if self.icon_filename:
return mark_safe('<img src="%s" height="42" width="42" loading="lazy" />' % static('herders/images/icons/' + self.icon_filename))
else:
return 'No Image'
def __str__(self):
return self.name
class Meta:
ordering = ['meta_order', 'icon_filename', 'name']
| apache-2.0 | -2,082,593,143,098,325,200 | 28.862385 | 145 | 0.601075 | false | 3.316353 | false | false | false |
alirizakeles/zato | code/zato-server/test/zato/server/service/internal/kvdb/__init__.py | 1 | 2245 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2013 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
from traceback import format_exc
# Zato
from zato.common import ZatoException
from zato.common.kvdb import redis_grammar
from zato.server.service.internal import AdminService, AdminSIO
class ExecuteCommand(AdminService):
""" Executes a command against the key/value DB.
"""
class SimpleIO(AdminSIO):
request_elem = 'zato_kvdb_remote_command_execute_request'
response_elem = 'zato_kvdb_remote_command_execute_response'
input_required = ('command',)
output_required = ('result',)
def handle(self):
input_command = self.request.input.command or ''
if not input_command:
msg = 'No command sent'
raise ZatoException(self.cid, msg)
try:
parse_result = redis_grammar.parseString(input_command)
options = {}
command = parse_result.command
parameters = parse_result.parameters if parse_result.parameters else []
if command == 'CONFIG':
options['parse'] = parameters[0]
elif command == 'OBJECT':
options['infotype'] = parameters[0]
response = self.server.kvdb.conn.execute_command(command, *parameters, **options) or ''
if response and command in('KEYS', 'HKEYS', 'HVALS'):
response = unicode(response).encode('utf-8')
elif command in('HLEN', 'LLEN', 'LRANGE', 'SMEMBERS', 'HGETALL'):
response = str(response)
self.response.payload.result = response
except Exception, e:
msg = 'Command parsing error, command:[{}], e:[{}]'.format(input_command, format_exc(e))
self.logger.error(msg)
raise ZatoException(self.cid, msg)
# The data browser will most likely be implemented in a future version
'''
class GetList(AdminService):
""" Returns a list of keys, optionally including their values.
"""
# KEYS, then
# HGETALL
# GET
# LRANGE
# SMEMBERS
'''
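# --- Illustrative note added by the editor; not part of the original module. ---
# ExecuteCommand follows the SimpleIO contract declared above: the request carries a single
# 'command' element and the response a single 'result' element, e.g. (hypothetical values):
#
#     request:  {'command': 'HGETALL user:123'}
#     response: {'result': "{'name': 'alice'}"}
#
# Only commands that redis_grammar can parse are forwarded to the key/value DB.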
| gpl-3.0 | 4,555,896,742,429,259,300 | 30.180556 | 100 | 0.621826 | false | 3.945518 | false | false | false |
semplice/quickstart | quickstart/builder.py | 1 | 2423 | # -*- coding: utf-8 -*-
#
# quickstart - Refreshing the GUI world.
# Copyright (C) 2013 Eugenio "g7" Paolantonio
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
from gi.repository import Gtk
class from_file:
""" builder.from_file is a class decorator that will automatically load
    the UI file specified in the arguments into a Gtk.Builder object.
A Dynamic dictionary will be created in class.objects. This special dict
gets the UI objects on-the-fly.
    The only required parameter is the path to the UI file.
Usage example:
@quickstart.builder.from_file("./test.glade")
class GUI:
def __init__(self):
self.objects["main_window"].show_all()
The Gtk.Builder object will be created at class.__builder. """
class DynamicObjectsDictionary(dict):
""" A dynamic dictionary! """
def __init__(self, builder):
self.builder = builder
dict.__init__(self)
def __getitem__(self, key):
""" Returns the specified object if it is already in the dictionary,
otherwise gets it from the builder first and then returns it. """
itm = self.get(key)
if not itm:
obj = self.builder.get_object(key)
if not obj:
raise Exception("Object %s not found!" % key)
self[key] = obj
itm = obj
return itm
__getattr__ = __getitem__
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def __init__(self, uipath):
""" Initializes the class. """
self.uipath = uipath
def __call__(self, clss):
""" Magic. """
def wrapper(*args, **kwargs):
clss.__builder = Gtk.Builder()
clss.__builder.add_from_file(self.uipath)
clss.objects = self.DynamicObjectsDictionary(clss.__builder)
return clss(*args, **kwargs)
return wrapper
| lgpl-2.1 | -5,219,807,691,265,484,000 | 27.505882 | 80 | 0.684276 | false | 3.638138 | false | false | false |
junmin-zhu/chromium-rivertrail | chrome/common/extensions/docs/server2/build_server.py | 1 | 2634 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script is used to copy all dependencies into the local directory.
# The package of files can then be uploaded to App Engine.
import os
import shutil
import stat
import sys
SRC_DIR = os.path.join(sys.path[0], os.pardir, os.pardir, os.pardir, os.pardir,
os.pardir)
THIRD_PARTY_DIR = os.path.join(SRC_DIR, 'third_party')
LOCAL_THIRD_PARTY_DIR = os.path.join(sys.path[0], 'third_party')
TOOLS_DIR = os.path.join(SRC_DIR, 'tools')
SCHEMA_COMPILER_FILES = ['model.py', 'idl_schema.py', 'schema_util.py']
def MakeInit(path):
path = os.path.join(path, '__init__.py')
with open(os.path.join(path), 'w') as f:
os.utime(os.path.join(path), None)
def OnError(function, path, excinfo):
os.chmod(path, stat.S_IWUSR)
function(path)
def CopyThirdParty(src, dest, files=None):
dest_path = os.path.join(LOCAL_THIRD_PARTY_DIR, dest)
if not files:
shutil.copytree(src, dest_path)
MakeInit(dest_path)
return
try:
os.makedirs(dest_path)
except Exception:
pass
MakeInit(dest_path)
for filename in files:
shutil.copy(os.path.join(src, filename), os.path.join(dest_path, filename))
def main():
if os.path.isdir(LOCAL_THIRD_PARTY_DIR):
try:
shutil.rmtree(LOCAL_THIRD_PARTY_DIR, False, OnError)
except OSError:
print('*-------------------------------------------------------------*\n'
'| If you are receiving an upload error, try removing |\n'
'| chrome/common/extensions/docs/server2/third_party manually. |\n'
'*-------------------------------------------------------------*\n')
CopyThirdParty(os.path.join(THIRD_PARTY_DIR, 'handlebar'), 'handlebar')
CopyThirdParty(os.path.join(SRC_DIR, 'ppapi', 'generators'),
'json_schema_compiler')
CopyThirdParty(os.path.join(THIRD_PARTY_DIR, 'ply'),
os.path.join('json_schema_compiler', 'ply'))
CopyThirdParty(os.path.join(TOOLS_DIR, 'json_schema_compiler'),
'json_schema_compiler',
SCHEMA_COMPILER_FILES)
CopyThirdParty(TOOLS_DIR, 'json_schema_compiler', ['json_comment_eater.py'])
MakeInit(LOCAL_THIRD_PARTY_DIR)
# To be able to use the Handlebar class we need this import in __init__.py.
with open(os.path.join(LOCAL_THIRD_PARTY_DIR,
'handlebar',
'__init__.py'), 'a') as f:
f.write('from handlebar import Handlebar\n')
if __name__ == '__main__':
main()
| bsd-3-clause | 5,980,907,068,329,087,000 | 35.583333 | 80 | 0.621868 | false | 3.247842 | false | false | false |
ddimensia/RaceCapture_App | autosportlabs/comms/comms.py | 1 | 5498 | import traceback
import threading
import multiprocessing
from Queue import Empty
from time import sleep
from kivy.logger import Logger
from autosportlabs.comms.commscommon import PortNotOpenException
STAY_ALIVE_TIMEOUT = 4
COMMAND_CLOSE = 'CLOSE'
COMMAND_KEEP_ALIVE = 'PING'
def connection_process_message_reader(rx_queue, connection, should_run):
Logger.debug('Comms: connection process message reader started')
while should_run.is_set():
try:
msg = connection.read_line()
if msg:
rx_queue.put(msg)
except:
Logger.error('Comms: Exception in connection_process_message_reader')
Logger.debug(traceback.format_exc())
should_run.clear()
sleep(0.5)
Logger.debug('Comms: connection process message reader exited')
def connection_process_message_writer(tx_queue, connection, should_run):
Logger.debug('Comms: connection process message writer started')
while should_run.is_set():
try:
message = tx_queue.get(True, 1.0)
if message:
connection.write(message)
except Empty:
pass
except Exception as e:
Logger.error('Comms: Exception in connection_process_message_writer ' + str(e))
Logger.debug(traceback.format_exc())
should_run.clear()
sleep(0.5)
Logger.debug('Comms: connection process message writer exited')
def connection_message_process(connection, device, rx_queue, tx_queue, command_queue):
Logger.debug('Comms: connection process starting')
try:
connection.open(device)
connection.flushInput()
connection.flushOutput()
reader_writer_should_run = threading.Event()
reader_writer_should_run.set()
reader_thread = threading.Thread(target=connection_process_message_reader, args=(rx_queue, connection, reader_writer_should_run))
reader_thread.start()
writer_thread = threading.Thread(target=connection_process_message_writer, args=(tx_queue, connection, reader_writer_should_run))
writer_thread.start()
while reader_writer_should_run.is_set():
try:
command = command_queue.get(True, STAY_ALIVE_TIMEOUT)
if command == COMMAND_CLOSE:
Logger.debug('Comms: connection process: got close command')
reader_writer_should_run.clear()
except Empty:
Logger.debug('Comms: keep alive timeout')
reader_writer_should_run.clear()
Logger.debug('Comms: connection worker exiting')
reader_thread.join()
writer_thread.join()
try:
connection.close()
except:
Logger.debug('Comms: Exception closing connection worker connection')
Logger.debug(traceback.format_exc())
except Exception as e:
Logger.debug('Comms: Exception setting up connection process: ' + str(type(e)) + str(e))
Logger.trace(traceback.format_exc())
Logger.debug('Comms: connection worker exited')
class Comms():
CONNECT_TIMEOUT = 1.0
DEFAULT_TIMEOUT = 1.0
QUEUE_FULL_TIMEOUT = 1.0
_timeout = DEFAULT_TIMEOUT
device = None
_connection = None
_connection_process = None
_rx_queue = None
_tx_queue = None
_command_queue = None
def __init__(self, device, connection):
self.device = device
self._connection = connection
self.supports_streaming = False
def start_connection_process(self):
rx_queue = multiprocessing.Queue()
tx_queue = multiprocessing.Queue(5)
command_queue = multiprocessing.Queue()
connection_process = multiprocessing.Process(target=connection_message_process, args=(self._connection, self.device, rx_queue, tx_queue, command_queue))
connection_process.start()
self._rx_queue = rx_queue
self._tx_queue = tx_queue
self._command_queue = command_queue
self._connection_process = connection_process
def get_available_devices(self):
return self._connection.get_available_devices()
def isOpen(self):
return self._connection_process != None and self._connection_process.is_alive()
def open(self):
connection = self._connection
Logger.debug('Comms: Opening connection ' + str(self.device))
self.start_connection_process()
def keep_alive(self):
try:
self._command_queue.put_nowait(COMMAND_KEEP_ALIVE)
except:
pass
def close(self):
Logger.debug('Comms: comms.close()')
if self.isOpen():
try:
Logger.debug('Comms: closing connection process')
self._command_queue.put_nowait(COMMAND_CLOSE)
self._connection_process.join(self._timeout * 2)
Logger.debug('Comms: connection process joined')
except:
Logger.error('Comms: Timeout joining connection process')
def read_message(self):
if not self.isOpen():
raise PortNotOpenException('Port Closed')
try:
return self._rx_queue.get(True, self._timeout)
        except:  # Queue.Empty is raised if the timeout is hit
return None
def write_message(self, message):
if not self.isOpen(): raise PortNotOpenException('Port Closed')
self._tx_queue.put(message, True, Comms.QUEUE_FULL_TIMEOUT)
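
# Usage sketch (illustrative only, not part of the original module). `ExampleConnection`
# is a hypothetical transport: any object exposing the open/flushInput/flushOutput/close/
# write/get_available_devices interface (plus whatever read call the reader thread uses)
# that connection_message_process relies on. The device path and message are examples.
if __name__ == '__main__':
    connection = ExampleConnection()            # hypothetical concrete transport
    comms = Comms('/dev/ttyACM0', connection)   # example device path
    comms.open()                                # spawns the worker process and I/O threads
    comms.write_message('ping\r\n')             # queued for the writer thread
    print(comms.read_message())                 # blocks up to DEFAULT_TIMEOUT, None on timeout
    comms.keep_alive()                          # resets the worker's stay-alive wait
    comms.close()                               # sends COMMAND_CLOSE and joins the process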
| gpl-3.0 | -2,222,995,248,042,992,600 | 34.701299 | 160 | 0.631139 | false | 4.219493 | false | false | false |
franklingu/leetcode-solutions | questions/palindrome-linked-list/Solution.py | 1 | 1513 | """
Given a singly linked list, determine if it is a palindrome.
Example 1:
Input: 1->2
Output: false
Example 2:
Input: 1->2->2->1
Output: true
Follow up:
Could you do it in O(n) time and O(1) space?
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def isPalindrome(self, head):
"""
:type head: ListNode
:rtype: bool
"""
def reverse_ll(head):
prev, curr, ne = None, head, head.next
while curr is not None:
curr.next = prev
prev = curr
curr = ne
if ne:
ne = ne.next
return prev
if head is None or head.next is None:
return True
        # Advance fast two nodes and slow one node per iteration; when fast runs off the
        # end, slow is the last node of the first half (for odd lengths the middle node
        # stays in the first half).
        fast, slow = head, head
        while fast is not None:
            fast = fast.next
            if fast is None:
                break
            fast = fast.next
            if fast is None:
                break
            slow = slow.next
        # Split off the second half so it can be reversed and compared against the first.
        second = slow.next
        slow.next = None
new_head = reverse_ll(second)
curr1, curr2 = head, new_head
ret = True
while curr1 is not None and curr2 is not None:
if curr1.val != curr2.val:
ret = False
break
curr1 = curr1.next
curr2 = curr2.next
        # Re-reverse the second half and reattach it so the input list is left unchanged.
        slow.next = reverse_ll(new_head)
return ret
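
# Quick usage sketch (not part of the LeetCode submission): build 1->2->2->1 with a local
# ListNode class, since the stub above is commented out, and check that it is a palindrome.
if __name__ == '__main__':
    class ListNode(object):
        def __init__(self, x):
            self.val = x
            self.next = None

    head = ListNode(1)
    head.next = ListNode(2)
    head.next.next = ListNode(2)
    head.next.next.next = ListNode(1)
    print(Solution().isPalindrome(head))  # expected: True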
| mit | -2,569,556,633,888,039,400 | 23.403226 | 60 | 0.489095 | false | 3.940104 | false | false | false |
nelseric/qmk_firmware | lib/python/qmk/cli/new/keymap.py | 16 | 1884 | """This script automates the copying of the default keymap into your own keymap.
"""
import shutil
from pathlib import Path
import qmk.path
from qmk.decorators import automagic_keyboard, automagic_keymap
from milc import cli
@cli.argument('-kb', '--keyboard', help='Specify keyboard name. Example: 1upkeyboards/1up60hse')
@cli.argument('-km', '--keymap', help='Specify the name for the new keymap directory')
@cli.subcommand('Creates a new keymap for the keyboard of your choosing')
@automagic_keyboard
@automagic_keymap
def new_keymap(cli):
"""Creates a new keymap for the keyboard of your choosing.
"""
# ask for user input if keyboard or keymap was not provided in the command line
keyboard = cli.config.new_keymap.keyboard if cli.config.new_keymap.keyboard else input("Keyboard Name: ")
keymap = cli.config.new_keymap.keymap if cli.config.new_keymap.keymap else input("Keymap Name: ")
# generate keymap paths
kb_path = Path('keyboards') / keyboard
keymap_path = qmk.path.keymap(keyboard)
keymap_path_default = keymap_path / 'default'
keymap_path_new = keymap_path / keymap
# check directories
if not kb_path.exists():
cli.log.error('Keyboard %s does not exist!', kb_path)
return False
if not keymap_path_default.exists():
cli.log.error('Keyboard default %s does not exist!', keymap_path_default)
return False
if keymap_path_new.exists():
cli.log.error('Keymap %s already exists!', keymap_path_new)
return False
# create user directory with default keymap files
shutil.copytree(keymap_path_default, keymap_path_new, symlinks=True)
# end message to user
cli.log.info("%s keymap directory created in: %s", keymap, keymap_path_new)
cli.log.info("Compile a firmware with your new keymap by typing: \n\n\tqmk compile -kb %s -km %s\n", keyboard, keymap)
| gpl-2.0 | 5,085,182,209,697,416,000 | 39.085106 | 122 | 0.706476 | false | 3.574953 | false | false | false |
brahle/I-Rcbot | irc/mysocket.py | 1 | 1895 | #!/usr/bin/env python2.6
# Zeckviz IRC bot
# Copyright (C) 2011 Bruno Rahle
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import socket
class MySocket(socket.socket):
"""Extends socket.socket class and adds the functionality to reads the data
from socket line by line.
"""
BUFFER_SIZE = 4096 # size of the buffer to read
def __init__(self, host, port):
"""Creates the socket.
"""
super(MySocket, self).__init__()
self.connect((host, port))
self._buffer = ''
self._pos = 0
def readline(self):
"""Reads the next line from the socket.
NOTE: Ignores the timeout and blocking status. It just waits for the
complete line to be sent to the socket and returns it.
TODO: account for timeout and blocking status.
"""
        line = ''
        while True:
            while (self._pos == len(self._buffer)):
                self._buffer = self.recv(self.BUFFER_SIZE)
                self._pos = 0
            end = self._buffer.find('\n', self._pos)
            if end == -1:
                # No newline in the buffered data yet: consume all of it (slicing with
                # end == -1 would silently drop the last character) and read more.
                line = line + self._buffer[self._pos:]
                self._pos = len(self._buffer)
            else:
                line = line + self._buffer[self._pos:end]
                self._pos = end + 1
                return line
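
# Usage sketch (illustrative only; the host, port and protocol lines are examples, not
# defaults of this module). The class targets Python 2, so plain strings are byte strings.
if __name__ == '__main__':
    sock = MySocket('irc.example.org', 6667)
    sock.send('NICK zeckviz\r\n')
    sock.send('USER zeckviz 0 * :zeckviz bot\r\n')
    for _ in range(5):
        print(sock.readline())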
| agpl-3.0 | -2,516,814,916,496,474,000 | 34.092593 | 79 | 0.616359 | false | 4.137555 | false | false | false |
ctogle/dilapidator | src/dilap/BROKEN/generate/toremove/street.py | 1 | 1258 | import dilap.core.context as dgc
import dilap.generate.landscape as dls
import dilap.generate.lot as dlt
import dilap.primitive.road as dr
import dp_vector as dpv
import dp_quaternion as dpq
class street(dgc.context):
def generate(self,worn = 0):
start = dpv.vector(-100,-300, 20)
end = dpv.vector( 100, 300,-10)
tip = dpv.vector(0,1,0)
tail = dpv.vector(1,1,0)
cs = [dpv.vector(-100,-100, 10),dpv.vector( 100, 100,-10)]
rd = dr.road(start,end,tip,tail,controls = cs)
self._nodes_to_graph(self._node_wrap(rd))
#bbs = []
#lotspace = rd._lotspace(bbs)
#dlot = dlt.lot(lotspace[0],lotspace[1]).generate(worn)
#lsppos,lsprot = lotspace[2],lotspace[3]
#dlot._transform(lsppos,lsprot,dpv.one())
#self._consume(dlot)
#lotspace = rd._lotspace(bbs)
#dlot = dlt.lot(lotspace[0],lotspace[1]).generate(worn)
#lsppos,lsprot = lotspace[2],lotspace[3]
#dlot._transform(lsppos,lsprot,dpv.one())
#self._consume(dlot)
tpts = []
#tpts.extend(dlot.terrain_points)
tpts.extend(rd._terrain_points())
lscape = dls.landscape(controls = tpts)
self._consume(lscape.generate(worn))
| mit | -4,722,324,031,005,298,000 | 30.45 | 66 | 0.605723 | false | 2.833333 | false | false | false |
idlesign/django-etc | etc/admin/admins.py | 1 | 2599 | from django.contrib import admin
from django.contrib import messages
from django.db import models
from django.http import HttpRequest, HttpResponse, HttpResponseRedirect
from django.urls import path
if False: # pragma: nocover
from .models import CustomModelPage # noqa
class EtcAdmin(admin.ModelAdmin):
"""Base etc admin."""
def message_success(self, request: HttpRequest, msg: str):
self.message_user(request, msg, messages.SUCCESS)
def message_warning(self, request: HttpRequest, msg: str):
self.message_user(request, msg, messages.WARNING)
def message_error(self, request: HttpRequest, msg: str):
self.message_user(request, msg, messages.ERROR)
class ReadonlyAdmin(EtcAdmin):
"""Read-only etc admin base class."""
view_on_site: bool = False
actions = None
def has_add_permission(self, request: HttpRequest) -> bool:
return False
def has_delete_permission(self, request: HttpRequest, obj: models.Model = None) -> bool:
return False
def changeform_view(
self,
request: HttpRequest,
object_id: int = None,
form_url: str = '',
extra_context: dict = None
) -> HttpResponse:
extra_context = extra_context or {}
extra_context.update({
'show_save_and_continue': False,
'show_save': False,
})
return super().changeform_view(request, object_id, extra_context=extra_context)
class CustomPageModelAdmin(ReadonlyAdmin):
"""Base for admin pages with contents based on custom models."""
def get_urls(self) -> list:
meta = self.model._meta
patterns = [path(
'',
self.admin_site.admin_view(self.view_custom),
name=f'{meta.app_label}_{meta.model_name}_changelist'
)]
return patterns
def has_add_permission(self, request: HttpRequest) -> bool:
return True
def view_custom(self, request: HttpRequest) -> HttpResponse:
context: dict = {
'show_save_and_continue': False,
'show_save_and_add_another': False,
'title': self.model._meta.verbose_name,
}
return self._changeform_view(request, object_id=None, form_url='', extra_context=context)
def response_add(self, request: HttpRequest, obj: 'CustomModelPage', post_url_continue=None):
return HttpResponseRedirect(request.path)
def save_model(self, request: HttpRequest, obj: 'CustomModelPage', form, change):
obj.bound_request = request
obj.bound_admin = self
obj.save()
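
# Usage sketch (illustrative only, not part of this module): a consuming project would
# subclass CustomModelPage (see the guarded import at the top of this file) and expose it
# through CustomPageModelAdmin. The class name, message text and registration call below
# are assumptions about such a project, kept behind `if False` so nothing runs at import.
if False: # pragma: nocover
    from .models import CustomModelPage

    class StatsPage(CustomModelPage):
        """Hypothetical admin-only page; fields would be declared as on any model."""

        def save(self):
            # bound_admin / bound_request are attached in CustomPageModelAdmin.save_model()
            # above, so custom processing can report back to the admin UI.
            self.bound_admin.message_success(self.bound_request, 'Stats rebuilt')

    admin.site.register(StatsPage, CustomPageModelAdmin)  # or the library's own helper, if any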
| bsd-3-clause | 7,494,133,136,866,920,000 | 31.08642 | 97 | 0.643709 | false | 4.048287 | false | false | false |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/txclib/project.py | 1 | 48650 | # -*- coding: utf-8 -*-
import getpass
import os
import re
import fnmatch
import datetime
import time
import ssl
try:
import configparser
except ImportError:
import ConfigParser as configparser
from txclib.web import *
from txclib.utils import *
from txclib.packages import urllib3
from txclib.packages.urllib3.packages import six
from txclib.urls import API_URLS
from txclib.config import OrderedRawConfigParser, Flipdict
from txclib.log import logger
from txclib.processors import visit_hostname
from txclib.paths import posix_path, native_path, posix_sep
from txclib.packages.urllib3.exceptions import SSLError
class ProjectNotInit(Exception):
pass
class Project(object):
"""
Represents an association between the local and remote project instances.
"""
def __init__(self, path_to_tx=None, init=True):
"""
Initialize the Project attributes.
"""
if init:
self._init(path_to_tx)
def _init(self, path_to_tx=None):
instructions = "Run 'tx init' to initialize your project first!"
try:
self.root = self._get_tx_dir_path(path_to_tx)
self.config_file = self._get_config_file_path(self.root)
self.config = self._read_config_file(self.config_file)
self.txrc_file = self._get_transifex_file()
local_txrc_file = self._get_transifex_file(os.getcwd())
self.txrc = self._get_transifex_config([self.txrc_file, local_txrc_file])
if os.path.exists(local_txrc_file):
self.txrc_file = local_txrc_file
except ProjectNotInit as e:
logger.error('\n'.join([six.u(str(e)), instructions]))
raise
host = self.config.get('main', 'host')
if host.lower().startswith('https://'):
self.conn = urllib3.connection_from_url(
host,
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=certs_file()
)
else:
self.conn = urllib3.connection_from_url(host)
def _get_config_file_path(self, root_path):
"""Check the .tx/config file exists."""
config_file = os.path.join(root_path, ".tx", "config")
logger.debug("Config file is %s" % config_file)
if not os.path.exists(config_file):
msg = "Cannot find the config file (.tx/config)!"
raise ProjectNotInit(msg)
return config_file
def _get_tx_dir_path(self, path_to_tx):
"""Check the .tx directory exists."""
root_path = path_to_tx or find_dot_tx()
logger.debug("Path to tx is %s." % root_path)
if not root_path:
msg = "Cannot find any .tx directory!"
raise ProjectNotInit(msg)
return root_path
def _read_config_file(self, config_file):
"""Parse the config file and return its contents."""
config = OrderedRawConfigParser()
try:
config.read(config_file)
except Exception as err:
msg = "Cannot open/parse .tx/config file: %s" % err
raise ProjectNotInit(msg)
return config
def _get_transifex_config(self, txrc_files):
"""Read the configuration from the .transifexrc files."""
txrc = OrderedRawConfigParser()
try:
txrc.read(txrc_files)
except Exception as e:
msg = "Cannot read configuration file: %s" % e
raise ProjectNotInit(msg)
self._migrate_txrc_file(txrc)
return txrc
def _migrate_txrc_file(self, txrc):
"""Migrate the txrc file, if needed."""
if not os.path.exists(self.txrc_file):
return txrc
for section in txrc.sections():
orig_hostname = txrc.get(section, 'hostname')
hostname = visit_hostname(orig_hostname)
if hostname != orig_hostname:
msg = "Hostname %s should be changed to %s."
logger.info(msg % (orig_hostname, hostname))
if (sys.stdin.isatty() and sys.stdout.isatty() and
confirm('Change it now? ', default=True)):
txrc.set(section, 'hostname', hostname)
msg = 'Hostname changed'
logger.info(msg)
else:
hostname = orig_hostname
self._save_txrc_file(txrc)
return txrc
def _get_transifex_file(self, directory=None):
"""Fetch the path of the .transifexrc file.
It is in the home directory of the user by default.
"""
if directory is not None:
logger.debug(".transifexrc file is at %s" % directory)
return os.path.join(directory, ".transifexrc")
directory = os.path.expanduser('~')
txrc_file = os.path.join(directory, ".transifexrc")
logger.debug(".transifexrc file is at %s" % directory)
if not os.path.exists(txrc_file):
msg = "%s not found." % (txrc_file)
logger.info(msg)
mask = os.umask(0o077)
open(txrc_file, 'w').close()
os.umask(mask)
return txrc_file
def validate_config(self):
"""
To ensure the json structure is correctly formed.
"""
pass
def getset_host_credentials(self, host, user=None, password=None):
"""
        Read .transifexrc and return the username and password for a specific
        host; otherwise, ask the user for input.
"""
try:
username = self.txrc.get(host, 'username')
passwd = self.txrc.get(host, 'password')
except (configparser.NoOptionError, configparser.NoSectionError):
logger.info("No entry found for host %s. Creating..." % host)
username = user or input("Please enter your transifex username: ")
while (not username):
username = input("Please enter your transifex username: ")
passwd = password
while (not passwd):
passwd = getpass.getpass()
logger.info("Updating %s file..." % self.txrc_file)
self.txrc.add_section(host)
self.txrc.set(host, 'username', username)
self.txrc.set(host, 'password', passwd)
self.txrc.set(host, 'token', '')
self.txrc.set(host, 'hostname', host)
return username, passwd
def set_remote_resource(self, resource, source_lang, i18n_type, host,
file_filter="translations<sep>%(proj)s.%(res)s<sep><lang>.%(extension)s"):
"""Method to handle the add/conf of a remote resource."""
if not self.config.has_section(resource):
self.config.add_section(resource)
p_slug, r_slug = resource.split('.', 1)
file_filter = file_filter.replace("<sep>", r"%s" % posix_sep)
self.url_info = {
'host': host,
'project': p_slug,
'resource': r_slug
}
extension = self._extension_for(i18n_type)[1:]
self.config.set(resource, 'source_lang', source_lang)
self.config.set(
resource, 'file_filter',
file_filter % {'proj': p_slug, 'res': r_slug, 'extension': extension}
)
self.config.set(resource, 'type', i18n_type)
if host != self.config.get('main', 'host'):
self.config.set(resource, 'host', host)
def get_resource_host(self, resource):
"""
Returns the host that the resource is configured to use. If there is no
such option we return the default one
"""
return self.config.get('main', 'host')
def get_resource_lang_mapping(self, resource):
"""Get language mappings for a specific resource."""
lang_map = Flipdict()
try:
args = self.config.get("main", "lang_map")
for arg in args.replace(' ', '').split(','):
k,v = arg.split(":")
lang_map.update({k:v})
except configparser.NoOptionError:
pass
except (ValueError, KeyError):
raise Exception("Your lang map configuration is not correct.")
if self.config.has_section(resource):
res_lang_map = Flipdict()
try:
args = self.config.get(resource, "lang_map")
for arg in args.replace(' ', '').split(','):
k,v = arg.split(":")
res_lang_map.update({k:v})
except configparser.NoOptionError:
pass
except (ValueError, KeyError):
raise Exception("Your lang map configuration is not correct.")
# merge the lang maps and return result
lang_map.update(res_lang_map)
return lang_map
def get_source_file(self, resource):
"""
Get source file for a resource.
"""
if self.config.has_section(resource):
source_lang = self.config.get(resource, "source_lang")
source_file = self.get_resource_option(resource, 'source_file') or None
if source_file is None:
try:
file_filter = self.config.get(resource, "file_filter")
filename = file_filter.replace('<lang>', source_lang)
if os.path.exists(filename):
return native_path(filename)
except configparser.NoOptionError:
pass
else:
return native_path(source_file)
def get_resource_files(self, resource):
"""
Get a dict for all files assigned to a resource. First we calculate the
files matching the file expression and then we apply all translation
        exceptions. The resulting dict will be in this format:
{ 'en': 'path/foo/en/bar.po', 'de': 'path/foo/de/bar.po', 'es': 'path/exceptions/es.po'}
NOTE: All paths are relative to the root of the project
"""
tr_files = {}
if self.config.has_section(resource):
try:
file_filter = self.config.get(resource, "file_filter")
except configparser.NoOptionError:
file_filter = "$^"
source_lang = self.config.get(resource, "source_lang")
source_file = self.get_source_file(resource)
expr_re = regex_from_filefilter(file_filter, self.root)
expr_rec = re.compile(expr_re)
for f_path in files_in_project(self.root):
match = expr_rec.match(posix_path(f_path))
if match:
lang = match.group(1)
if lang != source_lang:
f_path = os.path.relpath(f_path, self.root)
if f_path != source_file:
tr_files.update({lang: f_path})
for (name, value) in self.config.items(resource):
if name.startswith("trans."):
value = native_path(value)
lang = name.split('.')[1]
# delete language which has same file
if value in list(tr_files.values()):
keys = []
for k, v in six.iteritems(tr_files):
if v == value:
keys.append(k)
if len(keys) == 1:
del tr_files[keys[0]]
else:
raise Exception("Your configuration seems wrong."\
" You have multiple languages pointing to"\
" the same file.")
# Add language with correct file
tr_files.update({lang:value})
return tr_files
return None
def get_resource_option(self, resource, option):
"""
        Return the requested option for a specific resource.
        If there is no such option, we return None.
"""
if self.config.has_section(resource):
if self.config.has_option(resource, option):
return self.config.get(resource, option)
return None
def get_resource_list(self, project=None):
"""
        Parse the config file and return a list of resource names in the
        "project_slug.resource_slug" format.
"""
resource_list= []
for r in self.config.sections():
if r == 'main':
continue
p_slug, r_slug = r.split('.', 1)
if project and p_slug != project:
continue
resource_list.append(r)
return resource_list
def save(self):
"""
Store the config dictionary in the .tx/config file of the project.
"""
self._save_tx_config()
self._save_txrc_file()
def _save_tx_config(self, config=None):
"""Save the local config file."""
if config is None:
config = self.config
fh = open(self.config_file,"w")
config.write(fh)
fh.close()
def _save_txrc_file(self, txrc=None):
"""Save the .transifexrc file."""
if txrc is None:
txrc = self.txrc
mask = os.umask(0o077)
fh = open(self.txrc_file, 'w')
txrc.write(fh)
fh.close()
os.umask(mask)
def get_full_path(self, relpath):
if relpath[0] == os.path.sep:
return relpath
else:
return os.path.join(self.root, relpath)
def _get_pseudo_file(self, slang, resource, file_filter):
pseudo_file = file_filter.replace('<lang>', '%s_pseudo' % slang)
return native_path(pseudo_file)
def pull(self, languages=[], resources=[], overwrite=True, fetchall=False,
fetchsource=False, force=False, skip=False, minimum_perc=0, mode=None,
pseudo=False):
"""Pull all translations file from transifex server."""
self.minimum_perc = minimum_perc
resource_list = self.get_chosen_resources(resources)
if mode == 'reviewed':
url = 'pull_reviewed_file'
elif mode == 'translator':
url = 'pull_translator_file'
elif mode == 'developer':
url = 'pull_developer_file'
else:
url = 'pull_file'
for resource in resource_list:
logger.debug("Handling resource %s" % resource)
self.resource = resource
project_slug, resource_slug = resource.split('.', 1)
files = self.get_resource_files(resource)
slang = self.get_resource_option(resource, 'source_lang')
sfile = self.get_source_file(resource)
lang_map = self.get_resource_lang_mapping(resource)
host = self.get_resource_host(resource)
logger.debug("Language mapping is: %s" % lang_map)
if mode is None:
mode = self._get_option(resource, 'mode')
self.url_info = {
'host': host,
'project': project_slug,
'resource': resource_slug
}
logger.debug("URL data are: %s" % self.url_info)
stats = self._get_stats_for_resource()
try:
file_filter = self.config.get(resource, 'file_filter')
except configparser.NoOptionError:
file_filter = None
# Pull source file
pull_languages = set([])
new_translations = set([])
if pseudo:
pseudo_file = self._get_pseudo_file(
slang, resource, file_filter
)
if self._should_download(slang, stats, local_file=pseudo_file):
logger.info("Pulling pseudo file for resource %s (%s)." % (
resource,
color_text(pseudo_file, "RED")
))
self._download_pseudo(
project_slug, resource_slug, pseudo_file
)
if not languages:
continue
if fetchall:
new_translations = self._new_translations_to_add(
files, slang, lang_map, stats, force
)
if new_translations:
msg = "New translations found for the following languages: %s"
logger.info(msg % ', '.join(new_translations))
existing, new = self._languages_to_pull(
languages, files, lang_map, stats, force
)
pull_languages |= existing
new_translations |= new
logger.debug("Adding to new translations: %s" % new)
if fetchsource:
if sfile and slang not in pull_languages:
pull_languages.add(slang)
elif slang not in new_translations:
new_translations.add(slang)
if pull_languages:
logger.debug("Pulling languages for: %s" % pull_languages)
msg = "Pulling translations for resource %s (source: %s)"
logger.info(msg % (resource, sfile))
for lang in pull_languages:
local_lang = lang
if lang in list(lang_map.values()):
remote_lang = lang_map.flip[lang]
else:
remote_lang = lang
if languages and lang not in pull_languages:
logger.debug("Skipping language %s" % lang)
continue
if lang != slang:
local_file = files.get(lang, None) or files[lang_map[lang]]
else:
local_file = sfile
logger.debug("Using file %s" % local_file)
kwargs = {
'lang': remote_lang,
'stats': stats,
'local_file': local_file,
'force': force,
'mode': mode,
}
if not self._should_update_translation(**kwargs):
msg = "Skipping '%s' translation (file: %s)."
logger.info(
msg % (color_text(remote_lang, "RED"), local_file)
)
continue
if not overwrite:
local_file = ("%s.new" % local_file)
logger.warning(
" -> %s: %s" % (color_text(remote_lang, "RED"), local_file)
)
try:
r, charset = self.do_url_request(url, language=remote_lang)
except Exception as e:
if isinstance(e, SSLError) or not skip:
raise
else:
logger.error(e)
continue
base_dir = os.path.split(local_file)[0]
mkdir_p(base_dir)
fd = open(local_file, 'wb')
fd.write(r.encode(charset))
fd.close()
if new_translations:
msg = "Pulling new translations for resource %s (source: %s)"
logger.info(msg % (resource, sfile))
for lang in new_translations:
if lang in list(lang_map.keys()):
local_lang = lang_map[lang]
else:
local_lang = lang
remote_lang = lang
if file_filter:
local_file = os.path.relpath(
os.path.join(
self.root, native_path(
file_filter.replace('<lang>', local_lang)
)
), os.curdir
)
else:
trans_dir = os.path.join(self.root, ".tx", resource)
if not os.path.exists(trans_dir):
os.mkdir(trans_dir)
local_file = os.path.relpath(os.path.join(trans_dir, '%s_translation' %
local_lang, os.curdir))
if lang != slang:
satisfies_min = self._satisfies_min_translated(
stats[remote_lang], mode
)
if not satisfies_min:
msg = "Skipping language %s due to used options."
logger.info(msg % lang)
continue
logger.warning(
" -> %s: %s" % (color_text(remote_lang, "RED"), local_file)
)
r, charset = self.do_url_request(url, language=remote_lang)
base_dir = os.path.split(local_file)[0]
mkdir_p(base_dir)
fd = open(local_file, 'wb')
fd.write(r.encode(charset))
fd.close()
def push(self, source=False, translations=False, force=False, resources=[], languages=[],
skip=False, no_interactive=False):
"""
Push all the resources
"""
resource_list = self.get_chosen_resources(resources)
self.skip = skip
self.force = force
for resource in resource_list:
push_languages = []
project_slug, resource_slug = resource.split('.', 1)
files = self.get_resource_files(resource)
slang = self.get_resource_option(resource, 'source_lang')
sfile = self.get_source_file(resource)
lang_map = self.get_resource_lang_mapping(resource)
host = self.get_resource_host(resource)
logger.debug("Language mapping is: %s" % lang_map)
logger.debug("Using host %s" % host)
self.url_info = {
'host': host,
'project': project_slug,
'resource': resource_slug
}
logger.info("Pushing translations for resource %s:" % resource)
stats = self._get_stats_for_resource()
if force and not no_interactive:
answer = input("Warning: By using --force, the uploaded"
" files will overwrite remote translations, even if they"
" are newer than your uploaded files.\nAre you sure you"
" want to continue? [y/N] ")
if not answer in ["", 'Y', 'y', "yes", 'YES']:
return
if source:
if sfile is None:
logger.error("You don't seem to have a proper source file"
" mapping for resource %s. Try without the --source"
" option or set a source file first and then try again." %
resource)
continue
# Push source file
try:
logger.warning("Pushing source file (%s)" % sfile)
if not self._resource_exists(stats):
logger.info("Resource does not exist. Creating...")
fileinfo = "%s;%s" % (resource_slug, slang)
filename = self.get_full_path(sfile)
self._create_resource(resource, project_slug, fileinfo, filename)
self.do_url_request(
'push_source', multipart=True, method="PUT",
files=[(
"%s;%s" % (resource_slug, slang)
, self.get_full_path(sfile)
)],
)
except Exception as e:
if isinstance(e, SSLError) or not skip:
raise
else:
logger.error(e)
else:
try:
self.do_url_request('resource_details')
except Exception as e:
if isinstance(e, SSLError):
raise
code = getattr(e, 'code', None)
if code == 404:
msg = "Resource %s doesn't exist on the server."
logger.error(msg % resource)
continue
if translations:
# Check if given language codes exist
if not languages:
push_languages = list(files.keys())
else:
push_languages = []
f_langs = list(files.keys())
for l in languages:
if l in list(lang_map.keys()):
l = lang_map[l]
push_languages.append(l)
if l not in f_langs:
msg = "Warning: No mapping found for language code '%s'."
logger.error(msg % color_text(l,"RED"))
logger.debug("Languages to push are %s" % push_languages)
# Push translation files one by one
for lang in push_languages:
local_lang = lang
if lang in list(lang_map.values()):
remote_lang = lang_map.flip[lang]
else:
remote_lang = lang
local_file = files[local_lang]
kwargs = {
'lang': remote_lang,
'stats': stats,
'local_file': local_file,
'force': force,
}
if not self._should_push_translation(**kwargs):
msg = "Skipping '%s' translation (file: %s)."
logger.info(msg % (color_text(lang, "RED"), local_file))
continue
msg = "Pushing '%s' translations (file: %s)"
logger.warning(
msg % (color_text(remote_lang, "RED"), local_file)
)
try:
self.do_url_request(
'push_translation', multipart=True, method='PUT',
files=[(
"%s;%s" % (resource_slug, remote_lang),
self.get_full_path(local_file)
)], language=remote_lang
)
logger.debug("Translation %s pushed." % remote_lang)
except HttpNotFound:
if not source:
logger.error("Resource hasn't been created. Try pushing source file.")
except Exception as e:
if isinstance(e, SSLError) or not skip:
raise
else:
logger.error(e)
def delete(self, resources=[], languages=[], skip=False, force=False):
"""Delete translations."""
resource_list = self.get_chosen_resources(resources)
self.skip = skip
self.force = force
if not languages:
delete_func = self._delete_resource
else:
delete_func = self._delete_translations
for resource in resource_list:
project_slug, resource_slug = resource.split('.', 1)
host = self.get_resource_host(resource)
self.url_info = {
'host': host,
'project': project_slug,
'resource': resource_slug
}
logger.debug("URL data are: %s" % self.url_info)
json, _ = self.do_url_request('project_details', project=self)
project_details = parse_json(json)
teams = project_details['teams']
stats = self._get_stats_for_resource()
delete_func(project_details, resource, stats, languages)
def _delete_resource(self, project_details, resource, stats, *args):
"""Delete a resource from Transifex."""
project_slug, resource_slug = resource.split('.', 1)
project_resource_slugs = [
r['slug'] for r in project_details['resources']
]
logger.info("Deleting resource %s:" % resource)
if resource_slug not in project_resource_slugs:
if not self.skip:
msg = "Skipping: %s : Resource does not exist."
logger.info(msg % resource)
return
if not self.force:
slang = self.get_resource_option(resource, 'source_lang')
for language in stats:
if language == slang:
continue
if int(stats[language]['translated_entities']) > 0:
msg = (
"Skipping: %s : Unable to delete resource because it "
"has a not empty %s translation.\nPlease use -f or "
"--force option to delete this resource."
)
logger.info(msg % (resource, language))
return
try:
self.do_url_request('delete_resource', method="DELETE")
self.config.remove_section(resource)
self.save()
msg = "Deleted resource %s of project %s."
logger.info(msg % (resource_slug, project_slug))
except Exception as e:
msg = "Unable to delete resource %s of project %s."
logger.error(msg % (resource_slug, project_slug))
if isinstance(e, SSLError) or not self.skip:
raise
def _delete_translations(self, project_details, resource, stats, languages):
"""Delete the specified translations for the specified resource."""
logger.info("Deleting translations from resource %s:" % resource)
for language in languages:
self._delete_translation(project_details, resource, stats, language)
def _delete_translation(self, project_details, resource, stats, language):
"""Delete a specific translation from the specified resource."""
project_slug, resource_slug = resource.split('.', 1)
if language not in stats:
if not self.skip:
msg = "Skipping %s: Translation does not exist."
logger.warning(msg % (language))
return
if not self.force:
teams = project_details['teams']
if language in teams:
msg = (
"Skipping %s: Unable to delete translation because it is "
"associated with a team.\nPlease use -f or --force option "
"to delete this translation."
)
logger.warning(msg % language)
return
if int(stats[language]['translated_entities']) > 0:
msg = (
"Skipping %s: Unable to delete translation because it "
"is not empty.\nPlease use -f or --force option to delete "
"this translation."
)
logger.warning(msg % language)
return
try:
self.do_url_request(
'delete_translation', language=language, method="DELETE"
)
msg = "Deleted language %s from resource %s of project %s."
logger.info(msg % (language, resource_slug, project_slug))
except Exception as e:
msg = "Unable to delete translation %s"
logger.error(msg % language)
if isinstance(e, SSLError) or not self.skip:
raise
def do_url_request(self, api_call, multipart=False, data=None,
files=[], method="GET", **kwargs):
"""
Issues a url request.
"""
# Read the credentials from the config file (.transifexrc)
host = self.url_info['host']
try:
username = self.txrc.get(host, 'username')
passwd = self.txrc.get(host, 'password')
token = self.txrc.get(host, 'token')
hostname = self.txrc.get(host, 'hostname')
except configparser.NoSectionError:
raise Exception("No user credentials found for host %s. Edit"
" ~/.transifexrc and add the appropriate info in there." %
host)
# Create the Url
kwargs['hostname'] = hostname
kwargs.update(self.url_info)
url = API_URLS[api_call] % kwargs
if multipart:
for info, filename in files:
#FIXME: It works because we only pass to files argument
#only one item
name = os.path.basename(filename)
data = {
"resource": info.split(';')[0],
"language": info.split(';')[1],
"uploaded_file": (name, open(filename, 'rb').read())
}
return make_request(method, hostname, url, username, passwd, data)
def _should_update_translation(self, lang, stats, local_file, force=False,
mode=None):
"""Whether a translation should be udpated from Transifex.
We use the following criteria for that:
- If user requested to force the download.
- If language exists in Transifex.
- If the local file is older than the Transifex's file.
- If the user requested a x% completion.
Args:
lang: The language code to check.
stats: The (global) statistics object.
local_file: The local translation file.
force: A boolean flag.
mode: The mode for the translation.
Returns:
True or False.
"""
return self._should_download(lang, stats, local_file, force)
def _should_add_translation(self, lang, stats, force=False, mode=None):
"""Whether a translation should be added from Transifex.
We use the following criteria for that:
- If user requested to force the download.
- If language exists in Transifex.
- If the user requested a x% completion.
Args:
lang: The language code to check.
stats: The (global) statistics object.
force: A boolean flag.
mode: The mode for the translation.
Returns:
True or False.
"""
return self._should_download(lang, stats, None, force)
def _should_download(self, lang, stats, local_file=None, force=False,
mode=None):
"""Return whether a translation should be downloaded.
If local_file is None, skip the timestamps check (the file does
not exist locally).
"""
try:
lang_stats = stats[lang]
except KeyError as e:
logger.debug("No lang %s in statistics" % lang)
return False
satisfies_min = self._satisfies_min_translated(lang_stats, mode)
if not satisfies_min:
return False
if force:
logger.debug("Downloading translation due to -f")
return True
if local_file is not None:
remote_update = self._extract_updated(lang_stats)
if not self._remote_is_newer(remote_update, local_file):
logger.debug("Local is newer than remote for lang %s" % lang)
return False
return True
def _should_push_translation(self, lang, stats, local_file, force=False):
"""Return whether a local translation file should be
        pushed to Transifex.
We use the following criteria for that:
- If user requested to force the upload.
- If language exists in Transifex.
- If local file is younger than the remote file.
Args:
lang: The language code to check.
stats: The (global) statistics object.
local_file: The local translation file.
force: A boolean flag.
Returns:
True or False.
"""
if force:
logger.debug("Push translation due to -f.")
return True
try:
lang_stats = stats[lang]
except KeyError as e:
logger.debug("Language %s does not exist in Transifex." % lang)
return True
if local_file is not None:
remote_update = self._extract_updated(lang_stats)
if self._remote_is_newer(remote_update, local_file):
msg = "Remote translation is newer than local file for lang %s"
logger.debug(msg % lang)
return False
return True
def _generate_timestamp(self, update_datetime):
"""Generate a UNIX timestamp from the argument.
Args:
update_datetime: The datetime in the format used by Transifex.
Returns:
A float, representing the timestamp that corresponds to the
argument.
"""
time_format = "%Y-%m-%d %H:%M:%S"
return time.mktime(
datetime.datetime(
*time.strptime(update_datetime, time_format)[0:5]
).utctimetuple()
)
def _get_time_of_local_file(self, path):
"""Get the modified time of the path_.
Args:
path: The path we want the mtime for.
Returns:
The time as a timestamp or None, if the file does not exist
"""
if not os.path.exists(path):
return None
return time.mktime(time.gmtime(os.path.getmtime(path)))
def _satisfies_min_translated(self, stats, mode=None):
"""Check whether a translation fulfills the filter used for
minimum translated percentage.
Args:
            stats: The statistics entry of the language, as returned by Transifex.
            mode: The mode for the translation (e.g. reviewed).
Returns:
True or False
"""
cur = self._extract_completed(stats, mode)
option_name = 'minimum_perc'
if self.minimum_perc is not None:
minimum_percent = self.minimum_perc
else:
global_minimum = int(
self.get_resource_option('main', option_name) or 0
)
resource_minimum = int(
self.get_resource_option(
self.resource, option_name
) or global_minimum
)
minimum_percent = resource_minimum
return cur >= minimum_percent
def _remote_is_newer(self, remote_updated, local_file):
"""Check whether the remote translation is newer that the local file.
Args:
remote_updated: The date and time the translation was last
updated remotely.
local_file: The local file.
Returns:
True or False.
"""
if remote_updated is None:
logger.debug("No remote time")
return False
remote_time = self._generate_timestamp(remote_updated)
local_time = self._get_time_of_local_file(
self.get_full_path(local_file)
)
logger.debug(
"Remote time is %s and local %s" % (remote_time, local_time)
)
if local_time is not None and remote_time < local_time:
return False
return True
@classmethod
def _extract_completed(cls, stats, mode=None):
"""Extract the information for the translated percentage from the stats.
Args:
stats: The stats object for a language as returned by Transifex.
mode: The mode of translations requested.
Returns:
The percentage of translation as integer.
"""
if mode == 'reviewed':
key = 'reviewed_percentage'
else:
key = 'completed'
try:
return int(stats[key][:-1])
except KeyError as e:
return 0
@classmethod
def _extract_updated(cls, stats):
"""Extract the information for the last update of a translation.
Args:
stats: The stats object for a language as returned by Transifex.
Returns:
The last update field.
"""
try:
return stats['last_update']
except KeyError as e:
return None
def _download_pseudo(self, project_slug, resource_slug, pseudo_file):
response, charset = self.do_url_request(
'pull_pseudo_file',
resource_slug=resource_slug,
project_slug=project_slug
)
response = parse_json(response)
base_dir = os.path.split(pseudo_file)[0]
mkdir_p(base_dir)
with open(pseudo_file, "wb") as fd:
fd.write(response['content'].encode("utf-8"))
def _new_translations_to_add(self, files, slang, lang_map,
stats, force=False):
"""Return a list of translations which are new to the
local installation.
"""
new_translations = []
timestamp = time.time()
langs = list(stats.keys())
logger.debug("Available languages are: %s" % langs)
for lang in langs:
lang_exists = lang in list(files.keys())
lang_is_source = lang == slang
mapped_lang_exists = (
lang in lang_map and lang_map[lang] in list(files.keys())
)
if lang_exists or lang_is_source or mapped_lang_exists:
continue
if self._should_add_translation(lang, stats, force):
new_translations.append(lang)
return set(new_translations)
def _get_stats_for_resource(self):
"""Get the statistics information for a resource."""
try:
r, charset = self.do_url_request('resource_stats')
logger.debug("Statistics response is %s" % r)
stats = parse_json(r)
except HttpNotFound:
logger.debug("Resource not found, creating...")
stats = {}
except Exception as e:
logger.debug(six.u(str(e)))
raise
return stats
def get_chosen_resources(self, resources):
"""Get the resources the user selected.
Support wildcards in the resources specified by the user.
Args:
resources: A list of resources as specified in command-line or
an empty list.
Returns:
A list of resources.
"""
configured_resources = self.get_resource_list()
if not resources:
return configured_resources
selected_resources = []
for resource in resources:
found = False
for full_name in configured_resources:
if fnmatch.fnmatch(full_name, resource):
selected_resources.append(full_name)
found = True
if not found:
msg = "Specified resource '%s' does not exist."
raise Exception(msg % resource)
logger.debug("Operating on resources: %s" % selected_resources)
return selected_resources
def _languages_to_pull(self, languages, files, lang_map, stats, force):
"""Get a set of langauges to pull.
Args:
languages: A list of languages the user selected in cmd.
files: A dictionary of current local translation files.
Returns:
A tuple of a set of existing languages and new translations.
"""
if not languages:
pull_languages = set([])
pull_languages |= set(files.keys())
mapped_files = []
for lang in pull_languages:
if lang in lang_map.flip:
mapped_files.append(lang_map.flip[lang])
pull_languages -= set(lang_map.flip.keys())
pull_languages |= set(mapped_files)
return (pull_languages, set([]))
else:
pull_languages = []
new_translations = []
f_langs = list(files.keys())
for l in languages:
if l not in f_langs and not (l in lang_map and lang_map[l] in f_langs):
if self._should_add_translation(l, stats, force):
new_translations.append(l)
else:
if l in list(lang_map.keys()):
l = lang_map[l]
pull_languages.append(l)
return (set(pull_languages), set(new_translations))
def _extension_for(self, i18n_type):
"""Return the extension used for the specified type."""
try:
json, charset = self.do_url_request('formats')
res = parse_json(json)
return res[i18n_type]['file-extensions'].split(',')[0]
except Exception as e:
logger.error(e)
return ''
def _resource_exists(self, stats):
"""Check if resource exists.
Args:
stats: The statistics dict as returned by Tx.
Returns:
True, if the resource exists in the server.
"""
return bool(stats)
def _create_resource(self, resource, pslug, fileinfo, filename, **kwargs):
"""Create a resource.
Args:
resource: The full resource name.
pslug: The slug of the project.
fileinfo: The information of the resource.
filename: The name of the file.
Raises:
URLError, in case of a problem.
"""
multipart = True
method = "POST"
api_call = 'create_resource'
host = self.url_info['host']
try:
username = self.txrc.get(host, 'username')
passwd = self.txrc.get(host, 'password')
token = self.txrc.get(host, 'token')
hostname = self.txrc.get(host, 'hostname')
except configparser.NoSectionError:
raise Exception("No user credentials found for host %s. Edit"
" ~/.transifexrc and add the appropriate info in there." %
host)
# Create the Url
kwargs['hostname'] = hostname
kwargs.update(self.url_info)
kwargs['project'] = pslug
url = (API_URLS[api_call] % kwargs).encode('UTF-8')
i18n_type = self._get_option(resource, 'type')
if i18n_type is None:
raise Exception(
"Please define the resource type in .tx/config (eg. type = PO)."
" More info: http://bit.ly/txcl-rt"
)
data = {
"slug": fileinfo.split(';')[0],
"name": fileinfo.split(';')[0],
"uploaded_file": (filename, open(filename, 'rb').read()),
"i18n_type": i18n_type
}
r, charset = make_request(method, hostname, url, username, passwd, data)
return r
def _get_option(self, resource, option):
"""Get the value for the option in the config file.
If the option is not in the resource section, look for it in
the project.
Args:
resource: The resource name.
option: The option the value of which we are interested in.
Returns:
The option value or None, if it does not exist.
"""
value = self.get_resource_option(resource, option)
if value is None:
if self.config.has_option('main', option):
return self.config.get('main', option)
return value
def set_i18n_type(self, resources, i18n_type):
"""Set the type for the specified resources."""
self._set_resource_option(resources, key='type', value=i18n_type)
def set_min_perc(self, resources, perc):
"""Set the minimum percentage for the resources."""
self._set_resource_option(resources, key='minimum_perc', value=perc)
def set_default_mode(self, resources, mode):
"""Set the default mode for the specified resources."""
self._set_resource_option(resources, key='mode', value=mode)
def _set_resource_option(self, resources, key, value):
"""Set options in the config file.
        If resources is empty, set the option globally.
"""
if not resources:
self.config.set('main', key, value)
return
for r in resources:
self.config.set(r, key, value)
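
# Usage sketch (illustrative only; language codes and the completion threshold are
# examples). Assumes the working directory sits inside a project that already has a
# .tx/config and that ~/.transifexrc contains credentials for the configured host.
if __name__ == '__main__':
    project = Project()               # locates the .tx directory automatically
    project.pull(languages=['de', 'pt_BR'], overwrite=True, minimum_perc=75)
    project.push(source=True)         # upload the source file(s) of all resources
    project.save()                    # persist any config changes back to .tx/config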
| agpl-3.0 | -8,030,408,997,415,867,000 | 37.611111 | 98 | 0.519301 | false | 4.49963 | true | false | false |
hmflash/Cura | cura/QualityManager.py | 1 | 16324 | # Copyright (c) 2016 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
# This collects a lot of quality and quality changes related code which was split between ContainerManager
#   and the MachineManager and really needs to be usable from both.
from typing import List
from UM.Application import Application
from UM.Settings.ContainerRegistry import ContainerRegistry
from UM.Settings.DefinitionContainer import DefinitionContainer
from UM.Settings.InstanceContainer import InstanceContainer
from cura.Settings.ExtruderManager import ExtruderManager
class QualityManager:
## Get the singleton instance for this class.
@classmethod
def getInstance(cls) -> "QualityManager":
# Note: Explicit use of class name to prevent issues with inheritance.
if not QualityManager.__instance:
QualityManager.__instance = cls()
return QualityManager.__instance
__instance = None # type: "QualityManager"
## Find a quality by name for a specific machine definition and materials.
#
    #   \param quality_name \type{str} the name of the quality profile to look for.
# \param machine_definition (Optional) \type{ContainerInstance} If nothing is
# specified then the currently selected machine definition is used.
# \param material_containers (Optional) \type{List[ContainerInstance]} If nothing is specified then
# the current set of selected materials is used.
# \return the matching quality container \type{ContainerInstance}
def findQualityByName(self, quality_name, machine_definition=None, material_containers=None):
criteria = {"type": "quality", "name": quality_name}
result = self._getFilteredContainersForStack(machine_definition, material_containers, **criteria)
# Fall back to using generic materials and qualities if nothing could be found.
if not result and material_containers and len(material_containers) == 1:
basic_materials = self._getBasicMaterials(material_containers[0])
result = self._getFilteredContainersForStack(machine_definition, basic_materials, **criteria)
return result[0] if result else None
## Find a quality changes container by name.
#
# \param quality_changes_name \type{str} the name of the quality changes container.
# \param machine_definition (Optional) \type{ContainerInstance} If nothing is
# specified then the currently selected machine definition is used.
# \return the matching quality changes containers \type{List[ContainerInstance]}
def findQualityChangesByName(self, quality_changes_name, machine_definition=None):
criteria = {"type": "quality_changes", "name": quality_changes_name}
result = self._getFilteredContainersForStack(machine_definition, [], **criteria)
return result
## Fetch the list of available quality types for this combination of machine definition and materials.
#
# \param machine_definition \type{DefinitionContainer}
# \param material_containers \type{List[InstanceContainer]}
# \return \type{List[str]}
def findAllQualityTypesForMachineAndMaterials(self, machine_definition, material_containers):
# Determine the common set of quality types which can be
# applied to all of the materials for this machine.
quality_type_dict = self.__fetchQualityTypeDictForMaterial(machine_definition, material_containers[0])
common_quality_types = set(quality_type_dict.keys())
for material_container in material_containers[1:]:
next_quality_type_dict = self.__fetchQualityTypeDictForMaterial(machine_definition, material_container)
common_quality_types.intersection_update(set(next_quality_type_dict.keys()))
return list(common_quality_types)
## Fetches a dict of quality types names to quality profiles for a combination of machine and material.
#
# \param machine_definition \type{DefinitionContainer} the machine definition.
# \param material \type{ContainerInstance} the material.
# \return \type{Dict[str, ContainerInstance]} the dict of suitable quality type names mapping to qualities.
def __fetchQualityTypeDictForMaterial(self, machine_definition, material):
qualities = self.findAllQualitiesForMachineMaterial(machine_definition, material)
quality_type_dict = {}
for quality in qualities:
quality_type_dict[quality.getMetaDataEntry("quality_type")] = quality
return quality_type_dict
## Find a quality container by quality type.
#
# \param quality_type \type{str} the name of the quality type to search for.
# \param machine_definition (Optional) \type{ContainerInstance} If nothing is
# specified then the currently selected machine definition is used.
# \param material_containers (Optional) \type{List[ContainerInstance]} If nothing is specified then
# the current set of selected materials is used.
# \return the matching quality container \type{ContainerInstance}
def findQualityByQualityType(self, quality_type, machine_definition=None, material_containers=None, **kwargs):
criteria = kwargs
criteria["type"] = "quality"
if quality_type:
criteria["quality_type"] = quality_type
result = self._getFilteredContainersForStack(machine_definition, material_containers, **criteria)
# Fall back to using generic materials and qualities if nothing could be found.
if not result and material_containers and len(material_containers) == 1:
basic_materials = self._getBasicMaterials(material_containers[0])
result = self._getFilteredContainersForStack(machine_definition, basic_materials, **criteria)
return result[0] if result else None
## Find all suitable qualities for a combination of machine and material.
#
# \param machine_definition \type{DefinitionContainer} the machine definition.
# \param material_container \type{ContainerInstance} the material.
# \return \type{List[ContainerInstance]} the list of suitable qualities.
def findAllQualitiesForMachineMaterial(self, machine_definition, material_container):
criteria = {"type": "quality" }
result = self._getFilteredContainersForStack(machine_definition, [material_container], **criteria)
if not result:
basic_materials = self._getBasicMaterials(material_container)
result = self._getFilteredContainersForStack(machine_definition, basic_materials, **criteria)
return result
## Find all quality changes for a machine.
#
# \param machine_definition \type{DefinitionContainer} the machine definition.
# \return \type{List[InstanceContainer]} the list of quality changes
def findAllQualityChangesForMachine(self, machine_definition: DefinitionContainer) -> List[InstanceContainer]:
if machine_definition.getMetaDataEntry("has_machine_quality"):
definition_id = machine_definition.getId()
else:
definition_id = "fdmprinter"
filter_dict = { "type": "quality_changes", "extruder": None, "definition": definition_id }
quality_changes_list = ContainerRegistry.getInstance().findInstanceContainers(**filter_dict)
return quality_changes_list
## Find all usable qualities for a machine and extruders.
#
# Finds all of the qualities for this combination of machine and extruders.
# Only one quality per quality type is returned. i.e. if there are 2 qualities with quality_type=normal
    #   then only one of them is returned (at random).
#
# \param global_container_stack \type{ContainerStack} the global machine definition
# \param extruder_stacks \type{List[ContainerStack]} the list of extruder stacks
# \return \type{List[InstanceContainer]} the list of the matching qualities. The quality profiles
# return come from the first extruder in the given list of extruders.
def findAllUsableQualitiesForMachineAndExtruders(self, global_container_stack, extruder_stacks):
global_machine_definition = global_container_stack.getBottom()
if extruder_stacks:
# Multi-extruder machine detected.
materials = [stack.findContainer(type="material") for stack in extruder_stacks]
else:
# Machine with one extruder.
materials = [global_container_stack.findContainer(type="material")]
quality_types = self.findAllQualityTypesForMachineAndMaterials(global_machine_definition, materials)
# Map the list of quality_types to InstanceContainers
qualities = self.findAllQualitiesForMachineMaterial(global_machine_definition, materials[0])
quality_type_dict = {}
for quality in qualities:
quality_type_dict[quality.getMetaDataEntry("quality_type")] = quality
return [quality_type_dict[quality_type] for quality_type in quality_types]
## Fetch more basic versions of a material.
#
# This tries to find a generic or basic version of the given material.
# \param material_container \type{InstanceContainer} the material
# \return \type{List[InstanceContainer]} a list of the basic materials or an empty list if one could not be found.
def _getBasicMaterials(self, material_container):
base_material = material_container.getMetaDataEntry("material")
material_container_definition = material_container.getDefinition()
if material_container_definition and material_container_definition.getMetaDataEntry("has_machine_quality"):
definition_id = material_container.getDefinition().getMetaDataEntry("quality_definition", material_container.getDefinition().getId())
else:
definition_id = "fdmprinter"
if base_material:
# There is a basic material specified
criteria = { "type": "material", "name": base_material, "definition": definition_id }
containers = ContainerRegistry.getInstance().findInstanceContainers(**criteria)
containers = [basic_material for basic_material in containers if
basic_material.getMetaDataEntry("variant") == material_container.getMetaDataEntry(
"variant")]
return containers
return []
def _getFilteredContainers(self, **kwargs):
return self._getFilteredContainersForStack(None, None, **kwargs)
def _getFilteredContainersForStack(self, machine_definition=None, material_containers=None, **kwargs):
# Fill in any default values.
if machine_definition is None:
machine_definition = Application.getInstance().getGlobalContainerStack().getBottom()
quality_definition_id = machine_definition.getMetaDataEntry("quality_definition")
if quality_definition_id is not None:
machine_definition = ContainerRegistry.getInstance().findDefinitionContainers(id=quality_definition_id)[0]
if material_containers is None:
active_stacks = ExtruderManager.getInstance().getActiveGlobalAndExtruderStacks()
material_containers = [stack.findContainer(type="material") for stack in active_stacks]
criteria = kwargs
filter_by_material = False
machine_definition = self.getParentMachineDefinition(machine_definition)
whole_machine_definition = self.getWholeMachineDefinition(machine_definition)
if whole_machine_definition.getMetaDataEntry("has_machine_quality"):
definition_id = machine_definition.getMetaDataEntry("quality_definition", whole_machine_definition.getId())
criteria["definition"] = definition_id
filter_by_material = whole_machine_definition.getMetaDataEntry("has_materials")
else:
criteria["definition"] = "fdmprinter"
# Stick the material IDs in a set
if material_containers is None or len(material_containers) == 0:
filter_by_material = False
else:
material_ids = set()
for material_instance in material_containers:
if material_instance is not None:
# Add the parent material too.
for basic_material in self._getBasicMaterials(material_instance):
material_ids.add(basic_material.getId())
material_ids.add(material_instance.getId())
containers = ContainerRegistry.getInstance().findInstanceContainers(**criteria)
result = []
for container in containers:
# If the machine specifies we should filter by material, exclude containers that do not match any active material.
if filter_by_material and container.getMetaDataEntry("material") not in material_ids and not "global_quality" in kwargs:
continue
result.append(container)
return result
## Get the parent machine definition of a machine definition.
#
# \param machine_definition \type{DefinitionContainer} This may be a normal machine definition or
# an extruder definition.
# \return \type{DefinitionContainer} the parent machine definition. If the given machine
# definition doesn't have a parent then it is simply returned.
def getParentMachineDefinition(self, machine_definition: DefinitionContainer) -> DefinitionContainer:
container_registry = ContainerRegistry.getInstance()
machine_entry = machine_definition.getMetaDataEntry("machine")
if machine_entry is None:
            # We have a normal (whole) machine definition
quality_definition = machine_definition.getMetaDataEntry("quality_definition")
if quality_definition is not None:
parent_machine_definition = container_registry.findDefinitionContainers(id=quality_definition)[0]
return self.getParentMachineDefinition(parent_machine_definition)
else:
return machine_definition
else:
# This looks like an extruder. Find the rest of the machine.
whole_machine = container_registry.findDefinitionContainers(id=machine_entry)[0]
parent_machine = self.getParentMachineDefinition(whole_machine)
if whole_machine is parent_machine:
# This extruder already belongs to a 'parent' machine def.
return machine_definition
else:
# Look up the corresponding extruder definition in the parent machine definition.
extruder_position = machine_definition.getMetaDataEntry("position")
parent_extruder_id = parent_machine.getMetaDataEntry("machine_extruder_trains")[extruder_position]
return container_registry.findDefinitionContainers(id=parent_extruder_id)[0]
## Get the whole/global machine definition from an extruder definition.
#
# \param machine_definition \type{DefinitionContainer} This may be a normal machine definition or
# an extruder definition.
# \return \type{DefinitionContainer}
def getWholeMachineDefinition(self, machine_definition):
machine_entry = machine_definition.getMetaDataEntry("machine")
if machine_entry is None:
# This already is a 'global' machine definition.
return machine_definition
else:
container_registry = ContainerRegistry.getInstance()
whole_machine = container_registry.findDefinitionContainers(id=machine_entry)[0]
return whole_machine
| agpl-3.0 | -6,735,441,099,129,865,000 | 55.076923 | 145 | 0.678878 | false | 4.690805 | false | false | false |
msyriac/orphics | bin/rot_recon.py | 1 | 4352 | from orphics.mpi import MPI
import orphics.pipelines as utils
import argparse
from enlib import enmap
# Parse command line
parser = argparse.ArgumentParser(description='Run south rotation test.')
parser.add_argument("-x", "--patch-width", type=float, default=40., help="Patch width in degrees.")
parser.add_argument("-y", "--patch-height", type=float, default=15., help="Patch height in degrees.")
parser.add_argument("-o", "--yoffset", type=float, default=60., help="Offset in declination of southern patch center.")
parser.add_argument("-p", "--full-sky-pixel", type=float, default=0.5,help="Full sky pixel resolution in arcminutes.")
parser.add_argument("-i", "--pix-inter", type=float, default=None,help="Intermediate patch pixelization.")
parser.add_argument("-l", "--lmax", type=int, default=7000,help="Lmax for full-sky lensing.")
parser.add_argument("-b", "--bin-lmax", type=int, default=3000,help="Lmax for binning.")
parser.add_argument("-N", "--Nsims", type=int, default=10,help="Number of sims.")
parser.add_argument("-m", "--meanfield", type=str, default=None,help="Meanfield file root.")
parser.add_argument('-s', "--skip-recon",action='store_true',help="Skip reconstruction.")
args = parser.parse_args()
# Intialize the rotation testing pipeline
pipe = utils.RotTestPipeline(full_sky_pix=args.full_sky_pixel,wdeg=args.patch_width,
hdeg=args.patch_height,yoffset=args.yoffset,
mpi_comm=MPI.COMM_WORLD,nsims=args.Nsims,lmax=args.lmax,pix_intermediate=args.pix_inter,
bin_lmax=args.bin_lmax)
cmb = {} # this will store CMB maps
ikappa = {} # this will store input kappa maps
mlist = ['e','s','r'] # e stands for patch native to equator, s for native to south, r for rotated from south to equator
mf = {}
# Check if a meanfield is provided
for m in mlist:
if args.meanfield is not None:
mf[m] = enmap.read_map(args.meanfield+"/meanfield_"+m+".hdf")
else:
mf[m] = 0.
for k,index in enumerate(pipe.tasks):
# Make CMB maps and kappa maps
cmb['s'],cmb['e'],ikappa['s'],ikappa['e'] = pipe.make_sim(index)
# Rotate CMB map and kappa
cmb['r'] = pipe.rotator.rotate(cmb['s'])
ikappa['r'] = pipe.rotator.rotate(ikappa['s'], order=5, mode="constant", cval=0.0, prefilter=True, mask_nan=True, safe=True)
# For each of e,s,r
for m in mlist:
# Calculate CMB power
cxc,kcmb,kcmb = pipe.fc[m].power2d(cmb[m])
pipe.mpibox.add_to_stats("cmb-"+m,pipe.binner[m].bin(cxc/pipe.w2[m])[1]) # Divide by w2 window correction
# Calculate input kappa power
ixi,kinput,_ = pipe.fc[m].power2d(ikappa[m])
ixi /= pipe.w2[m] # divide by w2 window correction
pipe.mpibox.add_to_stats("ixi-"+m,pipe.binner[m].bin(ixi)[1])
if args.skip_recon: continue
if pipe.rank==0: pipe.logger.info( "Reconstructing...")
# Reconstruct and subtract meanfield if any
recon = pipe.reconstruct(m,cmb[m]) - mf[m]
if pipe.rank==0: pipe.logger.info( "Powers...")
# Calculate raw Clkk power
rxr,krecon,_ = pipe.fc[m].power2d(recon)
rxr /= pipe.w4[m]
# Calculate recon cross input power
rxi = pipe.fc[m].f2power(kinput,krecon)
rxi /= pipe.w3[m]
# Calculate realization dependent N0 ("super dumb")
n0 = pipe.qest[m].N.super_dumb_N0_TTTT(cxc)/pipe.w2[m]**2.
# Calculate corrected Clkk power
rxr_n0 = rxr - n0
# Collect statistics
pipe.mpibox.add_to_stack("meanfield-"+m,recon)
pipe.mpibox.add_to_stats("rxr-"+m,pipe.binner[m].bin(rxr)[1])
pipe.mpibox.add_to_stats("rxi-"+m,pipe.binner[m].bin(rxi)[1])
pipe.mpibox.add_to_stats("n0-"+m,pipe.binner[m].bin(n0)[1])
pipe.mpibox.add_to_stats("rxr-n0-"+m,pipe.binner[m].bin(rxr_n0)[1])
if k==0 and pipe.rank==0:
import orphics.io as io
io.plot_img(cmb[m],io.dout_dir+"cmb_"+m+".png",high_res=True)
io.plot_img(recon,io.dout_dir+"recon_"+m+".png",high_res=True)
if pipe.rank==0: pipe.logger.info( "MPI Collecting...")
pipe.mpibox.get_stacks(verbose=False)
pipe.mpibox.get_stats(verbose=False)
if pipe.rank==0:
pipe.dump(save_meanfield=(args.meanfield is None),skip_recon=args.skip_recon)
| bsd-2-clause | 4,958,113,471,433,312,000 | 41.252427 | 128 | 0.637638 | false | 2.997245 | false | false | false |
gjhiggins/graphpath | graphpath/entail.py | 1 | 8355 | from __future__ import generators
from expr import Step, Class, Property
from util.anysets import Set, ImmutableSet
empty_set = ImmutableSet()
class RuleDict(dict):
"""A mapping of resources to GraphPath expressions.
The expression for a given resource is the Union()
of all the expressions assigned to that mapping.
"""
def __setitem__(self, res, expr):
"""Add a definition for a resource"""
if res in self:
extant = self[res]
if extant != expr:
dict.__setitem__(self, res, extant | expr)
else:
dict.__setitem__(self, res, expr)
class ClassRules(RuleDict, Step):
"""A dictionary of class definitions and, simultaneously,
the rule for rdf:type.
As a mapping, the key is a class resource and the value
is a GraphPath expression.
As a GraphPath step, match every implied rdf:type path.
"""
def values(self, pop, members):
"""It is very expensive to ask the rdf.type of a resource
under this rule evaluation system. For now, we defer to
the ground facts when asked a type."""
return empty_set
# full implementation disabled....
result = Set()
for clss in self:
if members & self[clss].initials(pop):
result.add(clss)
return result
def match(self, pop, classes):
"""Generate the extent set for a class or classes."""
result = Set()
for clss in classes:
if clss in self:
result |= self[clss].initials(pop)
return result
def __or__(self, other):
"""Prevent accidental modification
via redefinition or rdf:type."""
raise NotImplementedError
class PropertyRules(RuleDict):
"""A dictionary of property definitions.
The key is a property resource and the value is a
GraphPath expresion.
"""
class RuleBase:
"""A RuleBase is a mapping of classes and properties to their
definitions.
A class is indexed by an elementary Class(...)
step and its mapping is a definition in the form of
an absolute GraphPath expression.
A property is indexed by an elementary Property(...) step
and its mapping is a definition in the form of a relative
GraphPath expression.
Two attributes, self.classes and self.properties expose
the two rule populations individually.
"""
def __init__(self):
self.clear()
def clear(self):
"""Empty the rule base."""
self.classes = ClassRules()
self.properties = PropertyRules()
def __getstate__(self):
return self.properties, self.classes
def __setstate__(self, state):
self.properties, self.classes = state
def update(self, other):
"""Add all rules from another rulebase."""
for key in other:
self[key] = other[key]
def __setitem__(self, lvalue, expr):
"""Add a definition for a class or property.
Multiple definitions for the same class or property
are combined by union.
"""
if isinstance(lvalue, Class):
self.classes[lvalue.resource()] = expr
else:
assert isinstance(lvalue, Property)
self.properties[lvalue.resource()] = expr
def __getitem__(self, lvalue):
"""Map a class or property to its definition."""
if isinstance(lvalue, Class):
return self.classes[lvalue.resource()]
else:
assert isinstance(lvalue, Property)
return self.properties[lvalue.resource()]
def __contains__(self, lvalue):
"""Test if a class or property is defined."""
try:
__trial = self[lvalue]
del __trial
except KeyError:
return False
else:
return True
def __iter__(self):
"""Iterate all properties and classes in the rule base."""
for res in self.classes:
yield Class(res)
for res in self.properties:
if self.properties[res] is not self.classes:
yield Property(res)
def get(self, lvalue, default=None):
"""Map a class or property to its definition or None."""
try:
return self[lvalue]
except KeyError:
return default
class Sandbox:
"""A Sandbox is an environment for rule execution. It implements the
Population protocol and so can be queried with
expricit GraphPath expressions and implicitly by rules.
Rule dependencies a tracked, circular rules are iterated until stable,
and results are cached for the lifetime of the sandbox.
A sandbox requires the ground facts and rules to remain contant and there
must be only one thread executing in the sandbox.
Rules should be written so results depend only on information provided
by calling the sandbox methods. Rules must support the rule protocol
(see expr module) but need not be written using the expr module classes.
"""
def __init__(self, pop, rules):
"""Create a sandbox for the given facts and rules (both constant)."""
self._pop = pop
self._rules = rules
self._cache = {}
self._stack = []
self._circular = {}
# set the rdf:type rule for the local rdf:type symbol
self.rdf_type = pop.rdf_type
rules.properties[pop.rdf_type] = rules.classes
def match(self, prop, value):
"""Delegate the match function to a rule, if any,
otherwise return ground facts."""
if prop in self._rules.properties:
return self._evaluate(False, prop, value)
else:
return self._pop.match(prop, value)
def values(self, subj, prop):
"""Delegate the values function to a rule, if any,
otherwise return ground facts."""
if prop in self._rules.properties:
return self._evaluate(True, prop, subj)
else:
return self._pop.values(subj, prop)
def _evaluate(self, forward, prop, seed):
"""evaluate a rule for a property, prop,
in the direction, forward, with the argument, seed."""
pattern = forward, prop, seed # the present query as a tuple
stack = self._stack
circs = self._circular
cache = self._cache
# print " "*len(stack),pattern
# have we seen this query before?
if pattern in cache:
# is it a circular query (depends on its own result)?
if pattern in stack:
# register the query with its circular dependencies
depends = circs.setdefault(pattern, Set())
for ix in range(len(stack) - 1, -1, -1): # 2.2 syntax
depend = stack[ix]
if depend == pattern:
break
depends.add(depend)
# return previously obtained result
return cache[pattern]
# prepare to evaluate from scratch
seeds = Set([seed])
result = cache[pattern] = Set()
# get rule and ground facts
if forward:
rule = self._rules.properties[prop].values
result |= self._pop.values(seed, prop)
else:
rule = self._rules.properties[prop].match
result |= self._pop.match(prop, seed)
# maintain an evaluation stack to track circular dependencies
stack.append(pattern)
# attempt evaluation
result |= rule(self, seeds)
# if a circulation was detected we must iterate
if pattern in circs:
depends = circs[pattern]
while True:
init_count = len(result)
# invalidate cache for sub-queries that depend on result
for depend in depends:
del cache[depend]
result |= rule(self, seeds)
# if no new results were obtained we are finished
if len(result) == init_count:
break
# evaluation complete: cleanup stack
stack.pop()
return result
def __contains__(self, rid):
return rid in self._pop
def __iter__(self):
return iter(self._pop)
def __getitem__(self, rid):
return self._pop[rid]
| gpl-2.0 | -8,830,023,813,209,567,000 | 31.134615 | 77 | 0.59234 | false | 4.621128 | false | false | false |
Intel-Corporation/tensorflow | tensorflow/python/keras/optimizer_v2/ftrl.py | 1 | 9853 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ftrl-proximal for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.optimizers.Ftrl')
class Ftrl(optimizer_v2.OptimizerV2):
r"""Optimizer that implements the FTRL algorithm.
See Algorithm 1 of this [paper](
https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf).
This version has support for both online L2 (the L2 penalty given in the paper
above) and shrinkage-type L2 (which is the addition of an L2 penalty to the
loss function).
Initialization:
$$t = 0$$
$$n_{0} = 0$$
$$\sigma_{0} = 0$$
$$z_{0} = 0$$
Update ($$i$$ is variable index):
$$t = t + 1$$
$$n_{t,i} = n_{t-1,i} + g_{t,i}^{2}$$
$$\sigma_{t,i} = (\sqrt{n_{t,i}} - \sqrt{n_{t-1,i}}) / \alpha$$
$$z_{t,i} = z_{t-1,i} + g_{t,i} - \sigma_{t,i} * w_{t,i}$$
  $$w_{t,i} = - ((\beta+\sqrt{n_{t,i}}) / \alpha + \lambda_{2})^{-1} * (z_{t,i} -
  sgn(z_{t,i}) * \lambda_{1}) if |z_{t,i}| > \lambda_{1} else 0$$
Check the documentation for the l2_shrinkage_regularization_strength
parameter for more details when shrinkage is enabled, where gradient is
replaced with gradient_with_shrinkage.
"""
def __init__(self,
learning_rate,
learning_rate_power=-0.5,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0,
name='Ftrl',
l2_shrinkage_regularization_strength=0.0,
**kwargs):
r"""Construct a new FTRL optimizer.
Args:
learning_rate: A float value or a constant float `Tensor`.
learning_rate_power: A float value, must be less or equal to zero.
Controls how the learning rate decreases during training. Use zero for
a fixed learning rate.
initial_accumulator_value: The starting value for accumulators.
Only zero or positive values are allowed.
l1_regularization_strength: A float value, must be greater than or
equal to zero.
l2_regularization_strength: A float value, must be greater than or
equal to zero.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "Ftrl".
l2_shrinkage_regularization_strength: A float value, must be greater than
or equal to zero. This differs from L2 above in that the L2 above is a
stabilization penalty, whereas this L2 shrinkage is a magnitude penalty.
The FTRL formulation can be written as:
w_{t+1} = argmin_w(\hat{g}_{1:t}w + L1*||w||_1 + L2*||w||_2^2), where
\hat{g} = g + (2*L2_shrinkage*w), and g is the gradient of the loss
function w.r.t. the weights w.
Specifically, in the absence of L1 regularization, it is equivalent to
the following update rule:
w_{t+1} = w_t - lr_t / (1 + 2*L2*lr_t) * g_t -
2*L2_shrinkage*lr_t / (1 + 2*L2*lr_t) * w_t
where lr_t is the learning rate at t.
        When the input is sparse, shrinkage will only happen on the active weights.
**kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`,
`decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip
gradients by value, `decay` is included for backward compatibility to
allow time inverse decay of learning rate. `lr` is included for backward
compatibility, recommended to use `learning_rate` instead.
Raises:
ValueError: If one of the arguments is invalid.
References
See [paper]
(https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf)
"""
super(Ftrl, self).__init__(name, **kwargs)
if initial_accumulator_value < 0.0:
raise ValueError(
'initial_accumulator_value %f needs to be positive or zero' %
initial_accumulator_value)
if learning_rate_power > 0.0:
raise ValueError('learning_rate_power %f needs to be negative or zero' %
learning_rate_power)
if l1_regularization_strength < 0.0:
raise ValueError(
'l1_regularization_strength %f needs to be positive or zero' %
l1_regularization_strength)
if l2_regularization_strength < 0.0:
raise ValueError(
'l2_regularization_strength %f needs to be positive or zero' %
l2_regularization_strength)
if l2_shrinkage_regularization_strength < 0.0:
raise ValueError(
'l2_shrinkage_regularization_strength %f needs to be positive'
' or zero' % l2_shrinkage_regularization_strength)
self._set_hyper('learning_rate', learning_rate)
self._set_hyper('decay', self._initial_decay)
self._set_hyper('learning_rate_power', learning_rate_power)
self._set_hyper('l1_regularization_strength', l1_regularization_strength)
self._set_hyper('l2_regularization_strength', l2_regularization_strength)
self._initial_accumulator_value = initial_accumulator_value
self._l2_shrinkage_regularization_strength = (
l2_shrinkage_regularization_strength)
def _create_slots(self, var_list):
# Create the "accum" and "linear" slots.
for var in var_list:
dtype = var.dtype.base_dtype
init = init_ops.constant_initializer(
self._initial_accumulator_value, dtype=dtype)
self.add_slot(var, 'accumulator', init)
self.add_slot(var, 'linear')
def _resource_apply_dense(self, grad, var):
var_dtype = var.dtype.base_dtype
lr_t = self._decayed_lr(var_dtype)
learning_rate_power = self._get_hyper('learning_rate_power', var_dtype)
l1_regularization_strength = self._get_hyper('l1_regularization_strength',
var_dtype)
l2_regularization_strength = self._get_hyper('l2_regularization_strength',
var_dtype)
accum = self.get_slot(var, 'accumulator')
linear = self.get_slot(var, 'linear')
if self._l2_shrinkage_regularization_strength <= 0.0:
return training_ops.resource_apply_ftrl(
var.handle,
accum.handle,
linear.handle,
grad,
lr_t,
l1_regularization_strength,
l2_regularization_strength,
learning_rate_power,
use_locking=self._use_locking)
else:
return training_ops.resource_apply_ftrl_v2(
var.handle,
accum.handle,
linear.handle,
grad,
lr_t,
l1_regularization_strength,
l2_regularization_strength,
math_ops.cast(self._l2_shrinkage_regularization_strength, var_dtype),
learning_rate_power,
use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices):
var_dtype = var.dtype.base_dtype
lr_t = self._decayed_lr(var_dtype)
learning_rate_power = self._get_hyper('learning_rate_power', var_dtype)
l1_regularization_strength = self._get_hyper('l1_regularization_strength',
var_dtype)
l2_regularization_strength = self._get_hyper('l2_regularization_strength',
var_dtype)
accum = self.get_slot(var, 'accumulator')
linear = self.get_slot(var, 'linear')
if self._l2_shrinkage_regularization_strength <= 0.0:
return training_ops.resource_sparse_apply_ftrl(
var.handle,
accum.handle,
linear.handle,
grad,
indices,
lr_t,
l1_regularization_strength,
l2_regularization_strength,
learning_rate_power,
use_locking=self._use_locking)
else:
return training_ops.resource_sparse_apply_ftrl_v2(
var.handle,
accum.handle,
linear.handle,
grad,
indices,
lr_t,
l1_regularization_strength,
l2_regularization_strength,
math_ops.cast(self._l2_shrinkage_regularization_strength, var_dtype),
learning_rate_power,
use_locking=self._use_locking)
def get_config(self):
config = super(Ftrl, self).get_config()
config.update({
'learning_rate':
self._serialize_hyperparameter('learning_rate'),
'decay':
self._serialize_hyperparameter('decay'),
'initial_accumulator_value':
self._initial_accumulator_value,
'learning_rate_power':
self._serialize_hyperparameter('learning_rate_power'),
'l1_regularization_strength':
            self._serialize_hyperparameter('l1_regularization_strength'),
        'l2_regularization_strength':
            self._serialize_hyperparameter('l2_regularization_strength'),
'l2_shrinkage_regularization_strength':
self._l2_shrinkage_regularization_strength,
})
return config
| apache-2.0 | -5,455,901,851,465,060,000 | 41.106838 | 80 | 0.628134 | false | 3.782342 | false | false | false |
faeli/joke | joke/fair/db_url.py | 1 | 2442 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from urlparse import urlparse, parse_qsl
except ImportError as e:
from urllib.parse import urlparse, parse_qsl
from .database import SQLiteDatabase
schemes = {
'sqlite': SQLiteDatabase
}
def parseresult_to_dict(parsed):
# urlparse in python 2.6 is broken so query will be empty and instead
# appended to path complete with '?'
path_parts = parsed.path[1:].split('?')
try:
query = path_parts[1]
except IndexError:
query = parsed.query
connect_kwargs = {'database': path_parts[0]}
if parsed.username:
connect_kwargs['user'] = parsed.username
if parsed.password:
connect_kwargs['password'] = parsed.password
if parsed.hostname:
connect_kwargs['host'] = parsed.hostname
if parsed.port:
connect_kwargs['port'] = parsed.port
# Adjust parameters for MySQL.
if parsed.scheme == 'mysql' and 'password' in connect_kwargs:
connect_kwargs['passwd'] = connect_kwargs.pop('password')
elif 'sqlite' in parsed.scheme and not connect_kwargs['database']:
connect_kwargs['database'] = ':memory:'
# Get additional connection args from the query string
qs_args = parse_qsl(query, keep_blank_values=True)
for key, value in qs_args:
if value.lower() == 'false':
value = False
elif value.lower() == 'true':
value = True
elif value.isdigit():
value = int(value)
elif '.' in value and all(p.isdigit() for p in value.split('.', 1)):
try:
value = float(value)
except ValueError:
pass
elif value.lower() in ('null', 'none'):
value = None
connect_kwargs[key] = value
return connect_kwargs
def connect(db_url, **connect_params):
parsed = urlparse(db_url)
connect_kwargs = parseresult_to_dict(parsed)
connect_kwargs.update(connect_params)
database_class = schemes.get(parsed.scheme)
if database_class is None:
if database_class in schemes:
raise RuntimeError('Attempted to use "%s" but a required library '
'could not be imported.' % parsed.scheme)
else:
raise RuntimeError('Unrecognized or unsupported scheme: "%s".'%
parsed.scheme)
return database_class(**connect_kwargs)
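# Example (sketch; the URL below is illustrative, not a real database):
#   db = connect('sqlite:///example.db?check_same_thread=false')
# Query-string values are coerced by parseresult_to_dict, so the extra
# argument above arrives as the boolean False.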
# | mit | -5,027,614,411,988,602,000 | 29.5375 | 78 | 0.604013 | false | 4.224913 | false | false | false |
a-nai/django-wiki | wiki/plugins/images/models.py | 1 | 5261 | from __future__ import unicode_literals
from __future__ import absolute_import
import os.path
from django.conf import settings as django_settings
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from . import settings
from wiki.models.pluginbase import RevisionPlugin, RevisionPluginRevision
from django.db.models import signals
from six.moves import range
if "sorl.thumbnail" not in django_settings.INSTALLED_APPS:
raise ImproperlyConfigured(
'wiki.plugins.images: needs sorl.thumbnail in INSTALLED_APPS')
def upload_path(instance, filename):
# Has to match original extension filename
upload_path = settings.IMAGE_PATH
upload_path = upload_path.replace(
'%aid', str(instance.plugin.image.article.id))
if settings.IMAGE_PATH_OBSCURIFY:
import uuid
upload_path = os.path.join(upload_path, uuid.uuid4().hex)
return os.path.join(upload_path, filename)
@python_2_unicode_compatible
class Image(RevisionPlugin):
# The plugin system is so awesome that the inheritor doesn't need to do
# anything! :D
def can_write(self, user):
if not settings.ANONYMOUS and (not user or user.is_anonymous()):
return False
return RevisionPlugin.can_write(self, user)
def can_delete(self, user):
return self.can_write(user)
class Meta:
verbose_name = _('image')
verbose_name_plural = _('images')
db_table = 'wiki_images_image' # Matches label of upcoming 0.1 release
if settings.APP_LABEL:
app_label = settings.APP_LABEL
def __str__(self):
title = (_('Image: %s') % self.current_revision.imagerevision.get_filename(
)) if self.current_revision else _('Current revision not set!!')
return str(title)
@python_2_unicode_compatible
class ImageRevision(RevisionPluginRevision):
image = models.ImageField(upload_to=upload_path,
max_length=2000, height_field='height',
width_field='width', blank=True, null=True,
storage=settings.STORAGE_BACKEND)
width = models.SmallIntegerField(blank=True, null=True)
height = models.SmallIntegerField(blank=True, null=True)
def get_filename(self):
if self.image:
try:
return self.image.name.split('/')[-1]
except OSError:
pass
return None
    def path(self):
        if self.image:
            try:
                # path1 = self.image.path.split("/")[:-1]
                # Strip a fixed-length prefix (presumably the media root) from
                # the storage path.
                path1 = self.image.path[29:]
                return path1
            except OSError:
                pass
        return None
def dimensions(self):
return "{}x{}".format(self.width, self.height)
def get_size(self):
"""Used to retrieve the file size and not cause exceptions."""
try:
return self.image.size
except ValueError:
return None
except OSError:
return None
def inherit_predecessor(self, image, skip_image_file=False):
"""
Inherit certain properties from predecessor because it's very
convenient. Remember to always call this method before
setting properties :)"""
predecessor = image.current_revision.imagerevision
self.plugin = predecessor.plugin
self.deleted = predecessor.deleted
self.locked = predecessor.locked
if not skip_image_file:
try:
self.image = predecessor.image
self.width = predecessor.width
self.height = predecessor.height
except IOError:
self.image = None
class Meta:
verbose_name = _('image revision')
verbose_name_plural = _('image revisions')
# Matches label of upcoming 0.1 release
db_table = 'wiki_images_imagerevision'
if settings.APP_LABEL:
app_label = settings.APP_LABEL
ordering = ('-created',)
def __str__(self):
title = _('Image Revsion: %d') % self.revision_number
return str(title)
def on_image_revision_delete(instance, *args, **kwargs):
if not instance.image:
return
# Remove image file
path = instance.image.path.split("/")[:-1]
instance.image.delete(save=False)
# Clean up empty directories
# Check for empty folders in the path. Delete the first two.
if len(path[-1]) == 32:
# Path was (most likely) obscurified so we should look 2 levels down
max_depth = 2
else:
max_depth = 1
for depth in range(0, max_depth):
delete_path = "/".join(path[:-depth] if depth > 0 else path)
try:
dir_list = os.listdir(
os.path.join(django_settings.MEDIA_ROOT, delete_path))
except OSError:
# Path does not exist, so let's not try to remove it...
dir_list = None
if not (dir_list is None) and len(dir_list) == 0:
os.rmdir(delete_path)
signals.pre_delete.connect(on_image_revision_delete, ImageRevision)
| gpl-3.0 | -3,529,403,965,202,404,400 | 32.08805 | 83 | 0.617943 | false | 4.172086 | false | false | false |
fulfilio/trytond-gift-card | product.py | 1 | 3978 | # -*- coding: utf-8 -*-
from trytond.model import fields, ModelSQL, ModelView
from trytond.pool import PoolMeta
from trytond.pyson import Eval, Bool
__all__ = ['Product', 'GiftCardPrice']
__metaclass__ = PoolMeta
class Product:
"Product"
__name__ = 'product.product'
is_gift_card = fields.Boolean("Is Gift Card ?")
gift_card_delivery_mode = fields.Selection([
('virtual', 'Virtual'),
('physical', 'Physical'),
('combined', 'Combined'),
], 'Gift Card Delivery Mode')
allow_open_amount = fields.Boolean("Allow Open Amount ?")
gc_min = fields.Numeric("Gift Card Minimum Amount")
gc_max = fields.Numeric("Gift Card Maximum Amount")
gift_card_prices = fields.One2Many(
'product.product.gift_card.price', 'product', 'Gift Card Prices',
)
@classmethod
def view_attributes(cls):
return super(Product, cls).view_attributes() + [
('//group[@id="gift_card_info"]', 'states', {
'invisible': ~Bool(Eval('is_gift_card'))
})]
@staticmethod
def default_gift_card_delivery_mode():
return 'physical'
@staticmethod
def default_is_gift_card():
return False
@staticmethod
def default_allow_open_amount():
return False
@classmethod
def __setup__(cls):
super(Product, cls).__setup__()
cls._error_messages.update({
'inappropriate_product':
'Product %s is not appropriate under %s delivery mode',
'invalid_amount':
'Gift Card minimum amount must be smaller than gift card '
'maximum amount',
'negative_amount_not_allowed':
'Gift card amounts can not be negative'
})
@classmethod
def validate(cls, templates):
"""
Validates each product template
"""
super(Product, cls).validate(templates)
for template in templates:
template.check_type_and_mode()
template.check_gc_min_max()
def check_gc_min_max(self):
"""
Check minimum amount to be smaller than maximum amount
"""
if not self.allow_open_amount:
return
if self.gc_min < 0 or self.gc_max < 0:
self.raise_user_error("negative_amount_not_allowed")
if self.gc_min > self.gc_max:
self.raise_user_error("invalid_amount")
def check_type_and_mode(self):
"""
Type must be service only if delivery mode is virtual
Type must be goods only if delivery mode is combined or physical
"""
if not self.is_gift_card:
return
if (
self.gift_card_delivery_mode == 'virtual' and
self.type != 'service'
) or (
self.gift_card_delivery_mode in ['physical', 'combined'] and
self.type != 'goods'
):
self.raise_user_error(
"inappropriate_product", (
self.rec_name, self.gift_card_delivery_mode
)
)
class GiftCardPrice(ModelSQL, ModelView):
"Gift Card Price"
__name__ = 'product.product.gift_card.price'
_rec_name = 'price'
product = fields.Many2One(
"product.product", "Product", required=True, select=True
)
price = fields.Numeric("Price", required=True)
@classmethod
def __setup__(cls):
super(GiftCardPrice, cls).__setup__()
cls._error_messages.update({
'negative_amount': 'Price can not be negative'
})
@classmethod
def validate(cls, prices):
"""
Validate product price for gift card
"""
super(GiftCardPrice, cls).validate(prices)
for price in prices:
price.check_price()
def check_price(self):
"""
Price can not be negative
"""
if self.price < 0:
self.raise_user_error("negative_amount")
| bsd-3-clause | -2,309,821,036,936,442,000 | 26.246575 | 74 | 0.565108 | false | 3.981982 | false | false | false |
suprzer0/aoc2016 | day4/solve.py | 1 | 1719 | from collections import namedtuple, Counter
from itertools import groupby
import re
from string import ascii_lowercase
Room = namedtuple('Room', ['name', 'id', 'chksum'])
def common_chars(text):
chars = ''
cnt = Counter(text)
del cnt['-']
for group in groupby(cnt.most_common(), key=lambda c: c[1]):
chars += ''.join(sorted(g[0] for g in group[1]))
return chars
def decrypt(s, offset):
offset %= len(ascii_lowercase)
rotated = ascii_lowercase[offset:]+ascii_lowercase[:offset]
newmap = str.maketrans(ascii_lowercase+'-', rotated+' ')
return str.translate(s, newmap)
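# Worked example (from the puzzle statement, shown here as a sketch):
#   decrypt('qzmt-zixmtkozy-ivhz', 343) -> 'very encrypted name'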
def solve_p1(data):
"""
"""
valid_roomid_sum = 0
for room in data:
if common_chars(room.name).startswith(room.chksum):
valid_roomid_sum += room.id
return valid_roomid_sum
def solve_p2(data):
"""
"""
for room in data:
if common_chars(room.name).startswith(room.chksum):
d = decrypt(room.name, room.id)
if d.replace(' ', '').startswith('northpoleobjects'):
return room.id
return None
room_re = re.compile(r'^(\w+(?:-\w+)*)-(\d+)\[(\w+)\]$')
def load_data(inputfile):
"""
Converts the data from the input file into something usable
by the solve functions.
"""
rooms = []
for line in inputfile:
matches = room_re.match(line)
name, roomid, chksum = matches.groups()
rooms.append(Room(name, int(roomid), chksum))
return rooms
if __name__ == '__main__':
with open('input.txt', 'r') as inputfile:
data = load_data(inputfile)
print("Part 1 Solution")
print(solve_p1(data))
print("Part 2 Solution")
print(solve_p2(data))
| mit | 5,726,174,863,873,327,000 | 25.045455 | 65 | 0.601513 | false | 3.458753 | false | false | false |
pschmitt/home-assistant | homeassistant/components/stream/__init__.py | 2 | 7096 | """Provide functionality to stream video source."""
import logging
import secrets
import threading
import voluptuous as vol
from homeassistant.const import CONF_FILENAME, EVENT_HOMEASSISTANT_STOP
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.loader import bind_hass
from .const import (
ATTR_ENDPOINTS,
ATTR_STREAMS,
CONF_DURATION,
CONF_LOOKBACK,
CONF_STREAM_SOURCE,
DOMAIN,
SERVICE_RECORD,
)
from .core import PROVIDERS
from .hls import async_setup_hls
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema({DOMAIN: vol.Schema({})}, extra=vol.ALLOW_EXTRA)
STREAM_SERVICE_SCHEMA = vol.Schema({vol.Required(CONF_STREAM_SOURCE): cv.string})
SERVICE_RECORD_SCHEMA = STREAM_SERVICE_SCHEMA.extend(
{
vol.Required(CONF_FILENAME): cv.string,
vol.Optional(CONF_DURATION, default=30): int,
vol.Optional(CONF_LOOKBACK, default=0): int,
}
)
@bind_hass
def request_stream(hass, stream_source, *, fmt="hls", keepalive=False, options=None):
"""Set up stream with token."""
if DOMAIN not in hass.config.components:
raise HomeAssistantError("Stream integration is not set up.")
if options is None:
options = {}
# For RTSP streams, prefer TCP
if isinstance(stream_source, str) and stream_source[:7] == "rtsp://":
options = {
"rtsp_flags": "prefer_tcp",
"stimeout": "5000000",
**options,
}
try:
streams = hass.data[DOMAIN][ATTR_STREAMS]
stream = streams.get(stream_source)
if not stream:
stream = Stream(hass, stream_source, options=options, keepalive=keepalive)
streams[stream_source] = stream
else:
# Update keepalive option on existing stream
stream.keepalive = keepalive
# Add provider
stream.add_provider(fmt)
if not stream.access_token:
stream.access_token = secrets.token_hex()
stream.start()
return hass.data[DOMAIN][ATTR_ENDPOINTS][fmt].format(stream.access_token)
except Exception:
raise HomeAssistantError("Unable to get stream")
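# Rough usage sketch (the RTSP URL is a placeholder, not a real camera):
#   url = request_stream(hass, "rtsp://camera.local/stream", fmt="hls")
# The return value is the tokenized endpoint for the requested format.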
async def async_setup(hass, config):
"""Set up stream."""
# Set log level to error for libav
logging.getLogger("libav").setLevel(logging.ERROR)
# Keep import here so that we can import stream integration without installing reqs
# pylint: disable=import-outside-toplevel
from .recorder import async_setup_recorder
hass.data[DOMAIN] = {}
hass.data[DOMAIN][ATTR_ENDPOINTS] = {}
hass.data[DOMAIN][ATTR_STREAMS] = {}
# Setup HLS
hls_endpoint = async_setup_hls(hass)
hass.data[DOMAIN][ATTR_ENDPOINTS]["hls"] = hls_endpoint
# Setup Recorder
async_setup_recorder(hass)
@callback
def shutdown(event):
"""Stop all stream workers."""
for stream in hass.data[DOMAIN][ATTR_STREAMS].values():
stream.keepalive = False
stream.stop()
_LOGGER.info("Stopped stream workers")
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, shutdown)
async def async_record(call):
"""Call record stream service handler."""
await async_handle_record_service(hass, call)
hass.services.async_register(
DOMAIN, SERVICE_RECORD, async_record, schema=SERVICE_RECORD_SCHEMA
)
return True
class Stream:
"""Represents a single stream."""
def __init__(self, hass, source, options=None, keepalive=False):
"""Initialize a stream."""
self.hass = hass
self.source = source
self.options = options
self.keepalive = keepalive
self.access_token = None
self._thread = None
self._thread_quit = None
self._outputs = {}
if self.options is None:
self.options = {}
@property
def outputs(self):
"""Return stream outputs."""
return self._outputs
def add_provider(self, fmt):
"""Add provider output stream."""
if not self._outputs.get(fmt):
provider = PROVIDERS[fmt](self)
self._outputs[fmt] = provider
return self._outputs[fmt]
def remove_provider(self, provider):
"""Remove provider output stream."""
if provider.name in self._outputs:
del self._outputs[provider.name]
self.check_idle()
if not self._outputs:
self.stop()
def check_idle(self):
"""Reset access token if all providers are idle."""
if all([p.idle for p in self._outputs.values()]):
self.access_token = None
def start(self):
"""Start a stream."""
# Keep import here so that we can import stream integration without installing reqs
# pylint: disable=import-outside-toplevel
from .worker import stream_worker
        if self._thread is None or not self._thread.is_alive():
self._thread_quit = threading.Event()
self._thread = threading.Thread(
name="stream_worker",
target=stream_worker,
args=(self.hass, self, self._thread_quit),
)
self._thread.start()
_LOGGER.info("Started stream: %s", self.source)
def stop(self):
"""Remove outputs and access token."""
self._outputs = {}
self.access_token = None
if not self.keepalive:
self._stop()
def _stop(self):
"""Stop worker thread."""
if self._thread is not None:
self._thread_quit.set()
self._thread.join()
self._thread = None
_LOGGER.info("Stopped stream: %s", self.source)
async def async_handle_record_service(hass, call):
"""Handle save video service calls."""
stream_source = call.data[CONF_STREAM_SOURCE]
video_path = call.data[CONF_FILENAME]
duration = call.data[CONF_DURATION]
lookback = call.data[CONF_LOOKBACK]
# Check for file access
if not hass.config.is_allowed_path(video_path):
raise HomeAssistantError(f"Can't write {video_path}, no access to path!")
# Check for active stream
streams = hass.data[DOMAIN][ATTR_STREAMS]
stream = streams.get(stream_source)
if not stream:
stream = Stream(hass, stream_source)
streams[stream_source] = stream
# Add recorder
recorder = stream.outputs.get("recorder")
if recorder:
raise HomeAssistantError(f"Stream already recording to {recorder.video_path}!")
recorder = stream.add_provider("recorder")
recorder.video_path = video_path
recorder.timeout = duration
stream.start()
# Take advantage of lookback
hls = stream.outputs.get("hls")
if lookback > 0 and hls:
num_segments = min(int(lookback // hls.target_duration), hls.num_segments)
# Wait for latest segment, then add the lookback
await hls.recv()
recorder.prepend(list(hls.get_segment())[-num_segments:])
| apache-2.0 | -3,241,758,894,251,153,000 | 29.718615 | 91 | 0.628241 | false | 4.092272 | false | false | false |
robk5uj/invenio | modules/bibcirculation/lib/bibcirculation_utils.py | 1 | 21415 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibCirculation Utils: Auxiliary methods of BibCirculation """
__revision__ = "$Id$"
from invenio.search_engine_utils import get_fieldvalues
from invenio.bibtask import task_low_level_submission
import invenio.bibcirculation_dblayer as db
from invenio.urlutils import create_html_link
from invenio.config import CFG_SITE_URL, CFG_TMPDIR
from invenio.bibcirculation_config import CFG_BIBCIRCULATION_AMAZON_ACCESS_KEY, \
CFG_BIBCIRCULATION_WORKING_DAYS, \
CFG_BIBCIRCULATION_HOLIDAYS
from invenio.messages import gettext_set_language
import datetime, time, re
DICC_REGEXP = re.compile("^\{('[^']*': ?('[^']*'|[0-9]*|None)(, ?'[^']*': ?('[^']*'|[0-9]*|None))*)?\}$")
def hold_request_mail(recid, borrower_id):
"""
    Create the mail body that will be sent for each hold request.
@param recid: identify the record. Primary key of bibrec.
@type recid: int
@param borrower_id: identify the borrower. Primary key of crcBORROWER.
@type borrower_id: int
@return email(body)
"""
(book_title, book_year, book_author,
book_isbn, book_editor) = book_information_from_MARC(recid)
############## need some code refactoring ###############
more_holdings_infos = db.get_holdings_details(recid)
borrower_infos = db.get_borrower_details(borrower_id)
#########################################################
title_link = create_html_link(CFG_SITE_URL +
'/admin/bibcirculation/bibcirculationadmin.py/get_item_details',
{'recid': recid},
(book_title))
out = """
This is an automatic email for confirming the hold request for a
book on behalf of:
%s (email: %s)
title: %s
author: %s
location: %s
library: %s
publisher: %s
year: %s
isbn: %s
""" % (borrower_infos[1], borrower_infos[2],
title_link, book_author, more_holdings_infos[0][1],
more_holdings_infos[0][2],
book_editor, book_year, book_isbn)
return out
def get_book_cover(isbn):
"""
Retrieve book cover using Amazon web services.
@param isbn: book's isbn
@type isbn: string
@return book cover
"""
from xml.dom import minidom
import urllib
# connect to AWS
cover_xml = urllib.urlopen('http://ecs.amazonaws.com/onca/xml' \
'?Service=AWSECommerceService&AWSAccessKeyId=' \
+ CFG_BIBCIRCULATION_AMAZON_ACCESS_KEY + \
'&Operation=ItemSearch&Condition=All&' \
'ResponseGroup=Images&SearchIndex=Books&' \
'Keywords=' + isbn)
# parse XML
try:
xml_img = minidom.parse(cover_xml)
retrieve_book_cover = xml_img.getElementsByTagName('MediumImage')
book_cover = retrieve_book_cover.item(0).firstChild.firstChild.data
except AttributeError:
book_cover = "%s/img/book_cover_placeholder.gif" % (CFG_SITE_URL)
return book_cover
def book_information_from_MARC(recid):
"""
Retrieve book's information from MARC
@param recid: identify the record. Primary key of bibrec.
@type recid: int
@return tuple with title, year, author, isbn and editor.
"""
book_title = ' '.join(get_fieldvalues(recid, "245__a") + \
get_fieldvalues(recid, "245__b") + \
get_fieldvalues(recid, "245__n") + \
get_fieldvalues(recid, "245__p"))
book_year = ' '.join(get_fieldvalues(recid, "260__c"))
book_author = ' '.join(get_fieldvalues(recid, "100__a") + \
get_fieldvalues(recid, "100__u"))
book_isbn = ' '.join(get_fieldvalues(recid, "020__a"))
book_editor = ' , '.join(get_fieldvalues(recid, "260__a") + \
get_fieldvalues(recid, "260__b"))
return (book_title, book_year, book_author, book_isbn, book_editor)
def book_title_from_MARC(recid):
"""
Retrieve book's title from MARC
@param recid: identify the record. Primary key of bibrec.
@type recid: int
@return book's title
"""
book_title = ' '.join(get_fieldvalues(recid, "245__a") + \
get_fieldvalues(recid, "245__b") + \
get_fieldvalues(recid, "245__n") + \
get_fieldvalues(recid, "245__p"))
return book_title
def update_status_if_expired(loan_id):
"""
Update the loan's status if status is 'expired'.
@param loan_id: identify the loan. Primary key of crcLOAN.
@type loan_id: int
"""
loan_status = db.get_loan_status(loan_id)
if loan_status == 'expired':
db.update_loan_status('on loan', loan_id)
return
def get_next_day(date_string):
"""
Get the next day
@param date_string: date
@type date_string: string
return next day
"""
# add 1 day
more_1_day = datetime.timedelta(days=1)
# convert date_string to datetime format
tmp_date = time.strptime(date_string, '%Y-%m-%d')
# calculate the new date (next day)
next_day = datetime.datetime(*tmp_date[:3]) + more_1_day
return next_day
def generate_new_due_date(days):
"""
Generate a new due date (today + X days = new due date).
@param days: number of days
@type days: string
@return new due date
"""
today = datetime.date.today()
more_X_days = datetime.timedelta(days=days)
tmp_date = today + more_X_days
week_day = tmp_date.strftime('%A')
due_date = tmp_date.strftime('%Y-%m-%d')
due_date_validated = False
while not due_date_validated:
if week_day in CFG_BIBCIRCULATION_WORKING_DAYS and due_date not in CFG_BIBCIRCULATION_HOLIDAYS:
due_date_validated = True
else:
next_day = get_next_day(due_date)
due_date = next_day.strftime('%Y-%m-%d')
week_day = next_day.strftime('%A')
return due_date
def renew_loan_for_X_days(barcode):
"""
Renew a loan based on its loan period
@param barcode: identify the item. Primary key of crcITEM.
@type barcode: string
@return new due date
"""
loan_period = db.get_loan_period(barcode)
if loan_period == '4 weeks':
due_date = generate_new_due_date(30)
else:
due_date = generate_new_due_date(7)
return due_date
def make_copy_available(request_id):
"""
    Change the status of a copy to 'available' when
    a hold request is cancelled.
@param request_id: identify the request: Primary key of crcLOANREQUEST
@type request_id: int
"""
barcode_requested = db.get_requested_barcode(request_id)
db.update_item_status('available', barcode_requested)
return
def print_new_loan_information(req, ln):
"""
Create a printable format with the information of the last
loan who has been registered on the table crcLOAN.
"""
_ = gettext_set_language(ln)
# get the last loan from crcLOAN
(recid, borrower_id, due_date) = db.get_last_loan()
# get book's information
(book_title, book_year, book_author, book_isbn, book_editor) = book_information_from_MARC(recid)
# get borrower's data/information (name, address, email)
(borrower_name, borrower_address, borrower_email) = db.get_borrower_data(borrower_id)
# Generate printable format
req.content_type = "text/html"
req.send_http_header()
out = """<table style='width:95%; margin:auto; max-width: 600px;'>"""
out += """
<tr>
<td><img src="%s/img/CERN_CDS_logo.png"></td>
</tr>
</table><br />""" % (CFG_SITE_URL)
out += """<table style='color: #79d; font-size: 82%; width:95%; margin:auto; max-width: 400px;'>"""
out += """ <tr><td align="center"><h2><strong>%s</strong></h2></td></tr>""" % (_("Loan information"))
out += """ <tr><td align="center"><strong>%s</strong></td></tr>""" % (_("This book is sent to you ..."))
out += """</table><br />"""
out += """<table style='color: #79d; font-size: 82%; width:95%; margin:auto; max-width: 400px;'>"""
out += """<tr>
<td width="70"><strong>%s</strong></td><td style='color: black;'>%s</td>
</tr>
<tr>
<td width="70"><strong>%s</strong></td><td style='color: black;'>%s</td>
</tr>
<tr>
<td width="70"><strong>%s</strong></td><td style='color: black;'>%s</td>
</tr>
<tr>
<td width="70"><strong>%s</strong></td><td style='color: black;'>%s</td>
</tr>
<tr>
<td width="70"><strong>%s</strong></td><td style='color: black;'>%s</td>
</tr>
""" % (_("Title"), book_title,
_("Author"), book_author,
_("Editor"), book_editor,
_("ISBN"), book_isbn,
_("Year"), book_year)
out += """</table><br />"""
out += """<table style='color: #79d; font-size: 82%; width:95%; margin:auto; max-width: 400px;'>"""
out += """<tr>
<td width="70"><strong>%s</strong></td><td style='color: black;'>%s</td>
</tr>
<tr>
<td width="70"><strong>%s</strong></td><td style='color: black;'>%s</td>
</tr>
<tr>
<td width="70"><strong>%s</strong></td><td style='color: black;'>%s</td>
</tr>
<tr>
<td width="70"><strong>%s</strong></td><td style='color: black;'>%s</td>
</tr> """ % (_("Id"), borrower_id,
_("Name"), borrower_name,
_("Address"), borrower_address,
_("Email"), borrower_email)
out += """</table> <br />"""
out += """<table style='color: #79d; font-size: 82%; width:95%; margin:auto; max-width: 400px;'>"""
out += """ <tr><td align="center"><h2><strong>%s: %s</strong></h2></td></tr>""" % (_("Due date"), due_date)
out += """</table>"""
out += """<table style='color: #79d; font-size: 82%; width:95%; margin:auto; max-width: 800px;'>
<tr><td><input type="button" onClick='window.print()'
value='Print' style='color: #fff; background: #36c; font-weight: bold;'></td></tr>
</table>"""
req.write("<html>")
req.write(out)
req.write("</html>")
return "\n"
def print_pending_hold_requests_information(req, ln):
"""
Create a printable format with all the information about all
pending hold requests.
"""
_ = gettext_set_language(ln)
requests = db.get_pdf_request_data('pending')
req.content_type = "text/html"
req.send_http_header()
out = """<table style='width:100%; margin:auto; max-width: 1024px;'>"""
out += """
<tr>
<td><img src="%s/img/CERN_CDS_logo.png"></td>
</tr>
</table><br />""" % (CFG_SITE_URL)
out += """<table style='color: #79d; font-size: 82%; width:95%; margin:auto; max-width: 1024px;'>"""
out += """ <tr><td align="center"><h2><strong>%s</strong></h2></td></tr>""" % (_("List of pending hold requests"))
out += """ <tr><td align="center"><strong>%s</strong></td></tr>""" % (time.ctime())
out += """</table><br/>"""
out += """<table style='color: #79d; font-size: 82%; width:95%; margin:auto; max-width: 1024px;'>"""
out += """<tr>
<td><strong>%s</strong></td>
<td><strong>%s</strong></td>
<td><strong>%s</strong></td>
<td><strong>%s</strong></td>
<td><strong>%s</strong></td>
<td><strong>%s</strong></td>
<td><strong>%s</strong></td>
</tr>
""" % (_("Borrower"),
_("Item"),
_("Library"),
_("Location"),
_("From"),
_("To"),
_("Request date"))
for (recid, borrower_name, library_name, location, date_from, date_to, request_date) in requests:
out += """<tr style='color: black;'>
<td class="bibcirccontent">%s</td>
<td class="bibcirccontent">%s</td>
<td class="bibcirccontent">%s</td>
<td class="bibcirccontent">%s</td>
<td class="bibcirccontent">%s</td>
<td class="bibcirccontent">%s</td>
<td class="bibcirccontent">%s</td>
</tr>
""" % (borrower_name, book_title_from_MARC(recid), library_name,
location, date_from, date_to, request_date)
out += """</table>
<br />
<br />
<table style='color: #79d; font-size: 82%; width:95%; margin:auto; max-width: 1024px;'>
<tr>
<td>
<input type=button value='Back' onClick="history.go(-1)"
style='color: #fff; background: #36c; font-weight: bold;'>
<input type="button" onClick='window.print()'
value='Print' style='color: #fff; background: #36c; font-weight: bold;'>
</td>
</tr>
</table>"""
req.write("<html>")
req.write(out)
req.write("</html>")
return "\n"
def get_item_info_for_search_result(recid):
"""
Get the item's info from MARC in order to create a
search result with more details
@param recid: identify the record. Primary key of bibrec.
@type recid: int
    @return book's information (author, editor and number of copies)
"""
book_author = ' '.join(get_fieldvalues(recid, "100__a") + \
get_fieldvalues(recid, "100__u"))
book_editor = ' , '.join(get_fieldvalues(recid, "260__a") + \
get_fieldvalues(recid, "260__b") + \
get_fieldvalues(recid, "260__c"))
book_copies = ' '.join(get_fieldvalues(recid, "964__a"))
book_infos = (book_author, book_editor, book_copies)
return book_infos
def update_request_data(request_id):
"""
Update the status of a given request.
@param request_id: identify the request: Primary key of crcLOANREQUEST
@type request_id: int
"""
barcode = db.get_request_barcode(request_id)
nb_requests = db.get_number_requests_per_copy(barcode)
is_on_loan = db.is_item_on_loan(barcode)
if nb_requests == 0 and is_on_loan is not None:
db.update_item_status('on loan', barcode)
elif nb_requests == 0 and is_on_loan is None:
db.update_item_status('available', barcode)
else:
db.update_item_status('requested', barcode)
return
def compare_dates(date):
"""
Compare given date with today
@param date: given date
@type date: string
@return boolean
"""
if date < time.strftime("%Y-%m-%d"):
return False
else:
return True
def validate_date_format(date):
"""
Verify the date format
@param date: given date
@type date: string
@return boolean
"""
try:
if time.strptime(date, "%Y-%m-%d"):
if compare_dates(date):
return True
else:
return False
except ValueError:
return False
def create_ill_record(book_info):
"""
Create a new ILL record
@param book_info: book's information
@type book_info: tuple
@return MARC record
"""
(title, author, place, publisher, year, edition, isbn) = book_info
ill_record = """
<record>
<datafield tag="020" ind1=" " ind2=" ">
<subfield code="a">%(isbn)s</subfield>
</datafield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">%(author)s</subfield>
</datafield>
<datafield tag="245" ind1=" " ind2=" ">
<subfield code="a">%(title)s</subfield>
</datafield>
<datafield tag="250" ind1=" " ind2=" ">
<subfield code="a">%(edition)s</subfield>
</datafield>
<datafield tag="260" ind1=" " ind2=" ">
<subfield code="a">%(place)s</subfield>
<subfield code="b">%(publisher)s</subfield>
<subfield code="c">%(year)s</subfield>
</datafield>
<datafield tag="980" ind1=" " ind2=" ">
<subfield code="a">ILLBOOK</subfield>
</datafield>
</record>
""" % {'isbn': isbn,
'author': author,
'title': title,
'edition': edition,
'place': place,
'publisher': publisher,
'year': year}
file_path = '%s/%s_%s.xml' % (CFG_TMPDIR, 'bibcirculation_ill_book',
time.strftime("%Y%m%d_%H%M%S"))
xml_file = open(file_path, 'w')
xml_file.write(ill_record)
xml_file.close()
# Pass XML file to BibUpload.
task_low_level_submission('bibupload', 'bibcirculation', '-P', '5', '-i',
file_path)
return ill_record
def wash_recid_from_ILL_request(ill_request_id):
"""
    Get the stored book-info dictionary and wash the recid value.
    @param ill_request_id: identify the ILL request. Primary key of crcILLREQUEST
@type ill_request_id: int
@return recid
"""
book_info = db.get_ill_book_info(ill_request_id)
if looks_like_dictionary(book_info):
book_info = eval(book_info)
else:
book_info = None
try:
recid = int(book_info['recid'])
except KeyError:
recid = None
return recid
def get_list_of_ILL_requests():
"""
Get list with all recids related with ILL requests
"""
list_of_recids = []
ill_requests = db.get_ill_ids()
for i in range(len(ill_requests)):
recid = wash_recid_from_ILL_request(ill_requests[i][0])
if recid:
list_of_recids.append(recid)
return list_of_recids
def all_copies_are_missing(recid):
"""
Verify if all copies of an item are missing
@param recid: identify the record. Primary key of bibrec
@type recid: int
@return boolean
"""
copies_status = db.get_copies_status(recid)
number_of_missing = 0
for (status) in copies_status:
if status == 'missing':
number_of_missing += 1
if number_of_missing == len(copies_status):
return True
else:
return False
def has_copies(recid):
"""
    Verify if a recid is an item (has copies)
@param recid: identify the record. Primary key of bibrec
@type recid: int
@return boolean
"""
copies_status = db.get_copies_status(recid)
if copies_status is None:
return False
else:
if len(copies_status) == 0:
return False
else:
return True
def generate_email_body(template, loan_id):
"""
Generate the body of an email for loan recalls.
@param template: email template
@type template: string
@param loan_id: identify the loan. Primary key of crcLOAN.
@type loan_id: int
@return email(body)
"""
recid = db.get_loan_recid(loan_id)
(book_title, book_year, book_author,
book_isbn, book_editor) = book_information_from_MARC(int(recid))
out = template % (book_title, book_year, book_author,
book_isbn, book_editor)
return out
def create_item_details_url(recid, ln):
"""
Generate the URL redirecting to the edition of record copies
@param recid: The identifier of the record
@type recid: int
@param ln: The language identifier
@type ln: string
@return A string being the URL allowing to edit currently viewed record
"""
url = '/admin/bibcirculation/bibcirculationadmin.py/get_item_details?ln=%s&recid=%s' % (ln, str(recid))
return CFG_SITE_URL + url
def looks_like_dictionary(candidate_string):
if re.match(DICC_REGEXP, candidate_string):
return True
else:
return False
| gpl-2.0 | -8,326,159,641,433,594,000 | 29.505698 | 118 | 0.543638 | false | 3.661936 | false | false | false |
HiSPARC/publicdb | publicdb/raw_data/date_generator.py | 1 | 1373 | import datetime
def daterange(start, stop):
"""Generator for date ranges
This is a generator for date ranges. Based on a start and stop value,
it generates one day intervals.
    :param start: a date instance, start of range
:param stop: a date instance, end of range
:yield date: dates with one day interval between start and stop
"""
if start == stop:
yield start
return
else:
yield start
cur = start
while cur < stop:
cur += datetime.timedelta(days=1)
yield cur
return
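# Example (sketch):
#   list(daterange(datetime.date(2017, 1, 1), datetime.date(2017, 1, 3)))
#   -> [date(2017, 1, 1), date(2017, 1, 2), date(2017, 1, 3)]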
def single_day_ranges(start, end):
"""Generate datetime ranges consisting of a single day.
Generate datetime ranges, a single day at a time. The generator keeps
returning two datetime values, making up a range of a full day.
However, the first and last days may be shorter, if a specific
time-of-day was specified.
:param start: a datetime instance, start of range
:param end: a datetime instance, end of range
    :yield cur, next_day: datetime intervals between start and end
"""
cur = start
next_day = (cur.replace(hour=0, minute=0, second=0, microsecond=0) +
datetime.timedelta(days=1))
while next_day < end:
yield cur, next_day
cur = next_day
next_day = cur + datetime.timedelta(days=1)
yield cur, end
| gpl-3.0 | -3,784,288,338,580,211,700 | 27.604167 | 74 | 0.639476 | false | 4.224615 | false | false | false |
TileStache/TileStache | TileStache/Config.py | 1 | 15616 | """ The configuration bits of TileStache.
TileStache configuration is stored in JSON files, and is composed of two main
top-level sections: "cache" and "layers". There are examples of both in this
minimal sample configuration:
{
"cache": {"name": "Test"},
"layers": {
"example": {
"provider": {"name": "mapnik", "mapfile": "examples/style.xml"},,
"projection": "spherical mercator"
}
}
}
The contents of the "cache" section are described in greater detail in the
TileStache.Caches module documentation. Here is a different sample:
"cache": {
"name": "Disk",
"path": "/tmp/stache",
"umask": "0000"
}
The "layers" section is a dictionary of layer names which are specified in the
URL of an individual tile. More detail on the configuration of individual layers
can be found in the TileStache.Core module documentation. Another sample:
{
"cache": ...,
"layers":
{
"example-name":
{
"provider": { ... },
"metatile": { ... },
"preview": { ... },
"stale lock timeout": ...,
"projection": ...
}
}
}
Configuration also supports these additional settings:
- "logging": one of "debug", "info", "warning", "error" or "critical", as
described in Python's logging module: http://docs.python.org/howto/logging.html
- "index": configurable index pages for the front page of an instance.
A custom index can be specified as a filename relative to the configuration
location. Typically an HTML document would be given here, but other kinds of
files such as images can be used, with MIME content-type headers determined
by mimetypes.guess_type. A simple text greeting is displayed if no index
is provided.
In-depth explanations of the layer components can be found in the module
documentation for TileStache.Providers, TileStache.Core, and TileStache.Geography.
"""
import sys
import logging
from os.path import join as pathjoin
from mimetypes import guess_type
from json import dumps
try:
from json import dumps as json_dumps
except ImportError:
from simplejson import dumps as json_dumps
from ModestMaps.Geo import Location
from ModestMaps.Core import Coordinate
from . import Core
from . import Caches
from . import Providers
from . import Geography
from . import PixelEffects
from .py3_compat import reduce, urljoin, urlparse, urlopen
class Configuration:
""" A complete site configuration, with a collection of Layer objects.
Attributes:
cache:
Cache instance, e.g. TileStache.Caches.Disk etc.
See TileStache.Caches for details on what makes
a usable cache.
layers:
Dictionary of layers keyed by name.
When creating a custom layers dictionary, e.g. for dynamic
layer collections backed by some external configuration,
these dictionary methods must be provided for a complete
collection of layers:
keys():
Return list of layer name strings.
items():
Return list of (name, layer) pairs.
__contains__(key):
Return boolean true if given key is an existing layer.
__getitem__(key):
Return existing layer object for given key or raise KeyError.
dirpath:
Local filesystem path for this configuration,
useful for expanding relative paths.
Optional attribute:
index:
Mimetype, content tuple for default index response.
"""
def __init__(self, cache, dirpath):
self.cache = cache
self.dirpath = dirpath
self.layers = {}
self.index = 'text/plain', 'TileStache bellows hello.'
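# Illustrative sketch (not part of the original module): a minimal custom
# "layers" collection implementing the dictionary protocol described in the
# Configuration docstring above, e.g. for dynamically generated layers.
# The factory callable and the layer names are assumptions for the example.
class _ExampleDynamicLayers:
    def __init__(self, factory, names):
        self.factory = factory          # callable building a layer on demand
        self.names = list(names)
    def keys(self):
        return self.names
    def items(self):
        return [(name, self.factory(name)) for name in self.names]
    def __contains__(self, key):
        return key in self.names
    def __getitem__(self, key):
        if key not in self.names:
            raise KeyError(key)
        return self.factory(key)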
class Bounds:
""" Coordinate bounding box for tiles.
"""
def __init__(self, upper_left_high, lower_right_low):
""" Two required Coordinate objects defining tile pyramid bounds.
Boundaries are inclusive: upper_left_high is the left-most column,
upper-most row, and highest zoom level; lower_right_low is the
            right-most column, furthest-down row, and lowest zoom level.
"""
self.upper_left_high = upper_left_high
self.lower_right_low = lower_right_low
def excludes(self, tile):
""" Check a tile Coordinate against the bounds, return true/false.
"""
if tile.zoom > self.upper_left_high.zoom:
# too zoomed-in
return True
if tile.zoom < self.lower_right_low.zoom:
# too zoomed-out
return True
# check the top-left tile corner against the lower-right bound
_tile = tile.zoomTo(self.lower_right_low.zoom)
if _tile.column > self.lower_right_low.column:
# too far right
return True
if _tile.row > self.lower_right_low.row:
# too far down
return True
# check the bottom-right tile corner against the upper-left bound
__tile = tile.right().down().zoomTo(self.upper_left_high.zoom)
if __tile.column < self.upper_left_high.column:
# too far left
return True
if __tile.row < self.upper_left_high.row:
# too far up
return True
return False
def __str__(self):
return 'Bound %s - %s' % (self.upper_left_high, self.lower_right_low)
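# Illustrative sketch (not part of the original module): a Bounds whose two
# corners are the top-left tile, allowing zoom levels 0 through 10.
# Coordinate takes (row, column, zoom); the tiles below are example values.
def _example_bounds_check():
    bounds = Bounds(Coordinate(0, 0, 10), Coordinate(0, 0, 0))
    allowed = not bounds.excludes(Coordinate(0, 0, 5))    # zoom 5 is in range
    too_deep = bounds.excludes(Coordinate(0, 0, 12))      # zoom 12 > 10
    return allowed, too_deep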
class BoundsList:
""" Multiple coordinate bounding boxes for tiles.
"""
def __init__(self, bounds):
""" Single argument is a list of Bounds objects.
"""
self.bounds = bounds
def excludes(self, tile):
""" Check a tile Coordinate against the bounds, return false if none match.
"""
for bound in self.bounds:
if not bound.excludes(tile):
return False
# Nothing worked.
return True
def buildConfiguration(config_dict, dirpath='.'):
""" Build a configuration dictionary into a Configuration object.
The second argument is an optional dirpath that specifies where in the
local filesystem the parsed dictionary originated, to make it possible
to resolve relative paths. It might be a path or more likely a full
URL including the "file://" prefix.
"""
scheme, h, path, p, q, f = urlparse(dirpath)
if scheme in ('', 'file'):
sys.path.insert(0, path)
cache_dict = config_dict.get('cache', {})
cache = _parseConfigCache(cache_dict, dirpath)
config = Configuration(cache, dirpath)
for (name, layer_dict) in config_dict.get('layers', {}).items():
config.layers[name] = _parseConfigLayer(layer_dict, config, dirpath)
if 'index' in config_dict:
index_href = urljoin(dirpath, config_dict['index'])
index_body = urlopen(index_href).read()
index_type = guess_type(index_href)
config.index = index_type[0], index_body
if 'logging' in config_dict:
level = config_dict['logging'].upper()
if hasattr(logging, level):
logging.basicConfig(level=getattr(logging, level))
return config
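# Illustrative sketch (not part of the original module): building a
# Configuration from an in-memory dictionary that mirrors the JSON sample in
# the module docstring. Calling it requires Mapnik installed, since the
# example layer uses the "mapnik" provider; the mapfile path is an example.
def _example_build_configuration():
    config_dict = {
        "cache": {"name": "Test"},
        "layers": {
            "example": {
                "provider": {"name": "mapnik", "mapfile": "examples/style.xml"},
                "projection": "spherical mercator"
            }
        }
    }
    config = buildConfiguration(config_dict, dirpath='.')
    # Layers are available by name once the configuration is built.
    return config.layers['example']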
def enforcedLocalPath(relpath, dirpath, context='Path'):
""" Return a forced local path, relative to a directory.
Throw an error if the combination of path and directory seems to
specify a remote path, e.g. "/path" and "http://example.com".
Although a configuration file can be parsed from a remote URL, some
paths (e.g. the location of a disk cache) must be local to the server.
In cases where we mix a remote configuration location with a local
cache location, e.g. "http://example.com/tilestache.cfg", the disk path
must include the "file://" prefix instead of an ambiguous absolute
path such as "/tmp/tilestache".
"""
parsed_dir = urlparse(dirpath)
parsed_rel = urlparse(relpath)
if parsed_rel.scheme not in ('file', ''):
raise Core.KnownUnknown('%s path must be a local file path, absolute or "file://", not "%s".' % (context, relpath))
if parsed_dir.scheme not in ('file', '') and parsed_rel.scheme != 'file':
raise Core.KnownUnknown('%s path must start with "file://" in a remote configuration ("%s" relative to %s)' % (context, relpath, dirpath))
if parsed_rel.scheme == 'file':
# file:// is an absolute local reference for the disk cache.
return parsed_rel.path
if parsed_dir.scheme == 'file':
# file:// is an absolute local reference for the directory.
return urljoin(parsed_dir.path, parsed_rel.path)
# nothing has a scheme, it's probably just a bunch of
# dumb local paths, so let's see what happens next.
return pathjoin(dirpath, relpath)
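# Illustrative sketch (not part of the original module): how local and remote
# configuration locations combine with cache paths. All paths are examples.
def _example_enforced_local_path():
    # A plain relative path against a local directory is joined normally.
    local = enforcedLocalPath('cache', '/etc/tilestache', 'Disk cache path')
    # A remote configuration requires an explicit file:// path for local
    # resources such as the disk cache.
    remote = enforcedLocalPath('file:///tmp/stache', 'http://example.com/cfg/',
                               'Disk cache path')
    return local, remote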
def _parseConfigCache(cache_dict, dirpath):
""" Used by parseConfig() to parse just the cache parts of a config.
"""
if 'name' in cache_dict:
_class = Caches.getCacheByName(cache_dict['name'])
kwargs = {}
def add_kwargs(*keys):
""" Populate named keys in kwargs from cache_dict.
"""
for key in keys:
if key in cache_dict:
kwargs[key] = cache_dict[key]
if _class is Caches.Test:
if cache_dict.get('verbose', False):
kwargs['logfunc'] = lambda msg: sys.stderr.write(msg + '\n')
elif _class is Caches.Disk:
kwargs['path'] = enforcedLocalPath(cache_dict['path'], dirpath, 'Disk cache path')
if 'umask' in cache_dict:
kwargs['umask'] = int(cache_dict['umask'], 8)
add_kwargs('dirs', 'gzip')
elif _class is Caches.Multi:
kwargs['tiers'] = [_parseConfigCache(tier_dict, dirpath)
for tier_dict in cache_dict['tiers']]
elif _class is Caches.Memcache.Cache:
if 'key prefix' in cache_dict:
kwargs['key_prefix'] = cache_dict['key prefix']
add_kwargs('servers', 'lifespan', 'revision')
elif _class is Caches.Redis.Cache:
if 'key prefix' in cache_dict:
kwargs['key_prefix'] = cache_dict['key prefix']
add_kwargs('host', 'port', 'db')
elif _class is Caches.S3.Cache:
add_kwargs('bucket', 'access', 'secret', 'use_locks', 'path', 'reduced_redundancy', 'policy')
else:
raise Exception('Unknown cache: %s' % cache_dict['name'])
elif 'class' in cache_dict:
_class = Core.loadClassPath(cache_dict['class'])
kwargs = cache_dict.get('kwargs', {})
kwargs = dict( [(str(k), v) for (k, v) in kwargs.items()] )
else:
raise Exception('Missing required cache name or class: %s' % json_dumps(cache_dict))
cache = _class(**kwargs)
return cache
def _parseLayerBounds(bounds_dict, projection):
"""
"""
north, west = bounds_dict.get('north', 89), bounds_dict.get('west', -180)
south, east = bounds_dict.get('south', -89), bounds_dict.get('east', 180)
high, low = bounds_dict.get('high', 31), bounds_dict.get('low', 0)
try:
ul_hi = projection.locationCoordinate(Location(north, west)).zoomTo(high)
lr_lo = projection.locationCoordinate(Location(south, east)).zoomTo(low)
except TypeError:
raise Core.KnownUnknown('Bad bounds for layer, need north, south, east, west, high, and low: ' + dumps(bounds_dict))
return Bounds(ul_hi, lr_lo)
def _parseConfigLayer(layer_dict, config, dirpath):
""" Used by parseConfig() to parse just the layer parts of a config.
"""
projection = layer_dict.get('projection', 'spherical mercator')
projection = Geography.getProjectionByName(projection)
#
# Add cache lock timeouts and preview arguments
#
layer_kwargs = {}
if 'cache lifespan' in layer_dict:
layer_kwargs['cache_lifespan'] = int(layer_dict['cache lifespan'])
if 'stale lock timeout' in layer_dict:
layer_kwargs['stale_lock_timeout'] = int(layer_dict['stale lock timeout'])
if 'write cache' in layer_dict:
layer_kwargs['write_cache'] = bool(layer_dict['write cache'])
if 'allowed origin' in layer_dict:
layer_kwargs['allowed_origin'] = str(layer_dict['allowed origin'])
if 'maximum cache age' in layer_dict:
layer_kwargs['max_cache_age'] = int(layer_dict['maximum cache age'])
if 'redirects' in layer_dict:
layer_kwargs['redirects'] = dict(layer_dict['redirects'])
if 'tile height' in layer_dict:
layer_kwargs['tile_height'] = int(layer_dict['tile height'])
if 'preview' in layer_dict:
preview_dict = layer_dict['preview']
for (key, func) in zip(('lat', 'lon', 'zoom', 'ext'), (float, float, int, str)):
if key in preview_dict:
layer_kwargs['preview_' + key] = func(preview_dict[key])
#
# Do the bounds
#
if 'bounds' in layer_dict:
if type(layer_dict['bounds']) is dict:
layer_kwargs['bounds'] = _parseLayerBounds(layer_dict['bounds'], projection)
elif type(layer_dict['bounds']) is list:
bounds = [_parseLayerBounds(b, projection) for b in layer_dict['bounds']]
layer_kwargs['bounds'] = BoundsList(bounds)
else:
raise Core.KnownUnknown('Layer bounds must be a dictionary, not: ' + dumps(layer_dict['bounds']))
#
# Do the metatile
#
meta_dict = layer_dict.get('metatile', {})
metatile_kwargs = {}
for k in ('buffer', 'rows', 'columns'):
if k in meta_dict:
metatile_kwargs[k] = int(meta_dict[k])
metatile = Core.Metatile(**metatile_kwargs)
#
# Do the per-format options
#
jpeg_kwargs = {}
png_kwargs = {}
if 'jpeg options' in layer_dict:
jpeg_kwargs = dict([(str(k), v) for (k, v) in layer_dict['jpeg options'].items()])
if 'png options' in layer_dict:
png_kwargs = dict([(str(k), v) for (k, v) in layer_dict['png options'].items()])
#
# Do pixel effect
#
pixel_effect = None
if 'pixel effect' in layer_dict:
pixel_effect_dict = layer_dict['pixel effect']
pixel_effect_name = pixel_effect_dict.get('name')
if pixel_effect_name in PixelEffects.all:
pixel_effect_kwargs = {}
for k, v in pixel_effect_dict.items():
if k != 'name':
pixel_effect_kwargs[str(k)] = float(v)
PixelEffectClass = PixelEffects.all[pixel_effect_name]
pixel_effect = PixelEffectClass(**pixel_effect_kwargs)
#
# Do the provider
#
provider_dict = layer_dict['provider']
if 'name' in provider_dict:
_class = Providers.getProviderByName(provider_dict['name'])
provider_kwargs = _class.prepareKeywordArgs(provider_dict)
elif 'class' in provider_dict:
_class = Core.loadClassPath(provider_dict['class'])
provider_kwargs = provider_dict.get('kwargs', {})
provider_kwargs = dict( [(str(k), v) for (k, v) in provider_kwargs.items()] )
else:
raise Exception('Missing required provider name or class: %s' % json_dumps(provider_dict))
#
# Finish him!
#
layer = Core.Layer(config, projection, metatile, **layer_kwargs)
layer.provider = _class(layer, **provider_kwargs)
layer.setSaveOptionsJPEG(**jpeg_kwargs)
layer.setSaveOptionsPNG(**png_kwargs)
layer.pixel_effect = pixel_effect
return layer
| bsd-3-clause | -1,455,473,924,395,118,800 | 32.367521 | 146 | 0.613473 | false | 3.986725 | true | false | false |
labcodes/django | tests/schema/models.py | 1 | 4719 | from django.apps.registry import Apps
from django.db import models
# Because we want to test creation and deletion of these as separate things,
# these models are all inserted into a separate Apps so the main test
# runner doesn't migrate them.
new_apps = Apps()
class Author(models.Model):
name = models.CharField(max_length=255)
height = models.PositiveIntegerField(null=True, blank=True)
weight = models.IntegerField(null=True, blank=True)
class Meta:
apps = new_apps
class AuthorTextFieldWithIndex(models.Model):
text_field = models.TextField(db_index=True)
class Meta:
apps = new_apps
class AuthorWithDefaultHeight(models.Model):
name = models.CharField(max_length=255)
height = models.PositiveIntegerField(null=True, blank=True, default=42)
class Meta:
apps = new_apps
class AuthorWithEvenLongerName(models.Model):
name = models.CharField(max_length=255)
height = models.PositiveIntegerField(null=True, blank=True)
class Meta:
apps = new_apps
class AuthorWithIndexedName(models.Model):
name = models.CharField(max_length=255, db_index=True)
class Meta:
apps = new_apps
class Book(models.Model):
author = models.ForeignKey(Author, models.CASCADE)
title = models.CharField(max_length=100, db_index=True)
pub_date = models.DateTimeField()
# tags = models.ManyToManyField("Tag", related_name="books")
class Meta:
apps = new_apps
class BookWeak(models.Model):
author = models.ForeignKey(Author, models.CASCADE, db_constraint=False)
title = models.CharField(max_length=100, db_index=True)
pub_date = models.DateTimeField()
class Meta:
apps = new_apps
class BookWithLongName(models.Model):
author_foreign_key_with_really_long_field_name = models.ForeignKey(
AuthorWithEvenLongerName,
models.CASCADE,
)
class Meta:
apps = new_apps
class BookWithO2O(models.Model):
author = models.OneToOneField(Author, models.CASCADE)
title = models.CharField(max_length=100, db_index=True)
pub_date = models.DateTimeField()
class Meta:
apps = new_apps
db_table = "schema_book"
class BookWithSlug(models.Model):
author = models.ForeignKey(Author, models.CASCADE)
title = models.CharField(max_length=100, db_index=True)
pub_date = models.DateTimeField()
slug = models.CharField(max_length=20, unique=True)
class Meta:
apps = new_apps
db_table = "schema_book"
class BookWithoutAuthor(models.Model):
title = models.CharField(max_length=100, db_index=True)
pub_date = models.DateTimeField()
class Meta:
apps = new_apps
db_table = "schema_book"
class BookForeignObj(models.Model):
title = models.CharField(max_length=100, db_index=True)
author_id = models.IntegerField()
class Meta:
apps = new_apps
class IntegerPK(models.Model):
i = models.IntegerField(primary_key=True)
j = models.IntegerField(unique=True)
class Meta:
apps = new_apps
db_table = "INTEGERPK" # uppercase to ensure proper quoting
class Note(models.Model):
info = models.TextField()
class Meta:
apps = new_apps
class NoteRename(models.Model):
detail_info = models.TextField()
class Meta:
apps = new_apps
db_table = "schema_note"
class Tag(models.Model):
title = models.CharField(max_length=255)
slug = models.SlugField(unique=True)
class Meta:
apps = new_apps
class TagIndexed(models.Model):
title = models.CharField(max_length=255)
slug = models.SlugField(unique=True)
class Meta:
apps = new_apps
index_together = [["slug", "title"]]
class TagM2MTest(models.Model):
title = models.CharField(max_length=255)
slug = models.SlugField(unique=True)
class Meta:
apps = new_apps
class TagUniqueRename(models.Model):
title = models.CharField(max_length=255)
slug2 = models.SlugField(unique=True)
class Meta:
apps = new_apps
db_table = "schema_tag"
# Based on tests/reserved_names/models.py
class Thing(models.Model):
when = models.CharField(max_length=1, primary_key=True)
class Meta:
db_table = 'drop'
def __str__(self):
return self.when
class UniqueTest(models.Model):
year = models.IntegerField()
slug = models.SlugField(unique=False)
class Meta:
apps = new_apps
unique_together = ["year", "slug"]
class Node(models.Model):
node_id = models.AutoField(primary_key=True)
parent = models.ForeignKey('self', models.CASCADE, null=True, blank=True)
class Meta:
apps = new_apps
| bsd-3-clause | 8,553,160,393,117,914,000 | 22.595 | 77 | 0.671117 | false | 3.680967 | false | false | false |
ddurieux/alignak | alignak/misc/termcolor.py | 1 | 5121 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# coding: utf-8
# Copyright (c) 2008-2011 Volvox Development Team
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Author: Konstantin Lepa <[email protected]>
"""ANSII Color formatting for output in terminal."""
from __future__ import print_function
import os
__ALL__ = ['colored', 'cprint']
VERSION = (1, 1, 0)
ATTRIBUTES = dict(
list(
zip([
'bold',
'dark',
'',
'underline',
'blink',
'',
'reverse',
'concealed'
],
list(range(1, 9))
)
)
)
del ATTRIBUTES['']
HIGHLIGHTS = dict(
list(
zip([
'on_grey',
'on_red',
'on_green',
'on_yellow',
'on_blue',
'on_magenta',
'on_cyan',
'on_white'
],
list(range(40, 48))
)
)
)
COLORS = dict(
list(
zip([
'grey',
'red',
'green',
'yellow',
'blue',
'magenta',
'cyan',
'white',
],
list(range(90, 98))
)
)
)
RESET = '\033[0m'
def colored(text, color=None, on_color=None, attrs=None):
"""Colorize text.
Available text colors:
        grey, red, green, yellow, blue, magenta, cyan, white.
    Available text highlights:
        on_grey, on_red, on_green, on_yellow, on_blue, on_magenta, on_cyan, on_white.
Available attributes:
bold, dark, underline, blink, reverse, concealed.
Example:
colored('Hello, World!', 'red', 'on_grey', ['blue', 'blink'])
colored('Hello, World!', 'green')
"""
if os.getenv('ANSI_COLORS_DISABLED') is None:
fmt_str = '\033[%dm%s'
if color is not None:
text = fmt_str % (COLORS[color], text)
if on_color is not None:
text = fmt_str % (HIGHLIGHTS[on_color], text)
if attrs is not None:
for attr in attrs:
text = fmt_str % (ATTRIBUTES[attr], text)
# Alignak mod
if color is not None:
text += RESET
return text
def cprint(text, color=None, on_color=None, attrs=None, **kwargs):
"""Print colorize text.
It accepts arguments of print function.
"""
print((colored(text, color, on_color, attrs)), **kwargs)
if __name__ == '__main__':
print('Current terminal type: %s' % os.getenv('TERM'))
print('Test basic colors:')
cprint('Grey color', 'grey')
cprint('Red color', 'red')
cprint('Green color', 'green')
cprint('Yellow color', 'yellow')
cprint('Blue color', 'blue')
cprint('Magenta color', 'magenta')
cprint('Cyan color', 'cyan')
cprint('White color', 'white')
print(('-' * 78))
print('Test highlights:')
cprint('On grey color', on_color='on_grey')
cprint('On red color', on_color='on_red')
cprint('On green color', on_color='on_green')
cprint('On yellow color', on_color='on_yellow')
cprint('On blue color', on_color='on_blue')
cprint('On magenta color', on_color='on_magenta')
cprint('On cyan color', on_color='on_cyan')
cprint('On white color', color='grey', on_color='on_white')
print('-' * 78)
print('Test attributes:')
cprint('Bold grey color', 'grey', attrs=['bold'])
cprint('Dark red color', 'red', attrs=['dark'])
cprint('Underline green color', 'green', attrs=['underline'])
cprint('Blink yellow color', 'yellow', attrs=['blink'])
cprint('Reversed blue color', 'blue', attrs=['reverse'])
cprint('Concealed Magenta color', 'magenta', attrs=['concealed'])
cprint('Bold underline reverse cyan color', 'cyan',
attrs=['bold', 'underline', 'reverse'])
cprint('Dark blink concealed white color', 'white',
attrs=['dark', 'blink', 'concealed'])
print(('-' * 78))
print('Test mixing:')
cprint('Underline red on grey color', 'red', 'on_grey',
['underline'])
cprint('Reversed green on red color', 'green', 'on_red', ['reverse'])
| agpl-3.0 | -3,544,723,343,725,155,000 | 28.096591 | 79 | 0.582894 | false | 3.700145 | false | false | false |
jsidew/jakidy | src/food/models.py | 1 | 1882 | from django.db import models
class Food(models.Model):
name = models.CharField(max_length=45)
protein = models.DecimalField(max_digits=4, decimal_places=2)
carbs = models.DecimalField(max_digits=4, decimal_places=2)
fat = models.DecimalField(max_digits=4, decimal_places=2)
price = models.DecimalField(max_digits=5, decimal_places=2)
notes = models.TextField()
@property
def calories_tot(self):
return int(round(self.protein*4 + self.carbs*4 + self.fat*9))
@property
def calories_protein(self):
return self.protein * 4
@property
def calories_carbs(self):
return self.carbs * 4
@property
def calories_fat(self):
return self.fat * 9
class Meal(models.Model):
name = models.CharField(max_length=45)
label = models.TextField()
foods = models.ManyToManyField(Food, through='Quantity')
@property
def foodlist(self):
return self.foods.all()
@property
def macros(self):
m = {
'protein': 0,
'carbs': 0,
'fat': 0,
'kcal': 0,
'price': 0
}
for f in self.foods.all():
g = self.quantity_set.get(food=f, meal=self).grams
m['protein'] = m['protein'] + f.protein / 100 * g
m['carbs'] = m['carbs'] + f.carbs / 100 * g
m['fat'] = m['fat'] + f.fat / 100 * g
m['price'] = m['price'] + f.price / 1000 * g
m['protein'] = int(round(m['protein']))
m['carbs'] = int(round(m['carbs']))
m['fat'] = int(round(m['fat']))
m['kcal'] = m['protein']*4 + m['carbs']*4 + m['fat']*9
m['price'] = round(m['price'], 2)
        return m
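# Illustrative arithmetic sketch (not part of the original models): the same
# per-quantity macro math as Meal.macros above, with made-up numbers and no
# ORM involved. 150 g of a food with 20/10/5 g per 100 g gives 247.5 kcal.
def _example_macro_math(grams=150.0, protein=20.0, carbs=10.0, fat=5.0):
    p = protein / 100 * grams      # 30.0 g protein
    c = carbs / 100 * grams        # 15.0 g carbs
    f = fat / 100 * grams          # 7.5 g fat
    kcal = p * 4 + c * 4 + f * 9   # 247.5 kcal
    return p, c, f, kcal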
class Quantity(models.Model):
food = models.ForeignKey(Food)
grams = models.DecimalField(max_digits=6, decimal_places=2)
meal = models.ForeignKey(Meal)
| gpl-3.0 | 3,854,542,961,084,282,000 | 32.017544 | 69 | 0.565887 | false | 3.256055 | false | false | false |
netphi/deepin-ui | dtk/ui/scrolled_window.py | 1 | 21880 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 ~ 2012 Deepin, Inc.
# 2011 ~ 2012 Xia Bin
#
# Author: Xia Bin <[email protected]>
# Maintainer: Xia Bin <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gtk import gdk
from theme import ui_theme
from utils import remove_signal_id, color_hex_to_cairo
import gobject
import gtk
# the p_range is the virtual width/height; its value is smaller than
# allocation.width/height when the scrollbar's width/height is smaller than
# the minimum scrollbar length.
# p_range = allocation.width/height - (min_bar_len - *bar_len*)
# the *bar_len* = (adj.page_size / adj.upper) * allocation.width/height
# this way, 0~(adj.upper-adj.page_size) is mapped onto 0~p_range.
def value2pos(value, p_range, upper):
'''compute the scrollbar position by the adjustment value'''
if upper == 0: return 0
return p_range * float(value) / upper
def pos2value(pos, p_range, upper):
'''compute the adjustment value by the scrollbar position'''
if p_range == 0 : return 0
return pos * upper / p_range
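# Illustrative worked example (not part of the original widget code), with
# made-up numbers: upper=1000 and a 400-pixel virtual length map value 250 to
# bar position 100, and pos2value maps that position back to 250.
def _example_scroll_mapping():
    pos = value2pos(250.0, 400, 1000)     # -> 100.0
    value = pos2value(pos, 400, 1000)     # -> 250.0
    return pos, value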
class ScrolledWindow(gtk.Bin):
'''Scrolled window.'''
def __init__(self, right_space=2, top_bootm_space=3):
'''Init scrolled window.'''
gtk.Bin.__init__(self)
self.bar_min_length = 50 #scrollbar smallest height
self.bar_small_width = 7
self.bar_width = 14 #normal scrollbar width
self.bar_background = ui_theme.get_color("scrolledbar")
self.right_space = right_space
self.top_bootm_space = top_bootm_space
self.h_value_change_id = None
self.h_change_id = None
self.v_value_change_id = None
self.v_change_id = None
class Record():
def __init__(self):
self.bar_len = 0 #scrollbar length
self.last_pos = 0 #last mouse motion pointer's position (x or y)
#last mouse motion timestamp, if user moved the window
#then the last_pos is likely become invalid so we need "last_time"
#to deal with this situation.
self.last_time = 0
self.virtual_len = 0 #the virtual window height or width length
self.bar_pos = 0 #the scrollbar topcorner/leftcorner position
self.is_inside = False # is pointer in the scrollbar region?
self.in_motion = False # is user is draging scrollbar?
self.policy = gtk.POLICY_AUTOMATIC
self._horizaontal = Record()
self._vertical = Record()
self.set_can_focus(True)
self.vallocation = gdk.Rectangle()
self.hallocation = gdk.Rectangle()
self.set_vadjustment(gtk.Adjustment())
self.set_hadjustment(gtk.Adjustment())
self.set_has_window(False)
def do_expose_event(self, e):
if e.window == self.vwindow:
self.draw_vbar()
return True
elif e.window == self.hwindow:
self.draw_hbar()
return True
else:
return False
def draw_vbar(self):
#img = cairo.ImageSurface(cairo.FORMAT_ARGB32, 100, 100)
cr = self.vwindow.cairo_create()
cr.set_source_rgb(*color_hex_to_cairo(self.bar_background.get_color()))
cr.rectangle(0, 0, self.vallocation.width, self.vallocation.height)
cr.fill()
def draw_hbar(self):
cr = self.hwindow.cairo_create()
cr.set_source_rgb(*color_hex_to_cairo(self.bar_background.get_color()))
cr.rectangle(0, 0, self.hallocation.width, self.hallocation.height)
cr.fill()
def do_button_release_event(self, e):
if e.window == self.hwindow:
self._horizaontal.in_motion = False
if not self._horizaontal.is_inside:
self.make_bar_smaller(gtk.ORIENTATION_HORIZONTAL)
return True
elif e.window == self.vwindow:
self._vertical.in_motion = False
if not self._vertical.is_inside:
self.make_bar_smaller(gtk.ORIENTATION_VERTICAL)
return True
else:
return False
def make_bar_smaller(self, orientation):
if orientation == gtk.ORIENTATION_HORIZONTAL:
region = gdk.region_rectangle(gdk.Rectangle(0, 0, int(self._horizaontal.bar_len), self.bar_small_width))
if self.hallocation.x == 0:
self.hwindow.shape_combine_region(region, self.top_bootm_space, self.bar_width - self.bar_small_width -self.right_space)
else:
self.hwindow.shape_combine_region(region, -self.top_bootm_space, self.bar_width - self.bar_small_width -self.right_space)
elif orientation == gtk.ORIENTATION_VERTICAL:
region = gdk.region_rectangle(gdk.Rectangle(0, 0, self.bar_small_width, int(self._vertical.bar_len)))
if self.vallocation.y == 0:
self.vwindow.shape_combine_region(region, self.bar_width-self.bar_small_width - self.right_space, self.top_bootm_space)
else:
self.vwindow.shape_combine_region(region, self.bar_width-self.bar_small_width - self.right_space, -self.top_bootm_space)
else:
raise "make_bar_smaller's orientation must be gtk.ORIENTATION_VERTICAL or gtk.ORIENTATION_HORIZONTAL"
return False
def make_bar_bigger(self, orientation):
if orientation == gtk.ORIENTATION_HORIZONTAL:
region = gdk.region_rectangle(gdk.Rectangle(0, 0, int(self._horizaontal.bar_len), self.bar_width))
if self.hallocation.x == 0:
self.hwindow.shape_combine_region(region, self.top_bootm_space, -self.right_space)
else:
self.hwindow.shape_combine_region(region, -self.top_bootm_space, -self.right_space)
elif orientation == gtk.ORIENTATION_VERTICAL:
region = gdk.region_rectangle(gdk.Rectangle(0, 0, self.bar_width, int(self._vertical.bar_len)))
if self.vallocation.y == 0:
self.vwindow.shape_combine_region(region, -self.right_space, self.top_bootm_space)
else:
self.vwindow.shape_combine_region(region, -self.right_space, -self.top_bootm_space)
else:
raise "make_bar_bigger's orientation must be gtk.ORIENTATION_VERTICAL or gtk.ORIENTATION_HORIZONTAL"
def do_scroll_event(self, e):
value = self.vadjustment.value
step = self.vadjustment.step_increment
page_size = self.vadjustment.page_size
upper = self.vadjustment.upper
        # TODO: handle other scroll types? Only the two below are captured at the moment.
if e.direction == gdk.SCROLL_DOWN:
self.vadjustment.set_value(min(upper-page_size-1, value+step))
return True
elif e.direction == gdk.SCROLL_UP:
self.vadjustment.set_value(max(0, value-step))
return True
else:
return False
def do_leave_notify_event(self, e):
if e.window == self.hwindow :
self._horizaontal.is_inside = False
#if e.y < 0 and not self._horizaontal.in_motion:
if not self._horizaontal.in_motion:
self.make_bar_smaller(gtk.ORIENTATION_HORIZONTAL)
return True
elif e.window == self.vwindow:
self._vertical.is_inside = False
if not self._vertical.in_motion:
#if e.x < 0 and not self._vertical.in_motion:
self.make_bar_smaller(gtk.ORIENTATION_VERTICAL)
return True
else:
return False
def do_enter_notify_event(self, e):
if e.window == self.hwindow:
self.make_bar_bigger(gtk.ORIENTATION_HORIZONTAL)
self._horizaontal.is_inside = True
return True
elif e.window == self.vwindow:
self.make_bar_bigger(gtk.ORIENTATION_VERTICAL)
self._vertical.is_inside = True
return True
else:
return False
def do_motion_notify_event(self, e):
if not (e.window == self.hwindow or e.window == self.vwindow): return False
if e.window == self.hwindow and (e.state & gtk.gdk.BUTTON1_MASK) == gtk.gdk.BUTTON1_MASK:
self.make_bar_bigger(gtk.ORIENTATION_HORIZONTAL)
if self._horizaontal.last_time == 0:
self._horizaontal.last_time = e.time
elif e.time - self._horizaontal.last_time > 1000:
self._horizaontal.last_time = 0
self._horizaontal.last_pos = 0
if self._horizaontal.last_pos == 0 or self._horizaontal.last_time == 0:
self._horizaontal.last_pos = e.x_root
return True
deltaX = e.x_root - self._horizaontal.last_pos
upper = self.hadjustment.upper
            # the pos may be beyond the effective range, but its value is
            # corrected immediately below.
            # the invariant: "value" always stays within the effective range.
value = pos2value(self._horizaontal.bar_pos+deltaX, self._horizaontal.virtual_len, upper)
value = max(0, min(value, self.hadjustment.upper-self.hadjustment.page_size))
self.hadjustment.set_value(value)
self._horizaontal.last_pos = e.x_root
self._horizaontal.last_time = e.time
self._horizaontal.in_motion = True
return True
elif e.window == self.vwindow and (e.state & gtk.gdk.BUTTON1_MASK) == gtk.gdk.BUTTON1_MASK:
self.make_bar_bigger(gtk.ORIENTATION_VERTICAL)
if self._vertical.last_time == 0:
self._vertical.last_time = e.time
elif e.time - self._vertical.last_time > 1000:
self._vertical.last_time = 0
self._vertical.last_pos = 0
if self._vertical.last_pos == 0 or self._vertical.last_time == 0:
self._vertical.last_pos = e.y_root
return True
upper = self.vadjustment.upper
deltaY = e.y_root - self._vertical.last_pos
value = pos2value(self._vertical.bar_pos+deltaY, self._vertical.virtual_len, upper)
value = max(0, min(value, self.vadjustment.upper-self.vadjustment.page_size))
self.vadjustment.set_value(value)
self._vertical.last_pos = e.y_root
self._vertical.last_time = e.time
self._vertical.in_motion = True
return True
def calc_vbar_length(self):
self._vertical.virtual_len = self.allocation.height
if self.vadjustment.upper <= 1 or self._vertical.policy == gtk.POLICY_NEVER:
self._vertical.bar_len = 0
return
ratio = float(self.vadjustment.page_size) / (self.vadjustment.upper-self.vadjustment.lower)
# assert(self.vadjustment.upper >= self.vadjustment.page_size)
if ratio == 1:
self._vertical.bar_len = 0
else:
bar_len = self._vertical.virtual_len * ratio
if bar_len < self.bar_min_length:
self._vertical.virtual_len -= (self.bar_min_length - bar_len)
self._vertical.bar_len = max(bar_len, self.bar_min_length)
def calc_vbar_allocation(self):
self.vallocation = gdk.Rectangle(
self.allocation.width - self.bar_width, int(self._vertical.bar_pos),
self.bar_width, int(self._vertical.bar_len))
def calc_hbar_length(self):
self._horizaontal.virtual_len = self.allocation.width
if self.hadjustment.upper <= 1 or self._horizaontal.policy == gtk.POLICY_NEVER:
self._horizaontal.bar_len = 0
return
ratio = float(self.hadjustment.page_size) / (self.hadjustment.upper-self.hadjustment.lower)
# assert(self.hadjustment.lower == 0)
# assert(self.hadjustment.upper >= self.hadjustment.page_size)
if ratio == 1:
self._horizaontal.bar_len = 0
else:
bar_len = self._horizaontal.virtual_len * ratio
if bar_len < self.bar_min_length:
self._horizaontal.virtual_len -= (self.bar_min_length - bar_len)
self._horizaontal.bar_len = max(bar_len, self.bar_min_length)
def calc_hbar_allocation(self):
#assert 0 <= int(self.hpos) <= self.allocation.width - self.hbar_length,\
# "self.hpos %f self.allocation.width %f self.hbar_lengh %f" % (self.hpos, self.allocation.width,
# self.hbar_length)
self.hallocation = gdk.Rectangle(
int(self._horizaontal.bar_pos), self.allocation.height - self.bar_width,
int(self._horizaontal.bar_len), self.bar_width)
def vadjustment_changed(self, adj):
if self.get_realized():
# assert(self.vadjustment.value <= self.vadjustment.upper-self.vadjustment.page_size)
upper = self.vadjustment.upper
self._vertical.bar_pos = value2pos(adj.value, self._vertical.virtual_len, upper)
self.calc_vbar_allocation()
self.vwindow.move_resize(*self.vallocation)
self.queue_draw()
def hadjustment_changed(self, adj):
if self.get_realized():
# assert(self.hadjustment.value <= self.hadjustment.upper-self.hadjustment.page_size)
upper = self.hadjustment.upper
self._horizaontal.bar_pos = value2pos(adj.value, self._horizaontal.virtual_len, upper)
self.calc_hbar_allocation()
self.hwindow.move_resize(*self.hallocation)
self.queue_draw()
def add_with_viewport(self, child):
vp = gtk.Viewport()
vp.set_shadow_type(gtk.SHADOW_NONE)
vp.add(child)
vp.show()
self.add(vp)
def add_child(self, child):
self.add_with_viewport(child)
#raise Exception, "use add_with_viewport instead add_child"
def do_add(self, child):
self.child = None
gtk.Bin.do_add(self, child)
child.set_scroll_adjustments(self.hadjustment, self.vadjustment)
def do_size_request(self, requsition):
if self.child:
#print "sel size_request", (requsition.width, requsition.height)
self.child.do_size_request(self.child, requsition)
#print "child size request:", (requsition.width, requsition.height)
def do_size_allocate(self, allocation):
#print "do_size_allocate", allocation
self.allocation = allocation
if self.get_realized():
self.binwindow.move_resize(*self.allocation)
        # must come before calc_xxx_length, because we need the child to compute the adjustment value
if self.child:
(allocation.x, allocation.y) = (0, 0)
self.child.do_size_allocate(self.child, allocation)
self.update_scrollbar()
if self.get_realized():
self.make_bar_smaller(gtk.ORIENTATION_VERTICAL)
self.make_bar_smaller(gtk.ORIENTATION_HORIZONTAL)
def update_scrollbar(self, *arg, **argk):
if self.get_realized():
self.calc_vbar_length()
self.calc_hbar_length()
self.vadjustment.emit('value-changed')
self.hadjustment.emit('value-changed')
def do_unrealize(self):
#print "do_unrealize"
self.binwindow.set_user_data(None)
self.binwindow.destroy()
self.binwindow = None
self.vwindow.set_user_data(None)
self.vwindow.destroy()
self.vwindow = None
self.hwindow.set_user_data(None)
self.hwindow.destroy()
self.hwindow = None
# assert(self.get_realized() == True)
gtk.Bin.do_unrealize(self)
# assert(self.get_realized() == False)
def do_realize(self):
#print "self.get_parent_window():", self.get_parent_window()
#print "do_realize", self.get_realized()
# assert(self.get_realized() == False)
gtk.Bin.do_realize(self)
# assert(self.get_realized() == True)
self.binwindow = gtk.gdk.Window(self.get_parent_window(),
x=self.allocation.x,
y=self.allocation.y,
width=self.allocation.width,
height=self.allocation.height,
window_type=gtk.gdk.WINDOW_CHILD,
wclass=gtk.gdk.INPUT_OUTPUT,
event_mask=(self.get_events()| gdk.EXPOSURE_MASK | gdk.VISIBILITY_NOTIFY_MASK),
visual=self.get_visual(),
colormap=self.get_colormap(),
)
self.binwindow.set_user_data(self)
self.vwindow = gtk.gdk.Window(self.binwindow,
x=self.vallocation.x,
y=self.vallocation.y,
width=self.vallocation.width,
height=self.vallocation.height,
window_type=gtk.gdk.WINDOW_CHILD,
wclass=gtk.gdk.INPUT_OUTPUT,
visual=self.get_visual(),
colormap=self.get_colormap(),
event_mask=(self.get_events()
| gdk.EXPOSURE_MASK
| gdk.ENTER_NOTIFY_MASK | gdk.LEAVE_NOTIFY_MASK | gdk.BUTTON_RELEASE_MASK
| gdk.BUTTON_MOTION_MASK
| gdk.POINTER_MOTION_HINT_MASK | gdk.BUTTON_PRESS_MASK
)
)
self.vwindow.set_user_data(self)
#sefl.vwindow.get_
#self.vwindow.set_background(self.bar_background)
self.hwindow = gtk.gdk.Window(self.binwindow,
x=self.hallocation.x,
y=self.hallocation.y,
width=self.hallocation.width,
height=self.hallocation.height,
window_type=gtk.gdk.WINDOW_CHILD,
wclass=gtk.gdk.INPUT_OUTPUT,
colormap=self.get_colormap(),
visual=self.get_visual(),
event_mask=(self.get_events()
| gdk.EXPOSURE_MASK
| gdk.ENTER_NOTIFY_MASK | gdk.LEAVE_NOTIFY_MASK | gdk.BUTTON_RELEASE_MASK
| gdk.BUTTON_MOTION_MASK
| gdk.POINTER_MOTION_HINT_MASK | gdk.BUTTON_PRESS_MASK
)
)
self.hwindow.set_user_data(self)
#self.hwindow.set_background(self.bar_background)
if self.child:
self.child.set_parent_window(self.binwindow)
self.queue_resize()
def set_shadow_type(self, t):
#raise Warning("dtk's scrolledwindow didn't support this function")
return
def set_policy(self, h, v):
self._horizaontal.policy = h
self._vertical.policy = v
return
def do_map(self):
        gtk.Bin.do_map(self)  # must come before self.xwindow.show(); the reason is unknown.
self.binwindow.show()
self.hwindow.show()
self.vwindow.show()
if self.child and not self.child.get_mapped() and self.child.get_visible():
self.child.do_map(self.child)
def do_unmap(self):
#self.set_mapped(False)
self.binwindow.hide()
self.hwindow.hide()
self.vwindow.hide()
gtk.Bin.do_unmap(self)
def do_remove(self, child):
child.set_scroll_adjustments(None, None)
gtk.Bin.do_remove(self, child)
def get_vadjustment(self):
return self.vadjustment
def get_hadjustment(self):
return self.hadjustment
def set_hadjustment(self, adj):
remove_signal_id(self.h_value_change_id)
remove_signal_id(self.h_change_id)
self.hadjustment = adj
h_value_change_handler_id = self.hadjustment.connect('value-changed', self.hadjustment_changed)
h_change_handler_id = self.hadjustment.connect('changed', self.update_scrollbar)
self.h_value_change_id = (self.hadjustment, h_value_change_handler_id)
self.h_change_id = (self.hadjustment, h_change_handler_id)
def set_vadjustment(self, adj):
remove_signal_id(self.v_value_change_id)
remove_signal_id(self.v_change_id)
self.vadjustment = adj
v_value_change_handler_id = self.vadjustment.connect('value-changed', self.vadjustment_changed)
v_change_handler_id = self.vadjustment.connect('changed', self.update_scrollbar)
self.v_value_change_id = (self.vadjustment, v_value_change_handler_id)
self.v_change_id = (self.vadjustment, v_change_handler_id)
def _test_calc(self):
for i in xrange(0, int(self.vadjustment.upper-self.vadjustment.page_size), 30):
pos = value2pos(i, self._vertical.virtual_len, self.vadjustment.upper)
print "value:%f --> pos:%d" % (i, pos),
assert(pos <= self.allocation.height-self._vertical.bar_len),\
"pos(%f) should small than(%f)" % (pos, self.allocation.height-self._vertical.bar_len)
value = pos2value(pos, self._vertical.virtual_len, self.vadjustment.upper)
print "\t pos:%d -->value:%f" % (pos, value)
print "v_len:%f, height:%f, vir_bar_len:%d" % ( self._vertical.virtual_len,
self.allocation.height, self._vertical.bar_len)
gobject.type_register(ScrolledWindow)
| gpl-3.0 | -7,670,683,437,124,144,000 | 40.596958 | 137 | 0.603016 | false | 3.573412 | false | false | false |
golden-tech-native/gd_facerecognize | server/common/hanzi_to_pinyin/pinyin.py | 1 | 2488 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
原版代码:https://github.com/cleverdeng/pinyin.py
新增功能:
1、可以传入参数firstcode:如果为true,只取汉子的第一个拼音字母;如果为false,则会输出全部拼音;
2、修复:如果为英文字母,则直接输出;
3、修复:如果分隔符为空字符串,仍然能正常输出;
4、升级:可以指定词典的文件路径
"""
__version__ = '0.9'
__all__ = ["PinYin"]
import os.path
class PinYin(object):
def __init__(self):
self.word_dict = {}
def load_word(self, dict_file):
self.dict_file = dict_file
if not os.path.exists(self.dict_file):
raise IOError("NotFoundFile")
with file(self.dict_file) as f_obj:
for f_line in f_obj.readlines():
try:
line = f_line.split(' ')
self.word_dict[line[0]] = line[1]
except:
line = f_line.split(' ')
self.word_dict[line[0]] = line[1]
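    # Note (assumption, for illustration): each line of the dictionary file is
    # expected to look like "4E2D ZHONG1 ...", i.e. a hex Unicode code point
    # followed by one or more pinyin readings; hanzi2pinyin() below keeps only
    # the first reading and strips its trailing tone digit.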
def hanzi2pinyin(self, string="", firstcode=False):
result = []
if not isinstance(string, unicode):
string = string.decode("utf-8")
for char in string:
key = '%X' % ord(char)
value = self.word_dict.get(key, char)
outpinyin = str(value).split()[0][:-1].lower()
if not outpinyin:
outpinyin = char
if firstcode:
result.append(outpinyin[0])
else:
result.append(outpinyin)
return result
def hanzi2pinyin_split(self, string="", split="", firstcode=False):
"""提取中文的拼音
@param string:要提取的中文
@param split:分隔符
@param firstcode: 提取的是全拼还是首字母?如果为true表示提取首字母,默认为False提取全拼
"""
result = self.hanzi2pinyin(string=string, firstcode=firstcode)
return split.join(result)
if __name__ == "__main__":
test = PinYin()
test.load_word('word.data')
string = "Java程序性能优化-让你的Java程序更快更稳定"
print "in: %s" % string
print "out: %s" % str(test.hanzi2pinyin(string=string))
print "out: %s" % test.hanzi2pinyin_split(string=string, split="", firstcode=True)
print "out: %s" % test.hanzi2pinyin_split(string=string, split="", firstcode=False)
| mit | -7,410,435,475,929,635,000 | 27.746667 | 87 | 0.551948 | false | 2.515753 | false | false | false |
mivp/tessterrain | examples/geo_weather/fetch_weather_data.py | 1 | 3521 | #!/usr/bin/env python
import csv
import json
import requests
import numpy as np
import sqlite3
csv_filename = "testdata/vic_usgs/south_west_stations.csv"
db_filename = "testdata/vic_usgs/south_west_stations.db"
# open database
conn = sqlite3.connect(db_filename)
c = conn.cursor()
# Create stations table
c.execute('''CREATE TABLE IF NOT EXISTS stations
(id text, short text, name text, lon real, lat real, height real, json text, UNIQUE(id))''')
# Create observations table
c.execute('''CREATE TABLE IF NOT EXISTS observations
(id text, local_date_time text, local_date_time_full text,
apparent_t real, delta_t real, air_temp real, rain_trace text, rel_hum real, wind_dir text, wind_spd_kmh real,
UNIQUE(id, local_date_time_full))''')
# Create time point table
c.execute('''CREATE TABLE IF NOT EXISTS time
(id text, local_date_time_full text, UNIQUE(local_date_time_full))''')
# stations
with open(csv_filename, 'rb') as csvfile:
stations = csv.DictReader(csvfile)
for station in stations:
id = station["ID"]
short = station["Short"]
name = station["Name"]
lon = float(station["Lon"])
lat = float(station["Lat"])
height = float(station["Height"])
json = station["Json"]
# Insert a row of data into stations
query_str = "INSERT OR IGNORE INTO stations VALUES ('%s', '%s', '%s', %f, %f, %f, '%s')" % (id, short, name, lon, lat, height, json)
print query_str
c.execute( query_str )
# Update observations table
r = requests.get(json)
stationdata = r.json()
data = stationdata["observations"]["data"]
nrows = len(data)
for i in range(nrows):
apparent_t = data[i]['apparent_t']
if apparent_t is None:
apparent_t = 0
delta_t = data[i]['delta_t']
if delta_t is None:
delta_t = 0
air_temp = data[i]['air_temp']
if air_temp is None:
air_temp = 0
rain_trace = data[i]['rain_trace']
if rain_trace is None:
rain_trace = ''
rel_hum = data[i]['rel_hum']
if rel_hum is None:
rel_hum = 0
wind_spd_kmh = data[i]['wind_spd_kmh']
if wind_spd_kmh is None:
wind_spd_kmh = 0
query_str = "INSERT OR IGNORE INTO observations VALUES ('%s', '%s', '%s', %0.2f, %0.2f, %0.2f, '%s', %0.2f, '%s', %0.2f)" % \
(id, data[i]['local_date_time'], data[i]['local_date_time_full'], apparent_t, delta_t, \
air_temp, rain_trace, rel_hum, data[i]['wind_dir'], wind_spd_kmh)
print query_str
c.execute(query_str)
query_str = "INSERT OR IGNORE INTO time VALUES (%s, '%s')" % (id, data[i]['local_date_time_full'])
print query_str
c.execute(query_str)
# commit the change
conn.commit()
# close database
conn.close()
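# Illustrative sketch (not part of the original script): reading the latest
# observation per station back out of the database built above.
def _example_latest_observations(db=db_filename):
    conn2 = sqlite3.connect(db)
    cur = conn2.cursor()
    # SQLite returns the row matching MAX() for the bare air_temp column.
    cur.execute("SELECT id, MAX(local_date_time_full), air_temp "
                "FROM observations GROUP BY id")
    rows = cur.fetchall()
    conn2.close()
    return rows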
# TESTING: print out to check
"""
conn = sqlite3.connect(db_filename)
c = conn.cursor()
c.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables = c.fetchall()
print tables
for table_name in tables:
table_name = table_name[0]
table = pd.read_sql_query("SELECT * from %s" % table_name, conn)
#print table
table.to_csv('testdata/vic_usgs/south_west_stations_' + table_name + '.csv', index_label='index')
# close database
conn.close()
""" | gpl-3.0 | -7,483,555,041,241,263,000 | 31.311927 | 140 | 0.575689 | false | 3.346958 | false | false | false |
eteq/ginga | ginga/qtw/GingaQt.py | 1 | 27414 | #
# GingaQt.py -- Qt display handler for the Ginga reference viewer.
#
# Eric Jeschke ([email protected])
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
# stdlib imports
import sys, os
import traceback
import platform
import time
# GUI imports
from ginga.qtw.QtHelp import QtGui, QtCore, QFont, \
QImage, QIcon, QPixmap, MenuBar
from ginga.qtw import Widgets
# Local application imports
from ginga import cmap, imap
from ginga.misc import Bunch
from ginga.canvas.types.layer import DrawingCanvas
from ginga.util.six.moves import map, zip
moduleHome = os.path.split(sys.modules[__name__].__file__)[0]
sys.path.insert(0, moduleHome)
childDir = os.path.join(moduleHome, 'plugins')
sys.path.insert(0, childDir)
from ginga.qtw import ColorBar, Readout, PluginManagerQt, \
QtHelp, QtMain, ImageViewCanvasQt
icon_path = os.path.abspath(os.path.join(moduleHome, '..', 'icons'))
rc_file = os.path.join(moduleHome, "qt_rc")
class GingaViewError(Exception):
pass
class GingaView(QtMain.QtMain):
def __init__(self, logger, ev_quit):
# call superclass constructors--sets self.app
QtMain.QtMain.__init__(self, logger=logger, ev_quit=ev_quit)
if os.path.exists(rc_file):
self.app.setStyleSheet(rc_file)
# defaults for height and width
#self.default_height = min(900, self.screen_ht - 100)
#self.default_width = min(1600, self.screen_wd)
self.w = Bunch.Bunch()
self.iconpath = icon_path
self._lastwsname = 'channels'
self.layout = None
self._lsize = None
self._rsize = None
def set_layout(self, layout):
self.layout = layout
def get_screen_dimensions(self):
return (self.screen_wd, self.screen_ht)
def build_toplevel(self):
self.font = self.getFont('fixedFont', 12)
self.font11 = self.getFont('fixedFont', 11)
self.font14 = self.getFont('fixedFont', 14)
self.font18 = self.getFont('fixedFont', 18)
self.w.tooltips = None
QtGui.QToolTip.setFont(self.font11)
self.ds = QtHelp.Desktop()
self.ds.make_desktop(self.layout, widgetDict=self.w)
# TEMP: FIX ME!
self.gpmon.ds = self.ds
for root in self.ds.toplevels:
# add delete/destroy callbacks
## root.connect(root, QtCore.SIGNAL('closeEvent()'),
## self.quit)
#root.setApp(self)
root.setWindowTitle("Ginga")
self.ds.add_callback('all-closed', self.quit)
self.w.root = root
self.w.fscreen = None
# Create main (center) FITS image pane
self.w.vbox = self.w['main'].layout()
self.w.vbox.setSpacing(0)
self.w.mnb = self.w['channels']
if isinstance(self.w.mnb, QtGui.QMdiArea):
self.w.mnb.subWindowActivated.connect(self.page_switch_mdi_cb)
self.w.mnb.set_mode('tabs')
else:
self.w.mnb.currentChanged.connect(self.page_switch_cb)
# readout
if self.settings.get('share_readout', True):
self.readout = self.build_readout()
self.add_callback('field-info', self.readout_cb, self.readout, None)
rw = self.readout.get_widget()
self.w.vbox.addWidget(rw, stretch=0)
# bottom buttons
plw = QtGui.QWidget()
hbox = QtGui.QHBoxLayout()
hbox.setContentsMargins(0, 0, 0, 0)
hbox.setSpacing(2)
plw.setLayout(hbox)
cbox1 = QtHelp.ComboBox()
self.w.channel = cbox1
cbox1.setToolTip("Select a channel")
cbox1.activated.connect(self.channel_select_cb)
hbox.addWidget(cbox1, stretch=0)
opmenu = QtGui.QMenu()
self.w.operation = opmenu
btn = QtGui.QPushButton("Operation")
btn.clicked.connect(self.invoke_op_cb)
btn.setToolTip("Invoke operation")
self.w.opbtn = btn
hbox.addWidget(btn, stretch=0)
w = QtGui.QWidget()
self.w.optray = QtGui.QHBoxLayout()
self.w.optray.setContentsMargins(0, 0, 0, 0)
self.w.optray.setSpacing(2)
w.setLayout(self.w.optray)
hbox.addWidget(w, stretch=1, alignment=QtCore.Qt.AlignLeft)
self.w.vbox.addWidget(plw, stretch=0)
# Add colormap bar
cbar = self.build_colorbar()
self.w.vbox.addWidget(cbar, stretch=0)
menuholder = self.w['menu']
# NOTE: menubar is a ginga.Widgets wrapper
self.w.menubar = self.add_menus(menuholder)
self.add_dialogs()
statusholder = self.w['status']
self.add_statusbar(statusholder)
self.w.root.show()
def getPluginManager(self, logger, fitsview, ds, mm):
return PluginManagerQt.PluginManager(logger, fitsview, ds, mm)
def _name_mangle(self, name, pfx=''):
newname = []
for c in name.lower():
if not (c.isalpha() or c.isdigit() or (c == '_')):
newname.append('_')
else:
newname.append(c)
return pfx + ''.join(newname)
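    # For illustration (example input): _name_mangle('My Plugin!', pfx='p_')
    # returns 'p_my_plugin_' -- every character that is not alphanumeric or
    # an underscore is replaced with '_'.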
def add_menus(self, holder):
menubar = Widgets.Menubar()
self.menubar = menubar
menubar_w = menubar.get_widget()
# NOTE: Special hack for Mac OS X, otherwise the menus
# do not get added to the global OS X menu
macos_ver = platform.mac_ver()[0]
if len(macos_ver) > 0:
self.w['top'].layout().addWidget(menubar_w, stretch=0)
else:
holder.layout().addWidget(menubar_w, stretch=1)
# create a File pulldown menu, and add it to the menu bar
filemenu = menubar.add_name("File")
item = filemenu.add_name("Load Image")
item.add_callback('activated', lambda *args: self.gui_load_file())
item = filemenu.add_name("Remove Image")
item.add_callback("activated", lambda *args: self.remove_current_image())
filemenu.add_separator()
item = filemenu.add_name("Quit")
item.add_callback('activated', lambda *args: self.windowClose())
# create a Channel pulldown menu, and add it to the menu bar
chmenu = menubar.add_name("Channel")
item = chmenu.add_name("Add Channel")
item.add_callback('activated', lambda *args: self.gui_add_channel())
item = chmenu.add_name("Add Channels")
item.add_callback('activated', lambda *args: self.gui_add_channels())
item = chmenu.add_name("Delete Channel")
item.add_callback('activated', lambda *args: self.gui_delete_channel())
# create a Window pulldown menu, and add it to the menu bar
wsmenu = menubar.add_name("Workspace")
item = wsmenu.add_name("Add Workspace")
item.add_callback('activated', lambda *args: self.gui_add_ws())
item = wsmenu.add_name("Take Tab")
item.add_callback('activated',
lambda *args: self.ds.take_tab_cb(self.w.mnb,
args))
if isinstance(self.w.mnb, QtGui.QMdiArea):
item = wsmenu.add_name("Panes as Tabs")
            item.add_callback('activated', lambda *args: self.tabstoggle_cb())
item.get_widget().setCheckable(True)
is_tabs = (self.w.mnb.get_mode() == 'tabs')
item.get_widget().setChecked(is_tabs)
item = wsmenu.add_name("Tile Panes")
item.add_callback('activated', lambda *args: self.tile_panes_cb())
item = wsmenu.add_name("Cascade Panes")
            item.add_callback('activated', lambda *args: self.cascade_panes_cb())
# # create a Option pulldown menu, and add it to the menu bar
# optionmenu = menubar.add_name("Option")
# create a Plugins pulldown menu, and add it to the menu bar
plugmenu = menubar.add_name("Plugins")
self.w.menu_plug = plugmenu
# create a Help pulldown menu, and add it to the menu bar
helpmenu = menubar.add_name("Help")
item = helpmenu.add_name("About")
item.add_callback('activated', lambda *args: self.banner(raiseTab=True))
item = helpmenu.add_name("Documentation")
item.add_callback('activated', lambda *args: self.help())
return menubar
def add_dialogs(self):
filesel = QtGui.QFileDialog(self.w.root, directory=os.curdir)
filesel.setFileMode(QtGui.QFileDialog.ExistingFile)
filesel.setViewMode(QtGui.QFileDialog.Detail)
self.filesel = filesel
def add_plugin_menu(self, name):
# NOTE: self.w.menu_plug is a ginga.Widgets wrapper
item = self.w.menu_plug.add_name("Start %s" % (name))
item.add_callback('activated',
lambda *args: self.start_global_plugin(name))
def add_statusbar(self, holder):
self.w.status = QtGui.QStatusBar()
holder.layout().addWidget(self.w.status, stretch=1)
def fullscreen(self):
self.w.root.showFullScreen()
def normalsize(self):
self.w.root.showNormal()
def maximize(self):
self.w.root.showMaximized()
def toggle_fullscreen(self):
if not self.w.root.isFullScreen():
self.w.root.showFullScreen()
else:
self.w.root.showNormal()
def build_fullscreen(self):
w = self.w.fscreen
self.w.fscreen = None
if w is not None:
w.destroy()
return
# Get image from current focused channel
chinfo = self.get_channelInfo()
fitsimage = chinfo.fitsimage
settings = fitsimage.get_settings()
rgbmap = fitsimage.get_rgbmap()
root = QtHelp.TopLevel()
vbox = QtGui.QVBoxLayout()
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
root.setLayout(vbox)
fi = self.build_viewpane(settings, rgbmap=rgbmap)
iw = fi.get_widget()
vbox.addWidget(iw, stretch=1)
# Get image from current focused channel
image = fitsimage.get_image()
if image is None:
return
fi.set_image(image)
# Copy attributes of the frame
fitsimage.copy_attributes(fi,
[#'transforms',
#'cutlevels',
'rgbmap'])
root.showFullScreen()
self.w.fscreen = root
def add_operation(self, title):
opmenu = self.w.operation
item = QtGui.QAction(title, opmenu)
item.triggered.connect(lambda: self.start_operation_cb(title))
opmenu.addAction(item)
self.operations.append(title)
####################################################
# THESE METHODS ARE CALLED FROM OTHER MODULES & OBJECTS
####################################################
def make_button(self, name, wtyp, icon=None, tooltip=None):
picon = None
if icon:
iconfile = os.path.join(self.iconpath, '%s.png' % icon)
try:
image = QImage(iconfile)
pixmap = QPixmap.fromImage(image)
picon = QIcon(pixmap)
qsize = QtCore.QSize(24, 24)
except Exception as e:
self.logger.error("Error loading icon '%s': %s" % (
iconfile, str(e)))
if wtyp == 'button':
if picon:
w = Widgets.Button()
_w = w.get_widget()
_w.setIconSize(qsize)
_w.setIcon(picon)
else:
w = Widgets.Button(name)
elif wtyp == 'toggle':
if picon:
w = Widgets.ToggleButton()
_w = w.get_widget()
_w.setIconSize(qsize)
_w.setIcon(picon)
else:
w = Widgets.ToggleButton()
return w
def set_titlebar(self, text):
self.w.root.setWindowTitle("Ginga: %s" % text)
def build_readout(self):
readout = Readout.Readout(-1, 20)
# NOTE: Special hack for Mac OS X, otherwise the font on the readout
# is too small
macos_ver = platform.mac_ver()[0]
if len(macos_ver) > 0:
readout.set_font(self.font14)
else:
readout.set_font(self.font11)
return readout
def build_colorbar(self):
cbar = ColorBar.ColorBar(self.logger)
cbar.set_cmap(self.cm)
cbar.set_imap(self.im)
cbar.resize(700, 15)
#cbar.show()
self.colorbar = cbar
self.add_callback('active-image', self.change_cbar, cbar)
cbar.add_callback('motion', self.cbar_value_cb)
fr = QtGui.QFrame()
fr.setContentsMargins(0, 0, 0, 0)
layout = QtGui.QHBoxLayout()
fr.setLayout(layout)
layout.setContentsMargins(0, 0, 0, 0)
fr.setFrameStyle(QtGui.QFrame.Box | QtGui.QFrame.Raised)
layout.addWidget(cbar, stretch=1)
return fr
def build_viewpane(self, settings, rgbmap=None):
# instantiate bindings loaded with users preferences
bclass = ImageViewCanvasQt.ImageViewCanvas.bindingsClass
bindprefs = self.prefs.createCategory('bindings')
bd = bclass(self.logger, settings=bindprefs)
fi = ImageViewCanvasQt.ImageViewCanvas(logger=self.logger,
rgbmap=rgbmap,
settings=settings,
bindings=bd)
canvas = DrawingCanvas()
canvas.enable_draw(False)
fi.set_canvas(canvas)
fi.set_follow_focus(settings.get('follow_focus', True))
fi.enable_auto_orient(True)
fi.add_callback('motion', self.motion_cb)
fi.add_callback('cursor-down', self.force_focus_cb)
fi.add_callback('key-press', self.keypress)
fi.add_callback('drag-drop', self.dragdrop)
fi.ui_setActive(True)
for name in ['cuts']:
settings.getSetting(name).add_callback('set',
self.change_range_cb, fi, self.colorbar)
bd = fi.get_bindings()
bd.enable_all(True)
rgbmap = fi.get_rgbmap()
rgbmap.add_callback('changed', self.rgbmap_cb, fi)
fi.set_bg(0.2, 0.2, 0.2)
return fi
def add_viewer(self, name, settings,
use_readout=False, workspace=None):
vwidget = QtGui.QWidget()
vbox = QtGui.QVBoxLayout()
vbox.setContentsMargins(1, 1, 1, 1)
vbox.setSpacing(0)
vwidget.setLayout(vbox)
fi = self.build_viewpane(settings)
iw = fi.get_widget()
fi.add_callback('focus', self.focus_cb, name)
vbox.addWidget(iw, stretch=1)
fi.set_name(name)
if use_readout:
readout = self.build_readout()
# TEMP: hack
readout.fitsimage = fi
fi.add_callback('image-set', self.readout_config, readout)
self.add_callback('field-info', self.readout_cb, readout, name)
rw = readout.get_widget()
rw.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed,
QtGui.QSizePolicy.Fixed))
vbox.addWidget(rw, stretch=0, alignment=QtCore.Qt.AlignLeft)
else:
readout = None
# Add a page to the specified notebook
if not workspace:
workspace = 'channels'
self.ds.add_tab(workspace, vwidget, 1, name)
self.update_pending()
bnch = Bunch.Bunch(fitsimage=fi, view=iw, container=vwidget,
readout=readout, workspace=workspace)
return bnch
def gui_add_channel(self, chname=None):
if not chname:
self.chncnt += 1
chname = "Image%d" % self.chncnt
lbl = QtGui.QLabel('New channel name:')
ent = QtGui.QLineEdit()
ent.setText(chname)
lbl2 = QtGui.QLabel('Workspace:')
cbox = QtHelp.ComboBox()
names = self.ds.get_wsnames()
try:
idx = names.index(self._lastwsname)
except:
idx = 0
for name in names:
cbox.append_text(name)
cbox.setCurrentIndex(idx)
dialog = QtHelp.Dialog("Add Channel",
0,
[['Cancel', 0], ['Ok', 1]],
lambda w, rsp: self.add_channel_cb(w, rsp, ent, cbox, names))
box = dialog.get_content_area()
layout = QtGui.QVBoxLayout()
box.setLayout(layout)
layout.addWidget(lbl, stretch=0)
layout.addWidget(ent, stretch=0)
layout.addWidget(lbl2, stretch=0)
layout.addWidget(cbox, stretch=0)
dialog.show()
def gui_add_channels(self):
captions = (('Prefix', 'entry'),
('Number', 'spinbutton'),
('Workspace', 'combobox'),
)
w, b = QtHelp.build_info(captions)
b.prefix.setText("Image")
b.number.setRange(1, 12)
b.number.setSingleStep(1)
b.number.setValue(1)
cbox = b.workspace
names = self.ds.get_wsnames()
try:
idx = names.index('channels')
except:
idx = 0
for name in names:
cbox.append_text(name)
cbox.setCurrentIndex(idx)
dialog = QtHelp.Dialog("Add Channels",
0,
[['Cancel', 0], ['Ok', 1]],
lambda w, rsp: self.add_channels_cb(w, rsp,
b, names))
box = dialog.get_content_area()
layout = QtGui.QVBoxLayout()
box.setLayout(layout)
layout.addWidget(w, stretch=1)
dialog.show()
def gui_delete_channel(self):
chinfo = self.get_channelInfo()
chname = chinfo.name
lbl = QtGui.QLabel("Really delete channel '%s' ?" % (chname))
dialog = QtHelp.Dialog("Delete Channel",
0,
[['Cancel', 0], ['Ok', 1]],
lambda w, rsp: self.delete_channel_cb(w, rsp, chname))
box = dialog.get_content_area()
layout = QtGui.QVBoxLayout()
box.setLayout(layout)
layout.addWidget(lbl, stretch=0)
dialog.show()
def gui_add_ws(self):
captions = (('Workspace name', 'entry'),
('Workspace type', 'combobox'),
('In workspace', 'combobox'),
('Channel prefix', 'entry'),
('Number of channels', 'spinbutton'),
('Share settings', 'entry'),
)
w, b = QtHelp.build_info(captions)
self.wscount += 1
wsname = "ws%d" % (self.wscount)
b.workspace_name.setText(wsname)
b.share_settings.setMaxLength(60)
cbox = b.workspace_type
cbox.append_text("Tabs")
cbox.append_text("Grid")
cbox.append_text("MDI")
cbox.setCurrentIndex(1)
cbox = b.in_workspace
names = self.ds.get_wsnames()
names.insert(0, 'top level')
try:
idx = names.index('channels')
except:
idx = 0
for name in names:
cbox.append_text(name)
cbox.setCurrentIndex(idx)
b.channel_prefix.setText("Image")
spnbtn = b.number_of_channels
spnbtn.setRange(0, 12)
spnbtn.setSingleStep(1)
spnbtn.setValue(4)
dialog = QtHelp.Dialog("Add Workspace",
0,
[['Cancel', 0], ['Ok', 1]],
lambda w, rsp: self.new_ws_cb(w, rsp, b, names))
box = dialog.get_content_area()
layout = QtGui.QVBoxLayout()
box.setLayout(layout)
layout.addWidget(w, stretch=1)
dialog.show()
def new_ws_cb(self, w, rsp, b, names):
w.close()
wsname = str(b.workspace_name.text())
idx = b.workspace_type.currentIndex()
if rsp == 0:
return
d = { 0: 'nb', 1: 'grid', 2: 'mdi' }
wstype = d[idx]
idx = b.in_workspace.currentIndex()
inSpace = names[idx]
self.add_workspace(wsname, wstype, inSpace=inSpace)
chpfx = b.channel_prefix.text()
num = int(b.number_of_channels.value())
if num <= 0:
return
# Create a settings template to copy settings from
settings_template = self.prefs.getSettings('channel_Image')
name = "channel_template_%f" % (time.time())
settings = self.prefs.createCategory(name)
settings_template.copySettings(settings)
share_list = b.share_settings.text().split()
chbase = self.chncnt
self.chncnt += num
for i in range(num):
chname = "%s%d" % (chpfx, chbase+i)
self.add_channel(chname, workspace=wsname,
settings_template=settings_template,
settings_share=settings,
share_keylist=share_list)
return True
def gui_load_file(self, initialdir=None):
if self.filesel.exec_():
fileNames = list(map(str, list(self.filesel.selectedFiles())))
self.load_file(fileNames[0])
#self.start_operation_cb('FBrowser')
def statusMsg(self, format, *args):
if not format:
s = ''
else:
s = format % args
# remove message in about 10 seconds
self.w.status.showMessage(s, 10000)
def setPos(self, x, y):
self.w.root.move(x, y)
def setSize(self, wd, ht):
self.w.root.resize(wd, ht)
def setGeometry(self, geometry):
# Painful translation of X window geometry specification
# into correct calls to Qt
coords = geometry.replace('+', ' +')
coords = coords.replace('-', ' -')
coords = coords.split()
if 'x' in coords[0]:
# spec includes dimensions
dim = coords[0]
coords = coords[1:]
else:
# spec is position only
dim = None
if dim is not None:
# user specified dimensions
dim = list(map(int, dim.split('x')))
self.setSize(*dim)
if len(coords) > 0:
# user specified position
coords = list(map(int, coords))
self.setPos(*coords)
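    # Illustrative note (not from the original source): with the parsing above, a
    # spec like "1200x900+100+50" splits into dim "1200x900" and positions
    # ["+100", "+50"], so it maps to self.setSize(1200, 900) followed by
    # self.setPos(100, 50); a position-only spec such as "+300-200" skips
    # setSize() and calls self.setPos(300, -200).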
def collapse_pane(self, side):
"""
Toggle collapsing the left or right panes.
"""
# TODO: this is too tied to one configuration, need to figure
# out how to generalize this
hsplit = self.w['hpnl']
sizes = hsplit.sizes()
lsize, msize, rsize = sizes
if self._lsize is None:
self._lsize, self._rsize = lsize, rsize
self.logger.debug("left=%d mid=%d right=%d" % (
lsize, msize, rsize))
if side == 'right':
if rsize < 10:
# restore pane
rsize = self._rsize
msize -= rsize
else:
# minimize pane
self._rsize = rsize
msize += rsize
rsize = 0
elif side == 'left':
if lsize < 10:
# restore pane
lsize = self._lsize
msize -= lsize
else:
# minimize pane
self._lsize = lsize
msize += lsize
lsize = 0
hsplit.setSizes((lsize, msize, rsize))
def getFont(self, fontType, pointSize):
fontFamily = self.settings.get(fontType)
font = QFont(fontFamily, pointSize)
return font
####################################################
# CALLBACKS
####################################################
def windowClose(self, *args):
"""Quit the application.
"""
self.quit()
def quit(self, *args):
"""Quit the application.
"""
self.logger.info("Attempting to shut down the application...")
self.stop()
root = self.w.root
self.w.root = None
while len(self.ds.toplevels) > 0:
w = self.ds.toplevels.pop()
w.deleteLater()
def channel_select_cb(self, index):
if index >= 0:
chname = self.channelNames[index]
self.logger.debug("Channel changed, index=%d chname=%s" % (
index, chname))
self.change_channel(chname)
def add_channel_cb(self, w, rsp, ent, cbox, names):
chname = str(ent.text())
idx = cbox.currentIndex()
wsname = names[idx]
w.close()
# save name for next add
self._lastwsname = wsname
if rsp == 0:
return
self.add_channel(chname, workspace=wsname)
return True
def add_channels_cb(self, w, rsp, b, names):
chpfx = b.prefix.text()
idx = b.workspace.currentIndex()
wsname = names[idx]
num = int(b.number.value())
w.close()
if (rsp == 0) or (num <= 0):
return
chbase = self.chncnt
self.chncnt += num
for i in range(num):
chname = "%s%d" % (chpfx, chbase+i)
self.add_channel(chname, workspace=wsname)
return True
def delete_channel_cb(self, w, rsp, chname):
w.close()
if rsp == 0:
return
self.delete_channel(chname)
return True
def invoke_op_cb(self):
menu = self.w.operation
menu.popup(self.w.opbtn.mapToGlobal(QtCore.QPoint(0,0)))
def start_operation_cb(self, name):
index = self.w.channel.currentIndex()
chname = str(self.w.channel.itemText(index))
return self.start_local_plugin(chname, name, None)
def tile_panes_cb(self):
self.w.mnb.tileSubWindows()
def cascade_panes_cb(self):
self.w.mnb.cascadeSubWindows()
def tabstoggle_cb(self, useTabs):
if useTabs:
self.w.mnb.setViewMode(QtGui.QMdiArea.TabbedView)
else:
self.w.mnb.setViewMode(QtGui.QMdiArea.SubWindowView)
def page_switch_cb(self, index):
self.logger.debug("index switched to %d" % (index))
if index >= 0:
container = self.w.mnb.widget(index)
self.logger.debug("container is %s" % (container))
# Find the channel that contains this widget
chnames = self.get_channelNames()
for chname in chnames:
chinfo = self.get_channelInfo(chname)
if 'container' in chinfo and (chinfo.container == container):
fitsimage = chinfo.fitsimage
if fitsimage != self.getfocus_fitsimage():
self.logger.debug("Active channel switch to '%s'" % (
chname))
self.change_channel(chname, raisew=False)
return True
def page_switch_mdi_cb(self, w):
if w is not None:
index = self.w.mnb.indexOf(w.widget())
return self.page_switch_cb(index)
# END
| bsd-3-clause | 8,010,920,929,081,022,000 | 32.14873 | 92 | 0.546691 | false | 3.76773 | false | false | false |
mapgears/scribeui | scribeui_pyramid/modules/plugins/mapcache/models.py | 1 | 1215 | # -*- coding: utf-8 -*-
from scribeui_pyramid.modules.app.sqla import Base, BaseMixin
import sqlalchemy as sa
#from . import (
# DBSession,
# Base,
# BaseMixin
#)
class Job(Base, BaseMixin):
__tablename__ = 'jobs'
id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
title = sa.Column(sa.Unicode(255), nullable=False)
status = sa.Column(sa.Integer, nullable=False)
map_id = sa.Column(sa.Integer, sa.ForeignKey('maps.id'),
nullable=False)
def __repr__(self):
return u"<Job('{0}')>".format(self.title)
class DatabaseConfig(Base, BaseMixin):
__tablename__ = 'database_configs'
id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
name = sa.Column(sa.Unicode(255), nullable=False)
type = sa.Column(sa.Unicode())
host = sa.Column(sa.Unicode())
port = sa.Column(sa.Integer)
database_name = sa.Column(sa.Unicode())
user = sa.Column(sa.Unicode())
query = sa.Column(sa.Unicode())
workspace_id = sa.Column(sa.Integer, sa.ForeignKey('workspaces.id'),
nullable=False)
def __repr__(self):
return u"<DatabaseConfig('{0}')>".format(self.name)
| mit | -287,510,345,046,188,900 | 27.255814 | 72 | 0.61893 | false | 3.393855 | false | false | false |
barneygale/mcocr | app/server.py | 1 | 2546 | import StringIO
import asyncore
import socket
import urlparse
import re
import settings as settings_herp
import os
import mimetypes
import time
import traceback
import docs
import http
mimetypes.init()
response_reasons = {
200: 'OK',
304: 'Not Modified',
404: 'Not Found',
500: 'Internal Server Error',
501: 'Not Implemented'}
handlers = {}
for name in dir(docs):
if name.endswith('Doc'):
handlers[re.compile(getattr(docs, name).expression)] = getattr(docs, name)
class Server:
def __init__(self):
#Settings handler
self.settings = settings_herp.Settings()
try:
self.settings.load()
except:
self.settings.create()
def serve_forever(self):
self.client_dispatcher = self.ConnectionDispatcher(self.settings)
asyncore.loop(use_poll = False)
#######
#######
#Dispatches incoming connections to a new handler.
class ConnectionDispatcher(asyncore.dispatcher):
id = 0
current_id = 1
def __init__(self, settings):
asyncore.dispatcher.__init__(self)
self.settings = settings
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind((settings['server_host'], settings['server_port']))
self.listen(settings['server_limit'])
def handle_accept(self):
pair = self.accept()
if pair is None:
pass
else:
sock, addr = pair
handler = Server.ConnectionHandler(sock)
handler.settings = self.settings
handler.id = self.current_id
self.current_id += 1
class ConnectionHandler(asyncore.dispatcher):
rbuff = ""
wbuff = ""
def handle_read(self):
self.rbuff += self.recv(self.settings['server_buffersize'])
try:
request = http.HTTPRequest()
request.decode(self.rbuff)
self.rbuff = ""
for i in handlers.iteritems():
m = i[0].match(request._data['path_path'])
if m:
i[1].handle_request(self, request, m.groupdict())
return
#Error state: no handlers recognise the URL!
err = http.HTTPResponse(responsecode=501)
print err.encode()
self.do_write(err.encode())
except http.BufferUnderflowException:
print "Waiting for more data..."
def do_write(self, data):
self.wbuff += data
def handle_write(self):
if self.wbuff:
sent = self.send(self.wbuff)
print "Wrote %d bytes" % sent
self.wbuff = self.wbuff[sent:]
if len(self.wbuff) == 0:
self.close()
def writable(self):
return len(self.wbuff) > 0
def handle_error(self):
err = http.HTTPResponse(responsecode=500, response=traceback.format_exc())
self.do_write(err.encode())
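# Minimal usage sketch (not part of the original module); it assumes the settings
# file supplies server_host, server_port, server_limit and server_buffersize.
if __name__ == '__main__':
    server = Server()
    server.serve_forever()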
| bsd-3-clause | -3,733,676,247,853,530,600 | 23.018868 | 77 | 0.677141 | false | 3.186483 | false | false | false |
google-research/google-research | caql/dual_method.py | 1 | 5310 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dual functions."""
import tensorflow.compat.v1 as tf
def get_I(l, u):
  # l, u are [None, n_layer] tensors of lower/upper pre-activation bounds
# Ip: active relu units
# I: unstable relu units
Ip = tf.where(
tf.logical_and(tf.greater_equal(l, 0.), tf.greater(u, 0.)),
tf.ones_like(u), tf.zeros_like(u))
I = tf.where(
tf.logical_and(tf.greater(u, 0.), tf.less(l, 0.)), tf.ones_like(u),
tf.zeros_like(u))
return Ip, I
def get_D(l, u, Ip, I):
# D matrix for each layer
D = Ip + tf.where(tf.greater(I, 0.5), tf.divide(u, u - l), tf.zeros_like(I))
return D
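# Worked example (illustrative, not from the original code): for an unstable ReLU
# unit with bounds l = -1 and u = 3, get_I yields Ip = 0, I = 1 and get_D yields
# D = 0 + u / (u - l) = 3 / 4 = 0.75; for an active unit with l = 0.5 and u = 2,
# Ip = 1, I = 0 and D = 1. These D values define the linear ReLU relaxation used
# by the dual bound below.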
def create_dual_approx(num_layers, batch_size, action_max, W_T_list, b_T_list,
action_tensor_center, return_full_info=False):
  # layers_n: number of hidden units in each layer
  # W_T_list, b_T_list: multiplicative and bias weights for each layer
  # action_tensor_center: the raw action input around which the bound is computed
# List of bounds (l_i,u_i) for i = 2,...,K-1
l_list = [tf.zeros_like(action_tensor_center)]
u_list = [tf.zeros_like(action_tensor_center)]
# List of transition matrices D_i for i = 2,...,K-1
D_list = [tf.zeros_like(action_tensor_center)]
# Indicators of spanning ReLu neurons for i = 2,...,K-1
I_list = [tf.zeros_like(action_tensor_center)]
# Indicators of active ReLu neurons for i = 2,...,K-1
Ip_list = [tf.zeros_like(action_tensor_center)]
# Final list of duals nu_i for i = 2,...,K-1
Nu_list = [tf.zeros([batch_size, W_T_list[0].get_shape().as_list()[1], 1])]
# Initialize Nu_K
Nu_K = -tf.expand_dims(-tf.eye(1), axis=-1)
# Final list of b_i'*nu_{i+1} for i = 1,...,K-1
gamma_list = [b_T_list[0]]
# Pre-compute bounds for layer 2
# Initialize Nu_hat_1
Nu_hat_1 = tf.tile(tf.expand_dims(W_T_list[0], axis=0), [batch_size, 1, 1])
# Initialize bounds
l_2 = tf.matmul(action_tensor_center,
W_T_list[0]) + gamma_list[0] - action_max * tf.norm(
Nu_hat_1, 1, axis=1, keepdims=False)
u_2 = tf.matmul(action_tensor_center,
W_T_list[0]) + gamma_list[0] + action_max * tf.norm(
Nu_hat_1, 1, axis=1, keepdims=False)
# Add to list (store in vector format)
l_list.append(l_2)
u_list.append(u_2)
# Recursion
for i in range(2, num_layers):
# form Ip, I
Ip_i, I_i = get_I(l_list[i - 1], u_list[i - 1])
I_list.append(I_i)
Ip_list.append(Ip_i)
# form D
D_i = get_D(l_list[i - 1], u_list[i - 1], Ip_i, I_i)
D_list.append(D_i)
# initialize nu_i
Nu_list.append(tf.einsum('ij,jk->ijk', D_i, W_T_list[i - 1]))
# initialize gamma_i
gamma_list.append(b_T_list[i - 1])
# if final iteration, update with Nu_K
if i == num_layers - 1:
Nu_K = tf.tile(Nu_K, [Nu_list[i - 1].get_shape().as_list()[0], 1, 1])
Nu_list[i - 1] = tf.einsum('ijk,ikm->ijm', Nu_list[i - 1], Nu_K)
gamma_list[i - 1] = tf.einsum('ij,ijm->im', gamma_list[i - 1], Nu_K)
# initialize next layer bounds
l_ip1 = tf.einsum('ij,ijm->im', l_list[i - 1] * I_list[i - 1],
tf.nn.relu(-Nu_list[i - 1]))
u_ip1 = -tf.einsum('ij,ijm->im', l_list[i - 1] * I_list[i - 1],
tf.nn.relu(Nu_list[i - 1]))
# update nu for layers i-1,...,2
for j in range(i - 1, 1, -1):
Nu_hat_j = tf.einsum('jk,ikm->ijm', W_T_list[j - 1], Nu_list[j])
Nu_list[j - 1] = tf.einsum('ij,ijk->ijk', D_list[j - 1], Nu_hat_j)
l_ip1 = tf.add(
l_ip1,
tf.einsum('ij,ijm->im', l_list[j - 1] * I_list[j - 1],
tf.nn.relu(-Nu_list[j - 1])))
u_ip1 = tf.subtract(
u_ip1,
tf.einsum('ij,ijm->im', l_list[j - 1] * I_list[j - 1],
tf.nn.relu(Nu_list[j - 1])))
# update nu_hat_1
Nu_hat_1 = tf.einsum('jk,ikm->ijm', W_T_list[0], Nu_list[1])
# start sum
psi = tf.einsum('ij,ijm->im', action_tensor_center,
Nu_hat_1) + gamma_list[i - 1]
# update gamma for layers 1,...,i-1
for j in range(1, i):
gamma_list[j - 1] = tf.einsum('ij,ijm->im', b_T_list[j - 1], Nu_list[j])
psi = tf.add(psi, gamma_list[j - 1])
Nu_hat_1_norm = tf.norm(Nu_hat_1, 1, axis=1, keepdims=False)
if i < num_layers - 1:
# finalize bounds
l_ip1 = tf.add(l_ip1, psi - action_max * Nu_hat_1_norm)
u_ip1 = tf.add(u_ip1, psi + action_max * Nu_hat_1_norm)
# add to list
l_list.append(l_ip1)
u_list.append(u_ip1)
else:
# compute J_tilde
J_tilde = -psi - action_max * Nu_hat_1_norm - u_ip1
if return_full_info:
return (-J_tilde, l_list, u_list, D_list, Nu_list, gamma_list, psi, l_ip1,
u_ip1, Nu_hat_1)
else:
return -J_tilde
| apache-2.0 | 5,858,996,064,217,988,000 | 31.181818 | 78 | 0.575706 | false | 2.652348 | false | false | false |
googleapis/python-game-servers | samples/snippets/update_cluster.py | 1 | 2348 | #!/usr/bin/env python
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Game Servers sample for updating a game server cluster.
Example usage:
python update_cluster.py --project-id <project-id> --location <location> --realm-id <realm-id> --cluster-id <cluster-id>
"""
import argparse
from google.cloud import gaming
from google.cloud.gaming_v1.types import game_server_clusters
from google.protobuf import field_mask_pb2 as field_mask
# [START cloud_game_servers_cluster_update]
def update_cluster(project_id, location, realm_id, cluster_id):
"""Updates a game server cluster."""
client = gaming.GameServerClustersServiceClient()
request = game_server_clusters.UpdateGameServerClusterRequest(
game_server_cluster=game_server_clusters.GameServerCluster(
name=f"projects/{project_id}/locations/{location}/realms/{realm_id}/gameServerClusters/{cluster_id}",
labels={"label-key-1": "label-value-1", "label-key-2": "label-value-2"},
),
update_mask=field_mask.FieldMask(paths=["labels"]),
)
operation = client.update_game_server_cluster(request)
print(f"Update cluster operation: {operation.operation.name}")
operation.result(timeout=120)
# [END cloud_game_servers_cluster_update]
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--project-id', help='Your cloud project ID.', required=True)
parser.add_argument('--location', help='Your realm location.', required=True)
parser.add_argument('--realm-id', help='Your realm ID.', required=True)
parser.add_argument('--cluster-id', help='Your game server cluster ID.', required=True)
args = parser.parse_args()
update_cluster(args.project_id, args.location, args.realm_id, args.cluster_id)
| apache-2.0 | -2,334,753,272,189,716,500 | 38.79661 | 124 | 0.721039 | false | 3.77492 | false | false | false |
mclaughlin6464/pearce | bin/optimization/cosmo_hyperparams_mcmc.py | 1 | 2197 | import emcee as mc
from pearce.emulator import OriginalRecipe, ExtraCrispy
from pearce.mocks import cat_dict
import numpy as np
from os import path
training_file = '/home/users/swmclau2/scratch/PearceRedMagicXiCosmo.hdf5'
a = 1.0
z = 1./a-1.0
fixed_params = {'z':z, 'r':24.06822623}
n_leaves, n_overlap = 10, 2
em_method = 'gp'
emu = OriginalRecipe(training_file, method = em_method, fixed_params=fixed_params, downsample_factor = 0.1)
# TODO downsample sampling?
def nll(p):
emu._emulator.set_parameter_vector(p)
ll = emu._emulator.lnlikelihood(emu.downsample_y, quiet=True)
return -ll if np.isfinite(ll) else 1e25
def lnprior(theta):
return -np.inf if np.any(np.logical_or(theta < -15, theta > 15)) else 0
def lnprob(theta, *args):
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp - nll(theta, *args)
#p0 = emu._emulator.get_parameter_vector()
#p0 = np.array([ 0. , 10.6161248, 1.8339794, 7.342365 , 10.6371797,
# 2.2441632, 13.8155106, 11.3512804, 3.1795786, 4.6846614,
# 1. , 5.0188608, 3.7658774, -1.5383083])
p0 = np.array([-12.0550382, 0.1054246, 0.2661017, 5.6407612, 0.2408568, 1.1295944,
               0.3643993, 11.5649985, 4.9071932, 4.7031938, 1., 11.7621938,
               10.6279446, 0., 10.6161248, 1.8339794, 7.342365, 10.6371797,
               2.2441632, 13.8155106, 11.3512804, 3.1795786, 4.6846614, 1.,
               5.0188608, 3.7658774, -1.5383083])
nwalkers = 100
nsteps = 2000
nburn = 0
num_params = p0.shape[0]#len(emu.get_param_names())+1
pos0 = p0+np.random.randn(nwalkers, num_params)
ncores = 16
savedir = '/home/users/swmclau2/scratch/'
chain_fname = path.join(savedir, '%d_walkers_%d_steps_cosmo_hyperparams.npy'%(nwalkers, nsteps))
pnames = ['amp']
pnames.extend(emu.get_param_names())
with open(chain_fname, 'w') as f:
f.write('#' + '\t'.join(pnames)+'\t'+ '\t'.join(pnames)+'\tamp'+'\n')
sampler = mc.EnsembleSampler(nwalkers, num_params, lnprob, threads=ncores)
for result in sampler.sample(pos0, iterations = nsteps, storechain=False):
with open(chain_fname, 'a') as f:
np.savetxt(f, result[0])
| mit | 4,898,353,105,985,238,000 | 32.287879 | 107 | 0.646791 | false | 2.449275 | false | false | false |
Azure/azure-sdk-for-python | sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_state_audit_record.py | 1 | 1805 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class JobStateAuditRecord(Model):
"""The Data Lake Analytics job state audit records for tracking the lifecycle
of a job.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar new_state: The new state the job is in.
:vartype new_state: str
    :ivar time_stamp: The time stamp at which the state change took place.
:vartype time_stamp: datetime
:ivar requested_by_user: The user who requests the change.
:vartype requested_by_user: str
:ivar details: The details of the audit log.
:vartype details: str
"""
_validation = {
'new_state': {'readonly': True},
'time_stamp': {'readonly': True},
'requested_by_user': {'readonly': True},
'details': {'readonly': True},
}
_attribute_map = {
'new_state': {'key': 'newState', 'type': 'str'},
'time_stamp': {'key': 'timeStamp', 'type': 'iso-8601'},
'requested_by_user': {'key': 'requestedByUser', 'type': 'str'},
'details': {'key': 'details', 'type': 'str'},
}
def __init__(self, **kwargs):
super(JobStateAuditRecord, self).__init__(**kwargs)
self.new_state = None
self.time_stamp = None
self.requested_by_user = None
self.details = None
| mit | -5,939,820,053,492,980,000 | 34.392157 | 81 | 0.584488 | false | 4.187935 | false | false | false |
tcalmant/ipopo | tests/shell/test_eventadmin.py | 1 | 5604 | #!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Tests the EventAdmin shell commands
:author: Thomas Calmant
"""
# Standard library
import threading
try:
import unittest2 as unittest
except ImportError:
import unittest
# Pelix
from pelix.ipopo.constants import use_ipopo
import pelix.framework
import pelix.services
import pelix.shell
# ------------------------------------------------------------------------------
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)
# ------------------------------------------------------------------------------
class DummyEventHandler(object):
"""
Dummy event handler
"""
def __init__(self):
"""
Sets up members
"""
# Topic of the last received event
self.last_event = None
self.last_props = {}
self.__event = threading.Event()
def handle_event(self, topic, properties):
"""
Handles an event received from EventAdmin
"""
# Keep received values
self.last_event = topic
self.last_props = properties
self.__event.set()
def pop_event(self):
"""
Pops the list of events
"""
# Clear the event for next try
self.__event.clear()
# Reset last event
event, self.last_event = self.last_event, None
return event
def wait(self, timeout):
"""
Waits for the event to be received
"""
self.__event.wait(timeout)
# ------------------------------------------------------------------------------
class EventAdminShellTest(unittest.TestCase):
"""
Tests the EventAdmin shell commands
"""
def setUp(self):
"""
Prepares a framework and a registers a service to export
"""
# Create the framework
self.framework = pelix.framework.create_framework(
('pelix.ipopo.core',
'pelix.shell.core',
'pelix.services.eventadmin',
'pelix.shell.eventadmin'))
self.framework.start()
# Get the Shell service
context = self.framework.get_bundle_context()
svc_ref = context.get_service_reference(pelix.shell.SERVICE_SHELL)
self.shell = context.get_service(svc_ref)
# Instantiate the EventAdmin component
context = self.framework.get_bundle_context()
with use_ipopo(context) as ipopo:
self.eventadmin = ipopo.instantiate(
pelix.services.FACTORY_EVENT_ADMIN,
"evtadmin", {})
def _register_handler(self, topics, evt_filter=None):
"""
Registers an event handler
:param topics: Event topics
:param evt_filter: Event filter
"""
svc = DummyEventHandler()
context = self.framework.get_bundle_context()
svc_reg = context.register_service(
pelix.services.SERVICE_EVENT_HANDLER, svc,
{pelix.services.PROP_EVENT_TOPICS: topics,
pelix.services.PROP_EVENT_FILTER: evt_filter})
return svc, svc_reg
def _run_command(self, command, *args):
"""
Runs the given shell command
"""
# Format command
if args:
command = command.format(*args)
# Run command
self.shell.execute(command)
def tearDown(self):
"""
Cleans up for next test
"""
# Stop the framework
pelix.framework.FrameworkFactory.delete_framework(self.framework)
self.framework = None
def testTopics(self):
"""
Tests sending topics
"""
# Prepare a handler
handler, _ = self._register_handler('/titi/*')
# Send events, with a matching topic
for topic in ('/titi/toto', '/titi/', '/titi/42', '/titi/toto/tata'):
self._run_command("send {0}", topic)
self.assertEqual(handler.pop_event(), topic)
# Send events, with a non-matching topic
for topic in ('/toto/titi/42', '/titi', '/toto/42'):
self._run_command("send {0}", topic)
self.assertEqual(handler.pop_event(), None)
def testFilters(self):
"""
        Tests sending events with properties
"""
# Prepare a handler
key = "some.key"
handler, _ = self._register_handler(None, '({0}=42)'.format(key))
# Assert the handler is empty
self.assertEqual(handler.pop_event(), None)
# Send event, with matching properties
for topic in ('/titi/toto', '/toto/', '/titi/42', '/titi/toto/tata'):
value = 42
evt_props = {key: value}
self._run_command("send {0} {1}=42", topic, key, value)
# Check properties
self.assertIn(key, handler.last_props)
self.assertEqual(str(handler.last_props[key]), str(value))
self.assertIsNot(handler.last_props, evt_props)
# Check topic
self.assertEqual(handler.pop_event(), topic)
            # Send events with non-matching properties
self._run_command("send {0} {1}=21", topic, key)
self.assertEqual(handler.pop_event(), None)
def testPost(self):
"""
Tests the post event method
"""
# Prepare a handler
handler, _ = self._register_handler('/titi/*')
# Post a message
topic = '/titi/toto'
self._run_command("post {0}", topic)
# Wait a little
handler.wait(1)
self.assertEqual(handler.pop_event(), topic)
| apache-2.0 | 4,493,402,563,130,737,000 | 27.738462 | 80 | 0.544254 | false | 4.207207 | true | false | false |
Micronaet/micronaet-bom | order_bom_explode_report/report/originali/mrp_status_hw_component_parser.py | 1 | 21833 | #!/usr/bin/python
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2010-2012 Associazione OpenERP Italia
# (<http://www.openerp-italia.org>).
# Copyright(c)2008-2010 SIA "KN dati".(http://kndati.lv) All Rights Reserved.
# General contacts <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import logging
import openerp
import xlsxwriter # XLSX export
import openerp.netsvc as netsvc
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.report import report_sxw
from openerp.report.report_sxw import rml_parse
from datetime import datetime, timedelta
from openerp.tools.translate import _
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
class Parser(report_sxw.rml_parse):
default_days = 30
def __init__(self, cr, uid, name, context):
super(Parser, self).__init__(cr, uid, name, context)
self.localcontext.update({
'get_object': self.get_object,
'get_filter': self.get_filter,
'get_date': self.get_date,
'get_parent_oc_period': self.get_parent_oc_period,
})
def get_parent_oc_period(self, parent):
res = ''
period = self.order_month.get(parent, {})
for date in sorted(period):
res += '[%s %s] ' % (date, period[date])
return res
    def get_date(self):
        ''' Get the current date
        '''
return datetime.now().strftime(DEFAULT_SERVER_DATE_FORMAT)
def get_filter(self, data):
''' Get filter selected
'''
if data is None:
data = {}
days = data.get('days', self.default_days)
return _('Active production for %s days') % days
def get_object(self, data):
''' Search all mrp elements
'''
# Readability:
cr = self.cr
uid = self.uid
context = {}
user_pool = self.pool.get('res.users')
previous_status = user_pool.set_no_inventory_status(
cr, uid, value=False, context=context)
# ---------------------------------------------------------------------
# Utility:
# ---------------------------------------------------------------------
def log_line(self, line, extra=None, mode='product'):
            ''' Utility for logging a line to the Excel file:
            '''
if extra is None:
extra = {}
if mode == 'product':
WS = self.WS[mode]
# -------------------------------------------------------------
# Write header:
# -------------------------------------------------------------
if not self.counters[mode]:
counter = self.counters[mode]
header = [
# Reference:
'Parent', 'DB padre', 'Product', 'Order ref.',
# Order quantity:
#'OC') # MA
#'B' # B total
#'Delivery') # BC
# Quantity for accounting:
'Remain to MRP', # OC
'Ready', # B net
'Stock', # Stock
# Calculated data
'TODO',
# Check
'No BOM', 'Negative',
]
header.extend(extra.keys())
col = 0
for h in header:
WS.write(counter, col, h)
col += 1
self.counters[mode] += 1
# -------------------------------------------------------------
# Write data line:
# -------------------------------------------------------------
col = 0
counter = self.counters[mode]
# Write constant data:
for item in line:
WS.write(counter, col, item)
col += 1
# Write extra data:
for k in extra:
WS.write(counter, col, extra[k])
col += 1
self.counters[mode] += 1
elif mode == 'halfwork':
pass
elif mode == 'component':
pass
elif mode == 'mrp':
WS = self.WS[mode]
# -------------------------------------------------------------
# Write header:
# -------------------------------------------------------------
if not self.counters[mode]:
counter = self.counters[mode]
header = [
# Reference:
'MRP', 'OC', 'Code', 'Maked',
]
col = 0
for h in header:
WS.write(counter, col, h)
col += 1
self.counters[mode] += 1
# -------------------------------------------------------------
# Write data line:
# -------------------------------------------------------------
col = 0
counter = self.counters[mode]
# Write constant data:
for item in line:
WS.write(counter, col, item)
col += 1
self.counters[mode] += 1
else:
pass # error
return
# ---------------------------------------------------------------------
# Procedure:
# ---------------------------------------------------------------------
self.order_month = {} # Parent distribution for month
if data is None:
data = {}
# Log part
# TODO change:
filename = '/home/administrator/photo/log/parent_product.xlsx'
WB = xlsxwriter.Workbook(filename)
extra = {
'code_check': '',
'stock_check': '',
}
self.counters = {
'product': 0,
'halfwork': 0,
'component': 0,
'mrp': 0,
}
self.WS = {
'product': WB.add_worksheet(),
'halfwork': WB.add_worksheet(),
'component': WB.add_worksheet(),
'mrp': WB.add_worksheet(),
}
days = data.get('days', self.default_days)
first_supplier_id = data.get('first_supplier_id')
# Create deadline period in report:
with_deadline = data.get('with_deadline', False)
# TODO change used for now!!!!!!
#reference_date = '2016-10-15 00:00:00'
# 04/01/2017 Change after inventory
reference_date = '2017-09-01 00:00:00' # TODO keep in parameter
# TODO manage day range
if days:
limit_date = '%s 23:59:59' % (
datetime.now() + timedelta(days=days)).strftime(
DEFAULT_SERVER_DATE_FORMAT)
else:
limit_date = False
# Pool used:
company_pool = self.pool.get('res.company')
sale_pool = self.pool.get('sale.order')
#sol_pool = self.pool.get('sale.order.line')
mrp_pool = self.pool.get('mrp.production')
_logger.warning('Range period: MRP from %s, Max open MRP <= %s' % (
reference_date, limit_date or 'no limit'))
# ---------------------------------------------------------------------
# To produce line in order open
# ---------------------------------------------------------------------
# Database
parent_todo = {}
stock_used = [] # for product and halfwork
hws = {}
order_ids = company_pool.mrp_domain_sale_order_line(
cr, uid, context=context)
for order in sale_pool.browse(cr, uid, order_ids, context=context):
for line in order.order_line: # order line
# Reset log:
extra['code_check'] = ''
extra['stock_check'] = ''
if line.mx_closed:
continue
product = line.product_id # readability
default_code = product.default_code
if not default_code:
extra['code_check'] = 'no product code'
log_line(self, [
'', '', '', order.name, '', '', '', '', '', '',
], extra)
continue # TODO raise error or log
parent = default_code[:3]
if parent not in parent_todo:
# Stock, Order to produce, has stock negative
parent_todo[parent] = [
False, # 0. Parent bom for explode
0.0, # 1. Stock status net
0.0, # 2. Order to produce # merge with 1?
0, # 3. Stock status negative (total)
0, # 4. No parent bom (total)
0.0, # 5. Produce to delivery
]
# -------------------------------------------------------------
# Populate parent database:
# -------------------------------------------------------------
# Setup parent bom fist time only (and check when not present):
parent_bom = product.parent_bom_id
if parent_bom and not parent_todo[parent][0]:
# only once
parent_todo[parent][0] = parent_bom
else:
if not parent_bom:
# Check no parent
parent_todo[parent][4] += 1
# ---------------------------------------
# Stock check (use stock qty only once!):
# ---------------------------------------
if default_code not in stock_used:
extra['stock_check'] += 'used'
stock_used.append(default_code)
stock_net = product.mx_net_qty
# Check negative stock for highlight:
if stock_net < 0:
parent_todo[parent][3] += 1
parent_todo[parent][1] += stock_net # Net in stock (once)
else:
extra['stock_check'] += 'not used'
stock_net = 0.0 # no used
# ---------------
# Check negative:
# ---------------
# Use utility function:
(oc_remain, not_delivered) = \
company_pool.mrp_order_line_to_produce(line)
parent_todo[parent][2] += oc_remain
parent_todo[parent][5] += not_delivered
# -------------------------------------------------------------
# Populate halfwork database:
# -------------------------------------------------------------
todo = oc_remain # XXX - stock_net + not_delivered
# Log line operation:
log_line(self, [
parent, parent_bom.code or '???', default_code,
order.name, oc_remain, not_delivered, stock_net, todo,
'' if parent_bom else 'X', '' if stock_net >= 0 else 'X',
], extra)
# -------------------------------------------------------------
# Deadline calendar (depend on wizard, section in report):
# -------------------------------------------------------------
if with_deadline and todo:
if parent not in self.order_month:
self.order_month[parent] = {}
if line.date_deadline:
deadline_period = line.date_deadline[2:7]
else:
deadline_period = '??'
if deadline_period in self.order_month[parent]:
self.order_month[parent][deadline_period] += todo
else:
self.order_month[parent][deadline_period] = todo
# -------------------------------------------------------------
# Halfwork from parent BOM
# -------------------------------------------------------------
for hw in parent_bom.bom_line_ids:
halfwork = hw.product_id
if halfwork.relative_type != 'half':
continue
if halfwork not in hws: # halfwork browse obj
hws[halfwork] = [
0.0, # 0. Needed
halfwork.mx_net_qty, # 1. Net (after - MRP) # TODO remove MRP ?
{}, # 2. XXX total component for check double order?
# XXX No OF
]
# Update total TODO * q. in BOM:
hws[halfwork][0] += todo * hw.product_qty
# Save total for this bom (parent and halfwork) = key
# XXX used for not order double pipes?
hws[halfwork][2][
(parent, halfwork)] = hw.product_qty
# ---------------------------------------------------------------------
# Clean HW for unload production:
# ---------------------------------------------------------------------
mrp_ids = mrp_pool.search(cr, uid, [
# State filter:
#('state', '!=', 'cancel'), # not correct !!!
# Period filter (only up not down limit)
('date_planned', '>=', reference_date),
], context=context)
# Generate MRP total component report with totals:
for mrp in mrp_pool.browse(cr, uid, mrp_ids, context=context):
for sol in mrp.order_line_ids:
product = sol.product_id
qty_maked = sol.product_uom_maked_sync_qty
# TODO better use dynamic_bom_line_ids ?
# check existence
# Log product extract as MRP
log_line(self, (
mrp.name, sol.order_id.name, product.default_code,
qty_maked), mode='mrp')
for hw in product.parent_bom_id.bom_line_ids:
halfwork = hw.product_id
if halfwork.relative_type != 'half':
continue # Not used in this report
if halfwork not in hws:
continue # TODO Raise error not in bom?
hw_q = qty_maked * hw.product_qty
hws[halfwork][1] -= hw_q # - MRP # TODO check same problem
                    # TODO check if MRP is subtracted twice here!!!
# ---------------------------------------------------------------------
# Prepare report:
# ---------------------------------------------------------------------
res = []
# Empty record
empty_A = ['' for n in range(0, 7)] # parent 7
empty_B = ['' for n in range(0, 6)] # halfwork 6
empty_C = ['' for n in range(0, 7)] # component 7
hw_present = [] # for highlight only first total in report (for orders)
for parent in sorted(parent_todo):
record = parent_todo[parent]
# -----------------------------------------------------------------
# BLOCK A:
# -----------------------------------------------------------------
# Parent data:
data_A = [
parent, # 0. Code
record[2], # 1. OC
record[1], # 2. Mag (Net stock - MRP calculated)
record[5], # 3. Produced to delivery
record[2], # XXX ex.: - record[1] + record[5], # 4. todo
record[3], # 5. tot. negative stock (for green-red light)
record[4], # 6. tot. no bom (for green-red light)
# TODO
]
if not record[0]: # parent bom present:
res.append(data_A + empty_B + empty_C)
continue
parent_first = True
for hw in record[0].bom_line_ids:
if not hw.product_id or hw.product_id.id in hw_present:
                    yet_write = True # hw already written in the report before
else:
hw_present.append(hw.product_id.id)
                    yet_write = False # first time this hw is written in the report
if not hw.product_id in hws: # hw in the list selection
continue # not in selected list create before
if parent_first:
parent_first = False
else:
data_A = empty_A # reset A
# -------------------------------------------------------------
# BLOCK B:
# -------------------------------------------------------------
halfwork = hw.product_id # readability
hw_data = hws.get(halfwork, False)
if not hw_data:
res.append(data_A + empty_B + empty_C)
continue
proposed_hw = hw_data[0] - hw_data[1]
data_B = [
hw_data[2].get(
(parent, halfwork), '?'), # total
halfwork.default_code, # hw code
hw_data[0], # Todo halfwork
hw_data[1], # Stock
proposed_hw,
yet_write, # yet write status
]
hw_first = True
for cmpt in halfwork.half_bom_ids:
if hw_first:
hw_first = False
data_AB = data_A + data_B
else:
data_AB = data_A + empty_B
# ---------------------------------------------------------
# BLOCK C:
# ---------------------------------------------------------
cmpt_net = cmpt.product_id.mx_net_qty
cmpt_of = cmpt.product_id.mx_of_in
proposed = \
proposed_hw * cmpt.product_qty - cmpt_net - cmpt_of
# Add data block directly:
res.append(data_AB + [
cmpt.product_qty, # total
cmpt.product_id.default_code, # code
proposed_hw * cmpt.product_qty,
cmpt_net,
cmpt_of,
proposed if proposed > 0.0 else '',
proposed if proposed <= 0.0 else '',
])
if hw_first: # no cmpt data (not in loop)
res.append(data_A + data_B + empty_C)
user_pool.set_no_inventory_status(
cr, uid, value=previous_status, context=context)
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 4,514,814,840,149,318,000 | 41.809804 | 95 | 0.373609 | false | 5.272398 | false | false | false |
google/makani | analysis/control/dynamics.py | 1 | 28331 | # Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python dynamics helpers.
Provides a simplified control-oriented model of the wing and tether.
"""
import collections
import copy
from makani.analysis.control import actuator_util
from makani.analysis.control import catenary
from makani.analysis.control import geometry
from makani.analysis.control import type_util
from makani.control import control_types
from makani.control import system_types
from makani.sim.physics import physics
import numpy as np
# Structure storing a force, moment, and position at which that force
# is applied.
ForceMomentPos = collections.namedtuple('ForceMomentPos',
['force', 'moment', 'pos'])
# Structure for storing forces and moment.
ForceMoment = collections.namedtuple('ForceMoment', ['force', 'moment'])
# Structure representing the inputs to the wing.
# thrust: Motor thrust [N] (1-by-1 np.matrix).
# motor_moment: Motor moments [N-m] (3-by-1 np.matrix).
# flaps: Flaps [rad] (kNumFlaps-by-1 np.matrix).
# wind_g: Wind speed [m/s] in ground coordinates (3-by-1 np.matrix).
WingInputs = type_util.MakeNamedVectorClass( # pylint: disable=invalid-name
'WingInputs', [('thrust', range(0, 1)),
('motor_moment', range(1, 4)),
('flaps', range(4, 4 + system_types.kNumFlaps)),
('wind_g', range(4 + system_types.kNumFlaps,
7 + system_types.kNumFlaps))])
class WingState(type_util.MakeStateClass(
'WingState', [('omega_b', range(0, 3)),
('dcm_g2b', range(3, 6)),
('wing_vel_g', range(6, 9)),
('wing_pos_g', range(9, 12))])):
"""Class representing the state of the wing.
Attributes:
omega_b: Body angular rates.
dcm_g2b: Ground to body rotation DCM. Increments in the DCM are represented
by an Euler vector.
wing_vel_g: Velocity of the wing in ground coordinates.
wing_pos_g: Position of the wing in ground coordinates.
"""
def Increment(self, tangent, step=1.0):
"""Return a state evolved from this state along a tangent direction.
Args:
tangent: A WingState.Tangent along which to move.
step: A scaling of how far to move.
Returns:
A new WingState.
"""
return WingState(omega_b=self.omega_b + step * tangent.domega_b,
dcm_g2b=(geometry.AxisToDcm(step * tangent.ddcm_g2b)
* self.dcm_g2b),
wing_vel_g=self.wing_vel_g + step * tangent.dwing_vel_g,
wing_pos_g=self.wing_pos_g + step * tangent.dwing_pos_g)
def Difference(self, other_state):
"""Inverse operation of Increment with a step size of 1.0."""
return WingState.Tangent(
domega_b=other_state.omega_b - self.omega_b,
ddcm_g2b=geometry.DcmToAxis(other_state.dcm_g2b * self.dcm_g2b.T),
dwing_vel_g=other_state.wing_vel_g - self.wing_vel_g,
dwing_pos_g=other_state.wing_pos_g - self.wing_pos_g)
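  # Illustrative note (not in the original file): Increment and Difference are
  # inverses, so for two states a and b, a.Increment(a.Difference(b)) recovers b
  # (the DCM update composes AxisToDcm(DcmToAxis(...)) back to the original
  # rotation), and a.Increment(tangent, step=1e-3) applies a small perturbation
  # along a WingState.Tangent.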
@type_util.RequireMatrixArguments(None, (3, 1))
def CalcAerodynamicAngles(self, wind_g):
"""Calculates (v_rel, alpha, beta) from the current wing state.
Args:
wind_g: A 3-by-1 matrix storing the wind in g coordinates.
Returns:
A tuple (v_rel, alpha, beta).
"""
return geometry.VelocitiesToAerodynamicAngles(
self.dcm_g2b, self.wing_vel_g, wind_g)
@type_util.RequireMatrixArguments((3, 1), (3, 2), None, None)
def _CalcBridleKnotPos(tether_force_b, bridle_pos, bridle_y_offset,
bridle_radius):
"""Calculate the bridle knot position in body coordinates."""
if np.linalg.norm(tether_force_b) == 0.0:
tether_force_b = np.matrix([[0.0], [0.0], [1.0]])
# Calculate the knot point location. Here we use a bridle
# coordinate system with its origin at the bridle pivot, its
# y-axis pointing toward the starboard bridle point and its z-axis
# pointed at the knot.
bridle_coord_y = bridle_pos[:, 1] - bridle_pos[:, 0]
bridle_coord_y /= np.linalg.norm(bridle_coord_y)
bridle_coord_z = copy.copy(tether_force_b)
bridle_coord_z -= bridle_coord_y * (np.transpose(bridle_coord_y)
* tether_force_b)
bridle_coord_z /= np.linalg.norm(bridle_coord_z)
bridle_coord_origin = (bridle_pos[:, 1] + bridle_pos[:, 0]) * 0.5
bridle_coord_origin[1] += bridle_y_offset
return bridle_coord_origin + bridle_coord_z * bridle_radius
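# Illustrative note (not from the original source): with zero tether force the
# direction defaults to [0, 0, 1], so the returned knot lies bridle_radius along
# body z from the midpoint of the two bridle attachment points, shifted by
# bridle_y_offset along body y.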
class MotorModel(object):
# pylint: disable=unused-argument
def CalcMotorForceMomentPos(self, v_rel, alpha, beta, omega_b,
thrust, motor_moment_r):
raise NotImplementedError()
class PureForceMomentMotorModel(MotorModel):
def __init__(self, rotor_params, pos_com_b):
self._dcm_r2b = geometry.AngleToDcm(
0.0, np.arctan2(rotor_params[0]['axis'][2],
rotor_params[0]['axis'][0]), 0.0)
self._pos_com_b = np.matrix(pos_com_b).T
# pylint: disable=unused-argument
@type_util.RequireMatrixArguments(None, None, None, None, (3, 1), (1, 1),
(3, 1))
def CalcMotorForceMomentPos(self, v_rel, alpha, beta, omega_b,
thrust, motor_moment_r):
# NOTE: This neglects motor reaction torques, and assumes that
# MixRotors cancels the non-zero torque about the center-of-mass
# that results from pure thrusting.
motor_force_r = np.matrix([[thrust[0, 0]], [0.0], [0.0]])
return ForceMomentPos(
self._dcm_r2b * motor_force_r, self._dcm_r2b * motor_moment_r,
self._pos_com_b)
class MotorMixerMotorModel(MotorModel):
"""Model the commanded thrust and moment by calling MixRotors."""
def __init__(self, rotor_databases, air_density, weights, rotor_params,
rotor_control_params, hover_flight_mode=False):
self._dcm_r2b = geometry.AngleToDcm(
0.0, np.arctan2(rotor_params[0]['axis'][2],
rotor_params[0]['axis'][0]), 0.0)
self._rotor_databases = rotor_databases
self._air_density = air_density
self._weights = weights
self._rotor_params = rotor_params
self._rotor_control_params = rotor_control_params
self._hover_flight_mode = hover_flight_mode
@type_util.RequireMatrixArguments(None, None, (3, 1), (1, 1), (3, 1))
def CalcRotorSpeeds(self, v_rel, omega_b, thrust, motor_moment_r):
thrust_moment = {
'thrust': thrust[0, 0],
'moment': [motor_moment_r[i, 0] for i in range(3)]
}
return actuator_util.MixRotors(
thrust_moment, self._weights, v_rel, [omega_b[i, 0] for i in range(3)],
control_types.kStackingStateNormal, self._hover_flight_mode,
self._air_density, self._rotor_params, self._rotor_control_params)
@type_util.RequireMatrixArguments(None, None, None, None, (3, 1), (1, 1),
(3, 1))
def CalcMotorForceMomentPos(self, v_rel, alpha, beta, omega_b,
thrust, motor_moment_r):
rotor_speeds = self.CalcRotorSpeeds(v_rel, omega_b, thrust, motor_moment_r)
total_force = np.matrix(np.zeros((3, 1)))
total_moment = np.matrix(np.zeros((3, 1)))
v_rel_b = geometry.AerodynamicAnglesToRelativeVelocity(v_rel, alpha, beta)
for i in range(rotor_speeds.shape[0]):
rotor_speed = rotor_speeds[i, 0]
if self._rotor_params[i]['dir'] == system_types.kPositiveX:
direction = 1.0
else:
direction = -1.0
rotor_velocity = direction * rotor_speed
rotor_pos_b = np.matrix(self._rotor_params[i]['pos']).T
v_freestream = np.dot(
self._dcm_r2b[:, 0].T, v_rel_b + np.cross(omega_b.T, rotor_pos_b.T).T)
v_freestream *= (1.0 - self._rotor_params[i]['local_pressure_coeff'])**0.5
rotor_thrust = self._rotor_databases[i].CalcThrust(
rotor_speed, v_freestream[0, 0], self._air_density)
rotor_torque = direction * self._rotor_databases[i].CalcTorque(
rotor_speed, v_freestream[0, 0], self._air_density)
motor_force_b = self._dcm_r2b * np.matrix([[rotor_thrust], [0.0], [0.0]])
lever_arm_moment_b = np.cross(
rotor_pos_b.T, motor_force_b.T).T
aero_moment_b = self._dcm_r2b * np.matrix([[rotor_torque], [0.0], [0.0]])
gyro_moment_b = np.cross(
self._rotor_params[i]['I'] * rotor_velocity * self._dcm_r2b[:, 0].T,
omega_b.T).T
total_force += motor_force_b
total_moment += lever_arm_moment_b + aero_moment_b + gyro_moment_b
return ForceMomentPos(
total_force, total_moment, np.matrix(np.zeros((3, 1))))
class TetherForceModel(object):
# pylint: disable=unused-argument
def CalcBodyForce(self, dcm_g2b, wing_pos_g, wing_vel_g, wind_g):
raise NotImplementedError()
class ConstantTetherForceModel(TetherForceModel):
"""Simple model of tether force as constant in ground coordinates."""
def __init__(self, force_g):
self.SetForce(force_g)
# pylint: disable=unused-argument
@type_util.RequireMatrixArguments(None, (3, 3), (3, 1), (3, 1), (3, 1))
def CalcBodyForce(self, dcm_g2b, wing_pos_g, wing_vel_g, wind_g):
"""Calculate the tether force in body coordinates.
Args:
dcm_g2b: DCM rotating ground to body coordinates (3-by-3 np.matrix).
wing_pos_g: Wing positon [m] in ground coordinates (unused
3-by-1 np.matrix).
wing_vel_g: Wing velocity [m/s] in ground coordinates (unused
3-by-1 np.matrix).
wind_g: Wind velocity [m/s] in ground coordinates (unused 3-by-1
np.matrix).
Returns:
A 3-by-1 np.matrix storing the tether force in body coordinates.
"""
return dcm_g2b * self._force_g
@type_util.RequireMatrixArguments(None, (3, 1))
def SetForce(self, force_g):
"""Update the force vector.
Args:
force_g: New tether force in ground coordinates.
"""
self._force_g = copy.copy(force_g)
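# Usage sketch (illustrative only): the model is constructed from a 3-by-1
# np.matrix in ground coordinates, e.g.
#   tether_model = ConstantTetherForceModel(np.matrix([[0.0], [0.0], [10e3]]))
# and CalcBodyForce simply rotates that constant vector into body coordinates,
# ignoring the kite position, velocity and wind arguments.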
class SimpleSpringTetherForceModel(TetherForceModel):
"""Model of tether force as a simple spring, including bridle interactions."""
def __init__(self, spring_const, system_params):
tether_params = system_params['tether']
wing_params = system_params['wing']
self._spring_const = spring_const
self._tether_length = tether_params['length']
self._tether_drag_area = (0.25 * tether_params['section_drag_coeff']
* tether_params['length']
* tether_params['outer_diameter'])
self._air_density = system_params['phys']['rho']
self._bridle_pos = np.matrix(wing_params['bridle_pos']).T
self._bridle_y_offset = wing_params['bridle_y_offset']
self._bridle_radius = wing_params['bridle_rad']
# pylint: disable=unused-argument
@type_util.RequireMatrixArguments(None, (3, 3), (3, 1), (3, 1), (3, 1))
def CalcBodyForce(self, dcm_g2b, wing_pos_g, wing_vel_g, wind_g):
"""Calculate the tether force in body coordinates.
Args:
dcm_g2b: DCM rotating ground to body coordinates (3-by-3 np.matrix).
wing_pos_g: Wing positon [m] in ground coordinates (3-by-1 np.matrix).
wing_vel_g: Wing velocity [m/s] in ground coordinates (3-by-1 np.matrix).
wind_g: Wind velocity [m/s] in ground coordinates (3-by-1 np.matrix).
Returns:
A 3-by-1 np.matrix storing the tether force in body coordinates.
"""
# This intentionally ignores the small offset from the GSG
# position for simplicity.
bridle_knot_b = _CalcBridleKnotPos(dcm_g2b * -wing_pos_g,
self._bridle_pos,
self._bridle_y_offset,
self._bridle_radius)
bridle_knot_g = wing_pos_g + dcm_g2b.T * bridle_knot_b
tension = self._spring_const * (np.linalg.norm(bridle_knot_g)
- self._tether_length)
spring_force_g = -tension * bridle_knot_g / np.linalg.norm(bridle_knot_g)
airspeed = np.linalg.norm(wing_vel_g - wind_g)
drag = 0.5 * self._air_density * airspeed**2.0 * self._tether_drag_area
drag_force_g = drag * (wind_g - wing_vel_g) / max(airspeed, 0.1)
return dcm_g2b * (spring_force_g + drag_force_g)
class CatenaryTetherForceModel(TetherForceModel):
"""Model of tether force using catenary tension and rigid-rod drag."""
def __init__(self, tether_params, gsg_pos_g, bridle_radius, g, air_density):
"""Create a catenary tether force model.
Args:
tether_params: TetherParams dictionary.
gsg_pos_g: Position [m] of the GSG in the g-frame.
bridle_radius: Bridle radius [m] of the kite.
g: Gravitational acceleration [m/s^2].
air_density: Air density [kg/m^3].
"""
self._gsg_pos_g = np.matrix(np.reshape(gsg_pos_g, (3, 1)))
self._length = tether_params['length'] + bridle_radius
self._weight = tether_params['length'] * tether_params['linear_density'] * g
self._section_drag_coeff = tether_params['section_drag_coeff']
self._outer_diameter = tether_params['outer_diameter']
self._air_density = air_density
# pylint: disable=unused-argument
@type_util.RequireMatrixArguments(None, (3, 3), (3, 1), (3, 1), (3, 1))
def CalcBodyForce(self, dcm_g2b, wing_pos_g, wing_vel_g, wind_g):
"""Calculate the tether force in body coordinates.
Args:
dcm_g2b: DCM rotating ground to body coordinates (3-by-3 np.matrix).
wing_pos_g: Wing positon [m] in ground coordinates (3-by-1 np.matrix).
wing_vel_g: Wing velocity [m/s] in ground coordinates (3-by-1 np.matrix).
wind_g: Wind velocity [m/s] in ground coordinates (unused 3-by-1
np.matrix).
Returns:
A 3-by-1 np.matrix storing the tether force in body coordinates.
"""
# Calculate catenary tension.
horizontal_distance = (wing_pos_g[0, 0]**2.0 + wing_pos_g[1, 0]**2.0)**0.5
vertical_distance = self._gsg_pos_g[2, 0] - wing_pos_g[2, 0]
(h, v) = catenary.DimensionlessTensionsFromPoint(
horizontal_distance / self._length,
vertical_distance / self._length)
azi = np.arctan2(wing_pos_g[1, 0], wing_pos_g[0, 0])
tension_g = self._weight * np.matrix(
[[-h * np.cos(azi)], [-h * np.sin(azi)], [v]])
# Calculate drag reaction force on the wing. This is calculated by modeling
# the tether as a rigid rod that is pinned at the GSG and rotating at fixed
# angular velocity.
#
# Let
# CD = cross-sectional drag coefficient
# s = diameter of the rod
# L = length of rod
# V = velocity of the free end of the rod
# rho = air density
# The drag dD along a segment of the rod with length dx at distance x from
# the fixed end is
# dD(x) = 1/2 * rho * v(x)^2 * CD * s * dx.
# Therefore,
# dD/dx = 1/2 * rho * v(x)^2 * CD * s.
# The velocity of the segment is v(x) = x/L * V, so
# dD/dx = 1/2 * rho * x^2 / L^2 * V^2 * CD * s
# From this, we obtain the differential moment about the fixed end:
# dM/dx = x * dD/dx = 1/2 * rho * x^3 / L^2 * V^2 * CD * s.
# Integrating from x=0 to x=L yields the total moment due to drag,
# M = 1/8 * rho * L^2 * V^2 * CD * s.
# Force at the fixed end induces no moment, so the drag moment must be
# entirely balanced by a reaction force at the free end (i.e. the kite).
# The magnitude of this force, R, is
# R = M / L = 1/8 * rho * L * V^2 * CD * s.
#
# Here, we treat the rod as extending from the GSG to the body frame origin,
# and we use the wing velocity normal to the rod to determine V.
gsg_to_wing_g = wing_pos_g - self._gsg_pos_g
gsg_to_wing_dir_g = gsg_to_wing_g / np.linalg.norm(gsg_to_wing_g)
normal_vel_g = (wing_vel_g
- float(wing_vel_g.T * gsg_to_wing_dir_g)
* gsg_to_wing_dir_g)
normal_vel_mag = np.linalg.norm(normal_vel_g)
drag_direction_g = -normal_vel_g / normal_vel_mag
drag_g = (1.0 / 8.0 * self._air_density * np.linalg.norm(gsg_to_wing_g)
* normal_vel_mag**2.0 * self._section_drag_coeff
* self._outer_diameter * drag_direction_g)
return dcm_g2b * (tension_g + drag_g)
class SwigAeroModel(object):
"""Swig import of the simulator aerodynamics model."""
def __init__(self):
self._aero = physics.Aero(physics.GetAeroSimParams())
@type_util.RequireMatrixArguments(None, None, None, None,
(system_types.kNumFlaps, 1), (3, 1),
None)
def CalcFMCoeff(self, alpha, beta, reynolds_number, flaps, omega_hat,
thrust_coeff):
"""Calculates force and moment coefficients from the Swig database."""
omega_hat_vec3 = physics.Vec3()
omega_hat_vec3.x = omega_hat[0, 0]
omega_hat_vec3.y = omega_hat[1, 0]
omega_hat_vec3.z = omega_hat[2, 0]
flaps_vec = physics.VecWrapper(system_types.kNumFlaps)
for i in range(system_types.kNumFlaps):
flaps_vec.SetValue(i, flaps[i, 0])
force_moment = physics.ForceMoment()
self._aero.CalcForceMomentCoeff(alpha, beta, omega_hat_vec3.this,
flaps_vec.GetVec(), reynolds_number,
force_moment.this, thrust_coeff)
force_moment_coeff = (np.matrix([[force_moment.force.x],
[force_moment.force.y],
[force_moment.force.z]]),
np.matrix([[force_moment.moment.x],
[force_moment.moment.y],
[force_moment.moment.z]]))
return force_moment_coeff
class Wing(object):
"""Simplified model of the wing for control design.
The Wing class stores parameters defined by the environment (air
density, gravitational constant), a stateless tether force model,
a stateless aerodynamic model, and a nominal orientation.
It provides functions for calculating the ODEs that govern a 6-DOF
rigid body model.
"""
def __init__(self, system_params, sim_params, aero_model, motor_model,
tether_force_model):
"""Constructs a Wing model.
Args:
system_params: A system parameters structure from mconfig.
sim_params: A simulator parameters structure from mconfig.
aero_model: A Python class implementing a function CalcFMCoeff. See
SwigAeroModel in this module as an example.
motor_model: A MotorModel.
tether_force_model: A TetherForceModel.
"""
self._wing_area = system_params['wing']['A']
self._wing_span = system_params['wing']['b']
self._wing_chord = system_params['wing']['c']
self._wing_mass = system_params['wing']['m']
self._wing_inertia_matrix = np.matrix(system_params['wing']['I']['d'])
self._pos_com_b = np.matrix(system_params['wing']['center_of_mass_pos']).T
# Bridle parameters.
self._bridle_pos = np.matrix(system_params['wing']['bridle_pos']).T
self._bridle_y_offset = system_params['wing']['bridle_y_offset']
self._bridle_radius = system_params['wing']['bridle_rad']
# Physics parameters.
self._g_g = np.matrix([[0.0], [0.0], [system_params['phys']['g']]])
self._air_density = system_params['phys']['rho']
self._dynamic_viscosity = sim_params['phys_sim']['dynamic_viscosity']
self._aero_model = aero_model
self._motor_model = motor_model
self._tether_force_model = tether_force_model
@type_util.RequireMatrixArguments(None, (3, 3))
def _CalcGravityForceMomentPos(self, dcm_g2b):
return ForceMomentPos(dcm_g2b * (self._wing_mass * self._g_g),
np.matrix(np.zeros((3, 1))), self._pos_com_b)
@type_util.RequireMatrixArguments(None, None, None, None, (3, 1), (1, 1),
(3, 1))
def _CalcMotorForceMomentPos(self, v_rel, alpha, beta, omega_b,
thrust, motor_moment):
"""Calculates the motor forces and moments."""
return self._motor_model.CalcMotorForceMomentPos(
v_rel, alpha, beta, omega_b, thrust, motor_moment)
@type_util.RequireMatrixArguments(None, (3, 3), (3, 1), (3, 1), (3, 1))
def _CalcTetherForceMomentPos(self, dcm_g2b, wing_pos_g, wing_vel_g, wind_g):
tether_force_b = self._tether_force_model.CalcBodyForce(dcm_g2b, wing_pos_g,
wing_vel_g, wind_g)
return ForceMomentPos(tether_force_b, np.matrix(np.zeros((3, 1))),
_CalcBridleKnotPos(tether_force_b, self._bridle_pos,
self._bridle_y_offset,
self._bridle_radius))
def CalcTetherForceG(self, state, inputs):
return state.dcm_g2b.T * self._tether_force_model.CalcBodyForce(
state.dcm_g2b, state.wing_pos_g, state.wing_vel_g, inputs.wind_g)
def CalcTetherTensionRollPitch(self, state, inputs):
tether_force_b = self._tether_force_model.CalcBodyForce(
state.dcm_g2b, state.wing_pos_g, state.wing_vel_g, inputs.wind_g)
return geometry.TetherForceCartToSph(tether_force_b)
@type_util.RequireMatrixArguments(None, None, None, None, (3, 1),
(system_types.kNumFlaps, 1), None)
def CalcAeroForceMomentPos(self, v_rel, alpha, beta, omega_b, flaps,
thrust_coeff):
"""Calculate the aerodynamic force and moments on the wing.
Args:
v_rel: Airspeed [m/s].
alpha: Angle-of-attack [rad].
beta: Angle-of-sideslip [rad].
omega_b: Wing body-rates [rad/s] (3-by-1 np.matrix).
flaps: Flap deflections (kNumFlaps-by-1 np.matrix).
thrust_coeff: Thrust coefficient [#] using wind turbine convention.
Returns:
(ForceMomentPos in body coordinates, force coeffs., moment coeffs.)
"""
reynolds_number = ((v_rel * self._wing_chord * self._air_density)
/ self._dynamic_viscosity)
dynamic_pressure = 0.5 * self._air_density * v_rel**2.0
length_scale = np.matrix([[self._wing_span],
[self._wing_chord],
[self._wing_span]])
omega_hat = np.multiply(omega_b, length_scale) / (2.0 * v_rel)
(cf, cm) = self._aero_model.CalcFMCoeff(alpha, beta, reynolds_number,
flaps, omega_hat, thrust_coeff)
return (ForceMomentPos(dynamic_pressure * self._wing_area * cf,
(dynamic_pressure * self._wing_area
* np.multiply(length_scale, cm)),
np.matrix(np.zeros((3, 1)))),
cf, cm)
def _BodyForceMomentPosToComForceMoment(self, force_moment_pos_list):
force = np.matrix(np.zeros((3, 1)))
moment = np.matrix(np.zeros((3, 1)))
for force_moment_pos in force_moment_pos_list:
force += force_moment_pos.force
moment += force_moment_pos.moment
moment += np.cross(force_moment_pos.pos - self._pos_com_b,
force_moment_pos.force, axis=0)
return ForceMoment(force, moment)
def CalcDeriv(self, state, inputs):
"""Calculates the derivative of the wing state vector.
Args:
state: A WingState.
inputs: A WingInputs.
Returns:
A WingState.Tangent containing the derivative of the state.
"""
euler_moment = np.cross(self._wing_inertia_matrix * state.omega_b,
state.omega_b, axis=0)
v_rel, alpha, beta = state.CalcAerodynamicAngles(inputs.wind_g)
# Fixing total thrust coefficient to 0.0 for this application.
# NOTE: By accounting for the rotor wake effect on the tail,
# we found that the synthesized gains yield worse flight quality than when
# the effect is ignored (see b/110491871 for details).
thrust_coeff = 0.0
aero_force_moment_pos, _, _ = self.CalcAeroForceMomentPos(
v_rel, alpha, beta, state.omega_b, inputs.flaps, thrust_coeff)
force_moment_com = self._BodyForceMomentPosToComForceMoment([
self._CalcGravityForceMomentPos(state.dcm_g2b),
self._CalcMotorForceMomentPos(
v_rel, alpha, beta, state.omega_b, inputs.thrust,
inputs.motor_moment),
self._CalcTetherForceMomentPos(state.dcm_g2b, state.wing_pos_g,
state.wing_vel_g, inputs.wind_g),
aero_force_moment_pos,
ForceMomentPos(np.matrix(np.zeros((3, 1))), euler_moment,
np.matrix(np.zeros((3, 1))))
])
# Calculate center-of-mass acceleration.
accel_com_g = (state.dcm_g2b.T * force_moment_com.force) / self._wing_mass
# Calculate body angular acceleration.
omega_b_dot = np.matrix(np.linalg.solve(self._wing_inertia_matrix,
force_moment_com.moment))
wing_accel_g = accel_com_g - state.dcm_g2b.T * (
np.cross(state.omega_b,
np.cross(state.omega_b, self._pos_com_b, axis=0), axis=0)
+ np.cross(omega_b_dot, self._pos_com_b, axis=0))
return WingState.Tangent(domega_b=omega_b_dot, ddcm_g2b=state.omega_b,
dwing_vel_g=wing_accel_g,
dwing_pos_g=state.wing_vel_g)
def CalcDVbCom(self, state, state_dot):
"""Calculates the rate of change of Vb for unit tests."""
return (state.dcm_g2b * state_dot.dwing_vel_g
- np.cross(state.omega_b, state.dcm_g2b * state.wing_vel_g, axis=0)
+ np.cross(state_dot.domega_b, self._pos_com_b, axis=0))
def CalcEnergy(self, state):
"""Calculates energy of the rigid body model for unit tests."""
wing_com_pos_g = state.wing_pos_g + state.dcm_g2b.T * self._pos_com_b
wing_com_vel_g = (state.wing_vel_g
+ (state.dcm_g2b.T
* np.cross(state.omega_b, self._pos_com_b, axis=0)))
return ((0.5 * np.transpose(state.omega_b)
* self._wing_inertia_matrix * state.omega_b)
+ (0.5 * self._wing_mass * np.transpose(wing_com_vel_g)
* wing_com_vel_g)
- self._wing_mass * np.transpose(self._g_g) * wing_com_pos_g)[0, 0]
def CalcLinearization(f, state, inputs, state_step_sizes, input_step_sizes):
"""Calculate the system matrices for the Wing model.
Produces a linearized model:
    f(x + dx, u + du) ~ f(x, u) + A * dx + B * du
where f is an arbitrary function, x is the wing state and u are
the wing inputs.
Args:
f: A function mapping an n-by-1 np.matrix and an m-by-1 np.matrix to
a n-by-1 np.matrix.
state: An instance of a state class from type_util.
inputs: An instance of a named vector from type_util.
state_step_sizes: A vector of step sizes for the state.
input_step_sizes: A vector of step sizes for the inputs.
Returns:
A tuple (A, B) where A and B are both of type np.matrix.
"""
num_states = state.Tangent.GetDim()
num_inputs = inputs.GetDim()
num_outputs = f(state, inputs).shape[0]
dfdx = np.matrix(np.zeros((num_outputs, num_states)))
dfdu = np.matrix(np.zeros((num_outputs, num_inputs)))
for i in range(num_states):
h = state_step_sizes[i, 0]
e = state.Tangent.FromVector(np.matrix([
[1.0 if j == i else 0.0] for j in range(num_states)]))
dfdx[:, i] = (f(state.Increment(e, step=h), inputs)
- f(state.Increment(e, step=-h), inputs)) / (2.0 * h)
for i in range(num_inputs):
h = input_step_sizes[i, 0]
e = np.matrix([[1.0 if j == i else 0.0] for j in range(num_inputs)])
dfdu[:, i] = (
f(state, inputs.FromVector(inputs.ToVector() + h * e))
- f(state, inputs.FromVector(inputs.ToVector() - h * e))) / (2.0 * h)
return (dfdx, dfdu)
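
# Illustrative sketch (not part of the original module): CalcLinearization can
# be used to pull (A, B) system matrices out of the Wing model about a trim
# point. The `wing`, `trim_state` and `trim_inputs` objects below are assumed
# to exist and to follow the interfaces used above (type_util named vectors
# with GetDim/ToVector, WingState.Tangent, WingInputs).
#
#   def _deriv_vector(state, inputs):
#     return wing.CalcDeriv(state, inputs).ToVector()
#
#   state_steps = np.matrix(np.ones((trim_state.Tangent.GetDim(), 1))) * 1e-6
#   input_steps = np.matrix(np.ones((trim_inputs.GetDim(), 1))) * 1e-6
#   a_matrix, b_matrix = CalcLinearization(_deriv_vector, trim_state,
#                                          trim_inputs, state_steps,
#                                          input_steps)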
| apache-2.0 | -7,089,321,281,316,663,000 | 41.222057 | 80 | 0.614592 | false | 3.103747 | false | false | false |
offlinehacker/sphinxcontrib.jinjadomain | sphinxcontrib/jinjadomain.py | 1 | 3445 | """
sphinxcontrib.jinjadomain
~~~~~~~~~~~~~~~~~~~~~~~~
The jinja domain for documenting jinja templates.
:copyright: Copyright 2012 by Jaka Hudoklin
:license: BSD, see LICENSE for details.
"""
import re
import os
from sphinx import addnodes
from sphinx.domains import Domain, ObjType, Index
from sphinx.directives import ObjectDescription
from sphinx.util.docfields import GroupedField, TypedField
def jinja_resource_anchor(method, path):
path = re.sub(r'[<>:/]', '-', path)
return method.lower() + '-' + path
class JinjaResource(ObjectDescription):
doc_field_types = [
TypedField('parameter', label='Parameters',
names=('param', 'parameter', 'arg', 'argument'),
typerolename='obj', typenames=('paramtype', 'type')),
]
method = "template"
def handle_signature(self, sig, signode):
method = self.method.upper() + ' '
signode += addnodes.desc_name(method, method)
signode += addnodes.desc_name(sig, sig)
fullname = "Template" + ' ' + sig
signode['method'] = self.method
signode['path'] = sig
signode['fullname'] = fullname
return (fullname, self.method, sig)
def needs_arglist(self):
return False
def add_target_and_index(self, name_cls, sig, signode):
signode['ids'].append(jinja_resource_anchor(*name_cls[1:]))
self.env.domaindata['jinja'][self.method][sig] = (self.env.docname, '')
def get_index_text(self, modname, name):
return ''
class JinjaIndex(Index):
name = 'jinjatemplates'
localname = 'templates'
shortname = 'templates'
def __init__(self, *args, **kwargs):
super(JinjaIndex, self).__init__(*args, **kwargs)
def grouping_prefix(self, path):
return os.path.split(path)[0]
def generate(self, docnames=None):
content = {}
items = ((method, path, info)
for method, routes in self.domain.routes.iteritems()
for path, info in routes.iteritems())
items = sorted(items, key=lambda item: item[1])
for method, path, info in items:
entries = content.setdefault(self.grouping_prefix(path), [])
entries.append([
path, 0, info[0],
jinja_resource_anchor(method, path), '', '', info[1]
])
content = content.items()
content.sort(key=lambda (k, v): k)
return (content, True)
class JinjaDomain(Domain):
"""Jinja domain."""
name = 'jinja'
label = 'jinja'
object_types = {
'template': ObjType('template', 'template', 'obj'),
}
directives = {
'template': JinjaResource,
}
initial_data = {
'template': {}, # path: (docname, synopsis)
}
indices = [JinjaIndex]
@property
def routes(self):
return dict((key, self.data[key]) for key in self.object_types)
def clear_doc(self, docname):
for typ, routes in self.routes.iteritems():
for path, info in routes.items():
if info[0] == docname:
del routes[path]
def get_objects(self):
for method, routes in self.routes.iteritems():
for path, info in routes.iteritems():
anchor = jinja_resource_anchor(method, path)
yield (path, path, method, info[0], anchor, 1)
def setup(app):
app.add_domain(JinjaDomain)
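
# Illustrative usage sketch (assumed, not part of the original extension):
# after adding 'sphinxcontrib.jinjadomain' to a Sphinx project's `extensions`
# list, a template can be documented in reStructuredText roughly as::
#
#   .. jinja:template:: users/profile.html
#
#      :param user: user object rendered by the template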
| bsd-2-clause | 4,406,314,289,829,284,400 | 27.708333 | 79 | 0.584906 | false | 3.892655 | false | false | false |
deuscoin-org/deuscoin-core | qa/rpc-tests/bip9-softforks.py | 1 | 8792 | #!/usr/bin/env python2
# Copyright (c) 2015 The Deuscoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import CScript, OP_1NEGATE, OP_NOP3, OP_DROP
from binascii import hexlify, unhexlify
import cStringIO
import time
import itertools
'''
This test is meant to exercise BIP forks
Connect to a single node.
regtest lock-in with 108/144 block signalling
activation after a further 144 blocks
mine 2 block and save coinbases for later use
mine 141 blocks to transition from DEFINED to STARTED
mine 100 blocks signalling readiness and 44 not in order to fail to change state this period
mine 108 blocks signalling readiness and 36 blocks not signalling readiness (STARTED->LOCKED_IN)
mine a further 143 blocks (LOCKED_IN)
test that enforcement has not triggered (which triggers ACTIVE)
test that enforcement has triggered
'''
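
# Rough arithmetic behind the block counts above (illustrative note derived
# from the regtest BIP9 parameters this test relies on): signalling is
# evaluated over 144-block periods with a 108-block (75%) threshold, so
# 100/144 signalling blocks falls short of LOCKED_IN while 108/144 reaches it,
# and enforcement (ACTIVE) begins one full 144-block period after lock-in.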
class BIP9SoftForksTest(ComparisonTestFramework):
def __init__(self):
self.num_nodes = 1
def setup_network(self):
self.nodes = start_nodes(1, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']],
binary=[self.options.testbinary])
def run_test(self):
self.test = TestManager(self, self.options.tmpdir)
self.test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
self.test.run()
def create_transaction(self, node, coinbase, to_address, amount):
from_txid = node.getblock(coinbase)['tx'][0]
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
tx = CTransaction()
f = cStringIO.StringIO(unhexlify(rawtx))
tx.deserialize(f)
tx.nVersion = 2
return tx
def sign_transaction(self, node, tx):
signresult = node.signrawtransaction(hexlify(tx.serialize()))
tx = CTransaction()
f = cStringIO.StringIO(unhexlify(signresult['hex']))
tx.deserialize(f)
return tx
def generate_blocks(self, number, version, test_blocks = []):
for i in xrange(number):
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = version
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
self.height += 1
return test_blocks
def get_bip9_status(self, key):
info = self.nodes[0].getblockchaininfo()
for row in info['bip9_softforks']:
if row['id'] == key:
return row
raise IndexError ('key:"%s" not found' % key)
def test_BIP(self, bipName, activated_version, invalidate, invalidatePostSignature):
# generate some coins for later
self.coinbase_blocks = self.nodes[0].generate(2)
self.height = 3 # height of the next block to build
self.tip = int ("0x" + self.nodes[0].getbestblockhash() + "L", 0)
self.nodeaddress = self.nodes[0].getnewaddress()
self.last_block_time = int(time.time())
assert_equal(self.get_bip9_status(bipName)['status'], 'defined')
# Test 1
# Advance from DEFINED to STARTED
test_blocks = self.generate_blocks(141, 4)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
# Test 2
# Fail to achieve LOCKED_IN 100 out of 144 signal bit 1
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(50, activated_version) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(24, 4, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
# Test 3
# 108 out of 144 signal bit 1 to achieve LOCKED_IN
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(58, activated_version) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(10, 4, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
# Test 4
        # 143 more version 4 blocks (waiting period-1)
test_blocks = self.generate_blocks(143, 4)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
# Test 5
# Check that the new rule is enforced
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[0], self.nodeaddress, 1.0)
invalidate(spendtx)
spendtx = self.sign_transaction(self.nodes[0], spendtx)
spendtx.rehash()
invalidatePostSignature(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = activated_version
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
self.height += 1
yield TestInstance([[block, True]])
assert_equal(self.get_bip9_status(bipName)['status'], 'active')
# Test 6
# Check that the new sequence lock rules are enforced
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[1], self.nodeaddress, 1.0)
invalidate(spendtx)
spendtx = self.sign_transaction(self.nodes[0], spendtx)
spendtx.rehash()
invalidatePostSignature(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = 5
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
yield TestInstance([[block, False]])
# Restart all
stop_nodes(self.nodes)
wait_deuscoinds()
shutil.rmtree(self.options.tmpdir)
self.setup_chain()
self.setup_network()
self.test.clear_all_connections()
self.test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
def get_tests(self):
for test in itertools.chain(
self.test_BIP('csv', 536870913, self.sequence_lock_invalidate, self.donothing),
self.test_BIP('csv', 536870913, self.mtp_invalidate, self.donothing),
self.test_BIP('csv', 536870913, self.donothing, self.csv_invalidate)
):
yield test
def donothing(self, tx):
return
def csv_invalidate(self, tx):
'''Modify the signature in vin 0 of the tx to fail CSV
Prepends -1 CSV DROP in the scriptSig itself.
'''
tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_NOP3, OP_DROP] +
list(CScript(tx.vin[0].scriptSig)))
def sequence_lock_invalidate(self, tx):
'''Modify the nSequence to make it fails once sequence lock rule is activated (high timespan)
'''
tx.vin[0].nSequence = 0x00FFFFFF
tx.nLockTime = 0
def mtp_invalidate(self, tx):
'''Modify the nLockTime to make it fails once MTP rule is activated
'''
# Disable Sequence lock, Activate nLockTime
tx.vin[0].nSequence = 0x90FFFFFF
tx.nLockTime = self.last_block_time
if __name__ == '__main__':
    BIP9SoftForksTest().main()
| mit | 258,358,214,866,683,870 | 38.968182 | 110 | 0.645018 | false | 3.771772 | true | false | false |
gooftroop/Zeus | ef5/lib/handlers/authentication.py | 1 | 5728 | """
"""
import base64
import logging
import subprocess
import traceback

import settings
import tornado.gen as gen
import tornado.web as web

from lib.auth import login, logout, is_authenticated
from lib.error.exceptions import AuthenticationError

# NOTE: BaseHandler, ElementXMLRPC and ElementError (used below) are assumed to
# be provided by this project's shared handler/library modules.
class LoginHandler(BaseHandler):
"""
TODO
    As designed, Tornado is stateless, which means that everything goes back to the client.
    This can be insecure, so in the future we might consider patching Tornado to check the
    session if the XSRF token, element session id, and current user are not kept in cookies.
"""
logger = logging.getLogger("auth")
def get_setup_mode(self):
try:
with open("/tmp/fcemode", "r") as myfile:
return myfile.read()
except (IOError, Exception) as e:
self.logger.warning("Unable to open setupmode file: {0}".format(e))
return None
def get(self):
# Set client headers
self.set_header('Content-Type', 'application/json')
self.set_header(settings.XSRF_TOKEN, self.xsrf_token)
response = {}
########################################################################################################
# Get setup mode
########################################################################################################
ret = self.get_setup_mode()
self.logger.info("Server in initial setup mode? {0}".format((ret is not None)))
response["initial-setup-mode"] = ret is not None
########################################################################################################
# Get serial number
########################################################################################################
response["serial-number"] = ret if ret else ""
########################################################################################################
# Get hostname
########################################################################################################
proc = subprocess.Popen(["hostname"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
if err is not None:
self.logger.error(err)
response["hostname"] = out
self.response.respond(response)
@gen.coroutine
# TODO HTTP basic auth support
def post(self):
username = None
try:
username = self.request.body_arguments["username"]
if isinstance(username, list):
username = username[0]
except (web.MissingArgumentError, KeyError) as e:
pass
# Fall through to check existence of password
try:
password = self.request.body_arguments["password"]
if isinstance(password, list):
password = password[0]
if not username:
msg = "Username is required"
self.logger.error(msg)
self.send_error(status_code=401, reason=msg)
return
except (web.MissingArgumentError, KeyError) as e:
# Attempt to use HTTP Basic Auth
basic_auth = self.request.headers.get('Authorization')
if not basic_auth or not basic_auth.startswidth('Basic'):
msg = "Username and Password are required" if not username else "Password is required"
self.logger.error(msg)
self.send_error(status_code=401, reason=msg)
return
else:
decoded = base64.decodestring(basic_auth[6:])
username, password = decoded.split(':', 2)
self.clear_header('Authorization')
try:
yield login(self, username, password)
# Now that we've authenticated, get the setup mode and serial number
# TODO the following is similar to atlasHandler.get. Let's see if we can generalize
# TODO convert strings to consts
# TODO this relies on hard coded context 'default' - this needs to be dynamic
try:
# TODO This needs to be converted to using the Element DAO
connection = ElementXMLRPC(url=settings.ELEMENT_URL,
session_id=self._new_cookie.get(settings.ELEMENT_SESSION_ID).value)
self.response.respond({"message": "Login Successful"})
except ElementError as e:
msg = "An error occurred while connecting to Element '{0}': {1}".format(type, e)
self.logger.exception(msg)
self.redirect("/atlas/api/logout", permanent=True)
except AuthenticationError as e:
# TODO we should check to see if we can resolve any messages coming from pam to readable messages
msg = "Login Failed. {0}".format(str(e))
self.logger.error(msg)
self.logger.error(traceback.format_exc())
self.send_error(status_code=401, reason=msg)
except Exception as e:
msg = "Login Failed. {0}".format(str(e))
self.logger.error(msg)
self.logger.error(traceback.format_exc())
self.send_error(status_code=401, reason=msg)
class LogoutHandler(BaseHandler):
"""
"""
logger = logging.getLogger("auth")
@gen.coroutine
def get(self):
"""
:return:
"""
try:
yield logout(self)
self.clear_cookie(settings.XSRF_TOKEN)
self.response.respond({"message": "Logout Successful"})
except AuthenticationError as e:
msg = "Logout Failed. {0}".format(str(e))
self.logger.error(msg)
self.send_error(status_code=400, reason=msg)
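
# Illustrative wiring sketch (assumed, not part of this module): these handlers
# would typically be mounted on a tornado.web.Application alongside the rest of
# the API, e.g.
#
#   application = tornado.web.Application([
#       (r"/atlas/api/login", LoginHandler),
#       (r"/atlas/api/logout", LogoutHandler),
#   ])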
| mit | -561,951,879,802,041,500 | 36.933775 | 112 | 0.516585 | false | 4.90411 | false | false | false |
jcshen007/cloudstack | systemvm/patches/debian/config/opt/cloud/bin/cs/CsHelper.py | 1 | 7368 | # -- coding: utf-8 --
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" General helper functions
for use in the configuration process
"""
import subprocess
import sys
import logging
import os.path
import re
import shutil
from netaddr import *
from pprint import pprint
PUBLIC_INTERFACES = {"router" : "eth2", "vpcrouter" : "eth1"}
STATE_COMMANDS = {"router" : "ip addr | grep eth0 | grep inet | wc -l | xargs bash -c 'if [ $0 == 2 ]; then echo \"MASTER\"; else echo \"BACKUP\"; fi'",
"vpcrouter" : "ip addr | grep eth1 | grep state | awk '{print $9;}' | xargs bash -c 'if [ $0 == \"UP\" ]; then echo \"MASTER\"; else echo \"BACKUP\"; fi'"}
def reconfigure_interfaces(router_config, interfaces):
for interface in interfaces:
cmd = "ip link show %s | grep 'state DOWN'" % interface.get_device()
for device in execute(cmd):
if " DOWN " in device:
cmd = "ip link set %s up" % interface.get_device()
# If redundant only bring up public interfaces that are not eth1.
# Reason: private gateways are public interfaces.
# master.py and keepalived will deal with eth1 public interface.
if router_config.is_redundant() and interface.is_public():
state_cmd = STATE_COMMANDS[router_config.get_type()]
logging.info("Check state command => %s" % state_cmd)
state = execute(state_cmd)[0]
logging.info("Route state => %s" % state)
if interface.get_device() != PUBLIC_INTERFACES[router_config.get_type()] and state == "MASTER":
execute(cmd)
else:
execute(cmd)
def is_mounted(name):
for i in execute("mount"):
vals = i.lstrip().split()
if vals[0] == "tmpfs" and vals[2] == name:
return True
return False
def mount_tmpfs(name):
if not is_mounted(name):
execute("mount tmpfs %s -t tmpfs" % name)
def umount_tmpfs(name):
if is_mounted(name):
execute("umount %s" % name)
def rm(name):
os.remove(name) if os.path.isfile(name) else None
def rmdir(name):
if name:
shutil.rmtree(name, True)
def mkdir(name, mode, fatal):
try:
os.makedirs(name, mode)
except OSError as e:
if e.errno != 17:
print "failed to make directories " + name + " due to :" + e.strerror
if(fatal):
sys.exit(1)
def updatefile(filename, val, mode):
""" add val to file """
handle = open(filename, 'r')
    for line in handle.readlines():
        if line.strip().lstrip() == val:
            handle.close()
            return
# set the value
handle.close()
handle = open(filename, mode)
handle.write(val)
handle.close()
def bool_to_yn(val):
if val:
return "yes"
return "no"
def get_device_info():
""" Returns all devices on system with their ipv4 ip netmask """
list = []
for i in execute("ip addr show"):
vals = i.strip().lstrip().rstrip().split()
if vals[0] == "inet":
to = {}
to['ip'] = vals[1]
to['dev'] = vals[-1]
to['network'] = IPNetwork(to['ip'])
to['dnsmasq'] = False
list.append(to)
return list
def get_domain():
for line in open("/etc/resolv.conf"):
vals = line.lstrip().split()
if vals[0] == "domain":
return vals[1]
return "cloudnine.internal"
def get_device(ip):
""" Returns the device which has a specific ip
If the ip is not found returns an empty string
"""
for i in execute("ip addr show"):
vals = i.strip().lstrip().rstrip().split()
if vals[0] == "inet":
if vals[1].split('/')[0] == ip:
return vals[-1]
return ""
def get_ip(device):
""" Return first ip on an interface """
cmd = "ip addr show dev %s" % device
for i in execute(cmd):
vals = i.lstrip().split()
if (vals[0] == 'inet'):
return vals[1]
return ""
def definedinfile(filename, val):
""" Check if val is defined in the file """
for line in open(filename):
if re.search(val, line):
return True
return False
def addifmissing(filename, val):
""" Add something to a file
if it is not already there """
if not os.path.isfile(filename):
logging.debug("File %s doesn't exist, so create" % filename)
open(filename, "w").close()
if not definedinfile(filename, val):
updatefile(filename, val + "\n", "a")
logging.debug("Added %s to file %s" % (val, filename))
return True
return False
def get_hostname():
for line in open("/etc/hostname"):
return line.strip()
def execute(command):
""" Execute command """
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
p.wait()
rc = p.returncode
logging.debug("Executed: %s - exitstatus=%s " % (command, rc))
result = p.communicate()[0]
return result.splitlines()
def save_iptables(command, iptables_file):
""" Execute command """
logging.debug("Saving iptables for %s" % command)
result = execute(command)
fIptables = open(iptables_file, "w+")
for line in result:
fIptables.write(line)
fIptables.write("\n")
fIptables.close()
def execute2(command):
""" Execute command """
logging.debug("Executing: %s" % command)
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
p.wait()
return p
def service(name, op):
execute("service %s %s" % (name, op))
logging.info("Service %s %s" % (name, op))
def start_if_stopped(name):
ret = execute2("service %s status" % name)
if ret.returncode:
execute2("service %s start" % name)
def hup_dnsmasq(name, user):
pid = ""
for i in execute("ps -ef | grep %s" % name):
vals = i.lstrip().split()
if (vals[0] == user):
pid = vals[1]
if pid:
logging.info("Sent hup to %s", name)
execute("kill -HUP %s" % pid)
else:
service("dnsmasq", "start")
def copy_if_needed(src, dest):
""" Copy a file if the destination does not already exist
"""
if os.path.isfile(dest):
return
copy(src, dest)
def copy(src, dest):
"""
copy source to destination.
"""
try:
shutil.copy2(src, dest)
except IOError:
        logging.error("Could not copy %s to %s" % (src, dest))
else:
logging.info("Copied %s to %s" % (src, dest))
| apache-2.0 | -8,724,128,362,881,490,000 | 28.007874 | 173 | 0.588626 | false | 3.691383 | true | false | false |
contactless/mqtt-rpc | python/server.py | 1 | 2415 | #!/usr/bin/python
import argparse
try:
import mosquitto
except ImportError:
import paho.mqtt.client as mosquitto
import time, random
import sys
from mqttrpc import MQTTRPCResponseManager, dispatcher
import logging
logging.getLogger().setLevel(logging.DEBUG)
@dispatcher.add_method
def foobar(**kwargs):
return kwargs["foo"] + kwargs["bar"]
class TMQTTRPCServer(object):
def __init__(self, client, driver_id):
self.client = client
self.driver_id = driver_id
def on_mqtt_message(self, mosq, obj, msg):
print msg.topic
print msg.payload
parts = msg.topic.split('/')
driver_id = parts[3]
service_id = parts[4]
method_id = parts[5]
client_id = parts[6]
response = MQTTRPCResponseManager.handle(msg.payload, service_id, method_id, dispatcher)
self.client.publish("/rpc/v1/%s/%s/%s/%s/reply" % (self.driver_id, service_id, method_id, client_id ), response.json)
def setup(self):
for service, method in dispatcher.iterkeys():
self.client.publish("/rpc/v1/%s/%s/%s" % (self.driver_id, service, method), "1", retain=True)
self.client.subscribe("/rpc/v1/%s/%s/%s/+" % (self.driver_id, service, method))
# Dispatcher is dictionary {<method_name>: callable}
dispatcher[("test", "echo")] = lambda s: s
dispatcher[("test", "add")] = lambda a, b: a + b
if __name__ =='__main__':
parser = argparse.ArgumentParser(description='Sample RPC server', add_help=False)
parser.add_argument('-h', '--host', dest='host', type=str,
help='MQTT host', default='localhost')
parser.add_argument('-u', '--username', dest='username', type=str,
help='MQTT username', default='')
parser.add_argument('-P', '--password', dest='password', type=str,
help='MQTT password', default='')
parser.add_argument('-p', '--port', dest='port', type=int,
help='MQTT port', default='1883')
args = parser.parse_args()
client = mosquitto.Mosquitto()
if args.username:
client.username_pw_set(args.username, args.password)
rpc_server = TMQTTRPCServer(client, 'Driver')
client.connect(args.host, args.port)
client.on_message = rpc_server.on_mqtt_message
rpc_server.setup()
while 1:
rc = client.loop()
if rc != 0:
break
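
# Illustrative client-side sketch (assumed, not part of this file): with the
# server above registered under driver id 'Driver', a caller publishes a
# JSON-RPC payload to the method topic and listens on the matching reply topic:
#
#   request topic: /rpc/v1/Driver/test/echo/<client_id>
#   payload:       {"jsonrpc": "2.0", "id": 1, "method": "echo",
#                   "params": ["hello"]}
#   reply topic:   /rpc/v1/Driver/test/echo/<client_id>/reply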
| mit | -7,693,251,067,194,599,000 | 24.691489 | 125 | 0.612422 | false | 3.525547 | false | false | false |
PanDAWMS/panda-server | pandaserver/taskbuffer/ErrorCode.py | 1 | 1738 | ############## errror code
# killed
EC_Kill = 100
# transfer timeout
EC_Transfer = 101
# expire
EC_Expire = 102
# aborted
EC_Aborted = 103
# wait timeout
EC_WaitTimeout = 104
# reassigned by rebrokeage
EC_Reassigned = 105
# reassigned by server-side retry
EC_Retried = 106
# retried by pilot
EC_PilotRetried = 107
# lost file (=dataservice.ErrorCode.EC_LostFile)
EC_LostFile = 110
# retried for event service
EC_EventServiceRetried = 111
# merge for event service
EC_EventServiceMerge = 112
# merge job failed
EC_MergeFailed = 113
# max attempt reached for Event Service
EC_EventServiceMaxAttempt = 114
# do nothing since other consumers are still running
EC_EventServiceWaitOthers = 115
# killed since unused and unnecessary any more
EC_EventServiceUnused = 116
# didn't process any events on WN
EC_EventServiceUnprocessed = 117
# didn't process any events on WN and last consumer
EC_EventServiceLastUnprocessed = 118
# all event ranges failed
EC_EventServiceAllFailed = 119
# associated consumer generated ES merge
EC_EventServiceKillOK = 120
# associated consumer failed
EC_EventServiceKillNG = 121
# killed for preemption
EC_EventServicePreemption = 122
# retred but didn't process any events on WN
EC_EventServiceNoEvent = 123
# input files inconsitent with JEDI
EC_EventServiceInconsistentIn = 124
# No event service queues available for new consumers
EC_EventServiceNoEsQueues = 125
# Closed in bad job status
EC_EventServiceBadStatus = 126
# failed to lock semaphore for job cloning
EC_JobCloningUnlock = 200
# worker is done before job is done
EC_WorkerDone = 300
# file not found
class EC_NotFound:
pass
# file relocated
class EC_Redirect:
def __init__(self,url):
self.url = url
| apache-2.0 | 3,461,998,897,724,639,700 | 17.688172 | 53 | 0.758343 | false | 3.361702 | false | false | false |
coteyr/home-assistant | homeassistant/components/rollershutter/demo.py | 1 | 2390 | """
Demo platform for the rollor shutter component.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/demo/
"""
from homeassistant.components.rollershutter import RollershutterDevice
from homeassistant.const import EVENT_TIME_CHANGED
from homeassistant.helpers.event import track_utc_time_change
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Demo roller shutters."""
add_devices([
DemoRollershutter(hass, 'Kitchen Window', 0),
DemoRollershutter(hass, 'Living Room Window', 100),
])
class DemoRollershutter(RollershutterDevice):
"""Represents a roller shutter."""
# pylint: disable=no-self-use
def __init__(self, hass, name, position):
self.hass = hass
self._name = name
self._position = position
self._moving_up = True
self._listener = None
@property
def name(self):
"""Returns the name of the roller shutter."""
return self._name
@property
def should_poll(self):
"""No polling needed for a demo roller shutter."""
return False
@property
def current_position(self):
"""Return the current position of the roller shutter."""
return self._position
def move_up(self, **kwargs):
"""Move the roller shutter down."""
if self._position == 0:
return
self._listen()
self._moving_up = True
def move_down(self, **kwargs):
"""Move the roller shutter up."""
if self._position == 100:
return
self._listen()
self._moving_up = False
def stop(self, **kwargs):
"""Stops the roller shutter."""
if self._listener is not None:
self.hass.bus.remove_listener(EVENT_TIME_CHANGED, self._listener)
self._listener = None
def _listen(self):
"""Listen for changes."""
if self._listener is None:
self._listener = track_utc_time_change(self.hass,
self._time_changed)
def _time_changed(self, now):
"""Track time changes."""
if self._moving_up:
self._position -= 10
else:
self._position += 10
if self._position % 100 == 0:
self.stop()
self.update_ha_state()
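
# Illustrative configuration sketch (assumed, not part of this file): the demo
# platform is normally enabled from configuration.yaml with something like
#
#   rollershutter:
#     platform: demo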
| mit | -3,810,794,242,415,817,000 | 27.795181 | 77 | 0.593305 | false | 4.149306 | false | false | false |
System25/gecosws-config-assistant | firstboot/serverconf/ChefConf.py | 1 | 3448 | # -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# This file is part of Guadalinex
#
# This software is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this package; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
__author__ = "Antonio Hernández <[email protected]>"
__copyright__ = "Copyright (C) 2011, Junta de Andalucía <[email protected]>"
__license__ = "GPL-2"
import firstboot.validation as validation
class ChefConf():
def __init__(self):
self._data = {}
self._data['chef_server_uri'] = ''
self._data['chef_validation'] = ''
self._data['chef_link'] = False
self._data['chef_link_existing'] = False
def load_data(self, conf):
msg = 'ServerConf: Key "%s" not found in the configuration file.'
try:
self.set_url(conf['chef_server_uri'])
except KeyError as e:
print msg % ('chef_server_uri',)
try:
self.set_pem(conf['chef_validation'])
except KeyError as e:
print msg % ('chef_validation',)
try:
self.set_chef_link(conf['chef_link'])
except KeyError as e:
print msg % ('chef_link',)
def validate(self):
valid = validation.is_url(self._data['chef_server_uri']) and self._data['chef_validation'] != '' and self._data['chef_link'] != None and self._data['chef_link_existing'] != None
return valid
def get_url(self):
return self._data['chef_server_uri'].encode('utf-8')
def set_url(self, url):
self._data['chef_server_uri'] = url
return self
def get_pem(self):
return self._data['chef_validation'].encode('utf-8')
def set_pem(self, pem):
self._data['chef_validation'] = pem
return self
# --- Next fields are not present in the JSON file but are
# setted on runtime by Firstboot ---
def set_chef_link_existing(self, link_existing):
self._data['chef_link_existing'] = link_existing
return self
def get_chef_link_existing(self):
return self._data['chef_link_existing']
def set_chef_link(self, chef_link):
self._data['chef_link'] = chef_link
return self
def get_chef_link(self):
return self._data['chef_link']
def get_node_name(self):
if not 'node_name' in self._data:
self._data['node_name'] = ''
return self._data['node_name'].encode('utf-8')
def set_node_name(self, node_name):
self._data['node_name'] = node_name
return self
def get_admin_name(self):
if not 'chef_admin_name' in self._data:
self._data['chef_admin_name'] = ''
return self._data['chef_admin_name'].encode('utf-8')
def set_admin_name(self, admin_name):
self._data['chef_admin_name'] = admin_name
return self
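
# Illustrative usage sketch (not part of the original module; the values below
# are placeholders):
#
#   conf = ChefConf()
#   conf.load_data({'chef_server_uri': 'https://chef.example.org',
#                   'chef_validation': '-----BEGIN CERTIFICATE-----...',
#                   'chef_link': True})
#   if conf.validate():
#       url = conf.get_url()
#       pem = conf.get_pem()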
| gpl-2.0 | -7,859,253,604,361,442,000 | 32.784314 | 185 | 0.619269 | false | 3.523517 | false | false | false |
tovmeod/anaf | minidetector/__init__.py | 1 | 2048 | from useragents import search_strings
class Middleware(object):
@staticmethod
def process_request(request):
"""Adds a "mobile" attribute to the request which is True or False
depending on whether the request should be considered to come from a
small-screen device such as a phone or a PDA"""
if "HTTP_X_OPERAMINI_FEATURES" in request.META:
# Then it's running opera mini. 'Nuff said.
# Reference from:
# http://dev.opera.com/articles/view/opera-mini-request-headers/
request.mobile = True
return None
if "HTTP_ACCEPT" in request.META:
s = request.META["HTTP_ACCEPT"].lower()
if 'application/vnd.wap.xhtml+xml' in s:
# Then it's a wap browser
request.mobile = True
return None
if "HTTP_USER_AGENT" in request.META:
# This takes the most processing. Surprisingly enough, when I
# Experimented on my own machine, this was the most efficient
# algorithm. Certainly more so than regexes.
# Also, Caching didn't help much, with real-world caches.
s = request.META["HTTP_USER_AGENT"].lower()
for ua in search_strings:
if ua in s:
request.mobile = True
return None
# Otherwise it's not a mobile
request.mobile = False
return None
def detect_mobile(view):
"""View Decorator that adds a "mobile" attribute to the request which is
True or False depending on whether the request should be considered
to come from a small-screen device such as a phone or a PDA"""
def detected(request, *args, **kwargs):
Middleware.process_request(request)
return view(request, *args, **kwargs)
detected.__doc__ = "{0!s}\n[Wrapped by detect_mobile which detects if the request is from a phone]".format(view.__doc__)
return detected
__all__ = ['Middleware', 'detect_mobile']
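
# Illustrative usage sketch (assumed, not part of this module), e.g. in a
# Django view:
#
#   from minidetector import detect_mobile
#
#   @detect_mobile
#   def index(request):
#       if request.mobile:
#           return render(request, "index_mobile.html")
#       return render(request, "index.html")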
| bsd-3-clause | 6,936,481,703,161,349,000 | 36.925926 | 124 | 0.606445 | false | 4.320675 | false | false | false |
AstroHuntsman/POCS | pocs/images.py | 1 | 7615 | import os
from astropy import units as u
from astropy import wcs
from astropy.coordinates import EarthLocation
from astropy.coordinates import FK5
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.time import Time
from collections import namedtuple
from . import PanBase
from .utils import images as img_utils
OffsetError = namedtuple('OffsetError', ['delta_ra', 'delta_dec', 'magnitude'])
class Image(PanBase):
def __init__(self, fits_file, wcs_file=None, location=None):
"""Object to represent a single image from a PANOPTES camera.
Args:
fits_file (str): Name of FITS file to be read (can be .fz)
wcs_file (str, optional): Name of FITS file to use for WCS
"""
super().__init__()
assert os.path.exists(fits_file), self.logger.warning(
'File does not exist: {}'.format(fits_file))
if fits_file.endswith('.fz'):
fits_file = img_utils.fpack(fits_file, unpack=True)
assert fits_file.lower().endswith(('.fits')), \
self.logger.warning('File must end with .fits')
self.wcs = None
self._wcs_file = None
self.fits_file = fits_file
if wcs_file is not None:
self.wcs_file = wcs_file
else:
self.wcs_file = fits_file
with fits.open(self.fits_file, 'readonly') as hdu:
self.header = hdu[0].header
assert 'DATE-OBS' in self.header, self.logger.warning(
'FITS file must contain the DATE-OBS keyword')
assert 'EXPTIME' in self.header, self.logger.warning(
'FITS file must contain the EXPTIME keyword')
# Location Information
if location is None:
cfg_loc = self.config['location']
location = EarthLocation(lat=cfg_loc['latitude'],
lon=cfg_loc['longitude'],
height=cfg_loc['elevation'],
)
# Time Information
self.starttime = Time(self.header['DATE-OBS'], location=location)
self.exptime = float(self.header['EXPTIME']) * u.second
self.midtime = self.starttime + (self.exptime / 2.0)
self.sidereal = self.midtime.sidereal_time('apparent')
self.FK5_Jnow = FK5(equinox=self.midtime)
# Coordinates from header keywords
self.header_pointing = None
self.header_ra = None
self.header_dec = None
self.header_ha = None
# Coordinates from WCS
self.pointing = None
self.ra = None
self.dec = None
self.ha = None
self.get_header_pointing()
self.get_wcs_pointing()
self._luminance = None
self._pointing = None
self._pointing_error = None
@property
def wcs_file(self):
"""WCS file name
When setting the WCS file name, the WCS information will be read,
setting the `wcs` property.
"""
return self._wcs_file
@wcs_file.setter
def wcs_file(self, filename):
if filename is not None:
try:
w = wcs.WCS(filename)
assert w.is_celestial
self.wcs = w
self._wcs_file = filename
except Exception:
self.logger.debug("Can't get WCS from FITS file (try solve_field)")
@property
def pointing_error(self):
"""Pointing error namedtuple (delta_ra, delta_dec, magnitude)
Returns pointing error information. The first time this is accessed
this will solve the field if not previously solved.
Returns:
namedtuple: Pointing error information
"""
if self._pointing_error is None:
assert self.pointing is not None, self.logger.warning(
"No world coordinate system (WCS), can't get pointing_error")
assert self.header_pointing is not None
if self.wcs is None:
self.solve_field()
mag = self.pointing.separation(self.header_pointing)
d_dec = self.pointing.dec - self.header_pointing.dec
d_ra = self.pointing.ra - self.header_pointing.ra
            self._pointing_error = OffsetError(
                d_ra.to(u.arcsec), d_dec.to(u.arcsec), mag.to(u.arcsec))
return self._pointing_error
def get_header_pointing(self):
"""Get the pointing information from the header
The header should contain the `RA-MNT` and `DEC-MNT` keywords, from which
the header pointing coordinates are built.
"""
try:
self.header_pointing = SkyCoord(ra=float(self.header['RA-MNT']) * u.degree,
dec=float(self.header['DEC-MNT']) * u.degree)
self.header_ra = self.header_pointing.ra.to(u.hourangle)
self.header_dec = self.header_pointing.dec.to(u.degree)
# Precess to the current equinox otherwise the RA - LST method will be off.
self.header_ha = self.header_pointing.transform_to(
self.FK5_Jnow).ra.to(u.hourangle) - self.sidereal
except Exception as e:
self.logger.warning('Cannot get header pointing information: {}'.format(e))
def get_wcs_pointing(self):
"""Get the pointing information from the WCS
Builds the pointing coordinates from the plate-solved WCS. These will be
compared with the coordinates stored in the header.
"""
if self.wcs is not None:
ra = self.wcs.celestial.wcs.crval[0]
dec = self.wcs.celestial.wcs.crval[1]
self.pointing = SkyCoord(ra=ra * u.degree, dec=dec * u.degree)
self.ra = self.pointing.ra.to(u.hourangle)
self.dec = self.pointing.dec.to(u.degree)
# Precess to the current equinox otherwise the RA - LST method will be off.
self.ha = self.pointing.transform_to(self.FK5_Jnow).ra.to(u.hourangle) - self.sidereal
def solve_field(self, **kwargs):
""" Solve field and populate WCS information
Args:
**kwargs (dict): Options to be passed to `get_solve_field`
"""
solve_info = img_utils.get_solve_field(self.fits_file,
ra=self.header_pointing.ra.value,
dec=self.header_pointing.dec.value,
**kwargs)
self.wcs_file = solve_info['solved_fits_file']
self.get_wcs_pointing()
# Remove some fields
for header in ['COMMENT', 'HISTORY']:
try:
del solve_info[header]
except KeyError:
pass
return solve_info
def compute_offset(self, ref_image):
assert isinstance(ref_image, Image), self.logger.warning(
"Must pass an Image class for reference")
mag = self.pointing.separation(ref_image.pointing)
d_dec = self.pointing.dec - ref_image.pointing.dec
d_ra = self.pointing.ra - ref_image.pointing.ra
return OffsetError(d_ra.to(u.arcsec), d_dec.to(u.arcsec), mag.to(u.arcsec))
##################################################################################################
# Private Methods
##################################################################################################
def __str__(self):
return "{}: {}".format(self.fits_file, self.header_pointing)
| mit | -6,846,705,688,718,471,000 | 34.584112 | 98 | 0.562049 | false | 4.02697 | false | false | false |
chop-dbhi/omop_harvest | fabfile.py | 1 | 18855 | from __future__ import print_function, with_statement
import os
import sys
import stat
import json
import etcd
from functools import wraps
from fabric.api import *
from fabric.colors import red, yellow, white, green
from fabric.contrib.console import confirm
from fabric.contrib.files import exists
__doc__ = """\
Fabric tasks for configuring, building, testing and deploying the omop_harvest application.
"""
# A few setup steps and environment checks
curdir = os.path.dirname(os.path.abspath(__file__))
config_file = os.path.join(curdir, '.project_config.json')
try:
project_config = json.loads(open(config_file, 'r').read())
except:
project_config = {
"etcd_host": env.etcd_host,
"docker_registry":env.registry_host
}
hidden_output = []
try:
venv_wrap_path = os.environ['WORKON_HOME']
except KeyError:
venv_wrap_path = None
if venv_wrap_path and os.path.exists(os.path.join(venv_wrap_path, 'omop_harvest')):
full_env_path = os.path.join(venv_wrap_path, 'omop_harvest')
else:
full_env_path = os.path.abspath('..')
venv_wrap_path = None
def get_hosts_settings():
# TODO: Will probably have to retain this to support legacy deploy.
# Load all the host settings
try:
hosts = json.loads(open(config_file).read())['hosts']
except KeyError:
abort(red('Error: No host settings are defined in the project configuration'))
# Pop the default settings
# Pre-populated defaults
# for host in hosts:
# base = base_settings.copy()
# base.update(default_settings)
# print(hosts)
# base.update(hosts[host])
# hosts[host] = base
return hosts
# ** Decorators
def virtualenv(path, venv_wrap):
"Wraps a function and prefixes the call with the virtualenv active."
if path is None:
activate = None
else:
activate = os.path.join(path, 'bin/activate')
def decorator(func):
@wraps(func)
def inner(*args, **kwargs):
if venv_wrap:
with prefix('source /usr/local/bin/virtualenvwrapper.sh'):
with prefix('workon {0}'.format('omop_harvest')):
return func(*args, **kwargs)
            elif path is not None and venv_wrap is None:
with prefix('source {0}'.format(activate)):
return func(*args, **kwargs)
else:
return func(*args, **kwargs)
return inner
return decorator
def host_context(func):
"Sets the context of the setting to the current host"
@wraps(func)
def decorator(*args, **kwargs):
hosts = get_hosts_settings()
with settings(**hosts[env.host]):
return func(*args, **kwargs)
return decorator
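
# Illustrative sketch (not part of the original fabfile): a task that needs the
# project's virtualenv plus per-host settings could combine the two decorators
# above, e.g.
#
#   @host_context
#   @virtualenv(full_env_path, venv_wrap_path)
#   def manage(cmd='check'):
#       run('./bin/manage.py {0} --settings=omop_harvest.conf.local'.format(cmd))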
# ---------------------------------------------------------------
# Configuration Commands
# ---------------------------------------------------------------
def set_configuration(noinput=False):
'''
    Takes the settings in the .project_config.json file and writes them to the
    appropriate etcd endpoint for this application.
    fab set_configuration:noinput=True will not prompt for confirmation
'''
client = etcd.Client(host=project_config['etcd_host'])
config = json.loads(open('.project_config.json', 'r').read())
if noinput or confirm("Are you sure you want to upload your local settings?"):
client.write('/applications/omop_harvest/configuration', json.dumps(config))
def get_configuration(noinput=False):
'''
Retrieves the applications settings from etcd and generates a local settings file.
fab get_configuration:noinput=True will not prompt for confirmation
'''
client = etcd.Client(host=project_config['etcd_host'])
try:
etcd_config = client.read('/applications/omop_harvest/configuration')
except KeyError:
abort(red('Error: No host settings found on etcd'))
configuration = json.loads(etcd_config.value)
if configuration == {}:
print(red('Empty configuration found. Aborting'))
sys.exit(1)
# Establish the configuration locally
if noinput or confirm('Are you sure you want to overwrite your local settings?'):
f = open('.project_config.json', 'w')
f.write(json.dumps(configuration, indent=4, sort_keys=True))
f.close()
# ---------------------------------------------------------------
# Docker Commands
# ---------------------------------------------------------------
# TODO:
# - Continuous Integration. Automatic provisioning of services
def build_container(noinput=False):
# Check git status to make sure our build hash matches our git commit
index_status = local('git status --porcelain', capture=True)
if index_status != '':
abort('Please commit or stash any changes to git before building your container')
try:
get_configuration(noinput)
except:
if not confirm('Unable to retrieve configuration. Would you like to attempt to build this container with locally available settings?'):
sys.exit(1)
git_hash = local('git rev-parse --short HEAD', capture=True)
git_branch = local('git rev-parse --abbrev-ref HEAD', capture=True)
local('docker build -t omop_harvest-{0}:{1} .'.format(git_branch, git_hash))
def test_container():
git_hash = local('git rev-parse --short HEAD', capture=True)
git_branch = local('git rev-parse --abbrev-ref HEAD', capture=True)
#local('docker run -i -t -e APP_ENV=test omop_harvest-{0}:{1} test'.format(git_branch, git_hash))
#Temporary: Anticipating new version of ATI Template
local('docker run --link memcache:mc -d -p :8000 -e CID_ENV={0} -e APP_ENV={1} omop_harvest-{2}:{3} test'.format(
env.cid_env,
env.host,
git_branch,
git_hash)
)
#
def build_and_test():
build_container(noinput=True)
test_container()
# Remote Deployment Commands
def pull_repo():
    git_branch = local('git rev-parse --abbrev-ref HEAD', capture=True)
    local('docker pull {0}/omop_harvest-{1}'.format(project_config['docker_registry'], git_branch))
def push_to_repo():
git_hash = local('git rev-parse --short HEAD', capture=True)
git_branch = local('git rev-parse --abbrev-ref HEAD', capture=True)
try:
with hide('output'):
local("docker inspect --format='{{{{.id}}}}' omop_harvest-{0}:{1}".format(git_branch, git_hash))
except:
if confirm('Could not find most most recent container. Would you like to build it?'):
build_container()
local('docker tag omop_harvest-{0}:{1} {2}/omop_harvest-{0}:{1}'.format(git_branch, git_hash, project_config['docker_registry']))
local('docker tag omop_harvest-{0}:{1} {2}/omop_harvest-{0}:latest'.format(git_branch, git_hash, project_config['docker_registry']))
local('docker push {0}/omop_harvest-{1}'.format(project_config['docker_registry'], git_branch))
local('docker rmi -f {0}/omop_harvest-{1}:{2}'.format(project_config['docker_registry'], git_branch, git_hash))
@host_context
def deploy(commit='latest'):
run('docker pull {0}/omop_harvest-{1}:{2}'.format(project_config['docker_registry'], env.git_branch, commit))
#container = run('docker run -d -p :8000 -e APP_ENV={0} {1}/omop_harvest-{2}:{3} start'.format(
# env.host,
# project_config['docker_registry'],
# env.git_branch,
# commit)
#)
#Temporary: Anticipating new version of ATI Template
container = run('docker run --hostname=omop-harvest-{2}-{3} --link memcache:mc -d -p :8000 -e CID_ENV={4} -e APP_ENV={0} {1}/omop_harvest-{2}:{3} start'.format(
env.host,
project_config['docker_registry'],
env.git_branch,
commit,
env.cid_env)
)
#
port = run("docker inspect --format='{{{{range $p, $conf := .NetworkSettings.Ports}}}}{{{{(index $conf 0).HostPort}}}} {{{{end}}}}' {0}".format(container))
commit_msg = local('git --no-pager log --oneline -1', capture = True)
auth_token = project_config['hipchat']['auth_token']
deploy_msg = 'omop_harvest-{0}:{1} now deployed at http://{2}:{3} <a href="http://{2}:{3}">Open</a> <a href="http://{4}:4001/v2/keys/applications/omop_harvest/status">Status</a> -- {5}'.format(env.git_branch, commit, env.host_string, port, project_config['etcd_host'], commit_msg)
# Notifications
local('curl -d "room_id=529405&from=deployservice&color=yellow" --data-urlencode message="{deploy_msg}" https://cbmi.hipchat.com/v1/rooms/message?auth_token={auth_token}'.format(
deploy_msg=deploy_msg,
auth_token=auth_token
))
client = etcd.Client(host=project_config['etcd_host'])
client.write('/applications/omop_harvest/status/{0}/latest_commit'.format(env.git_branch), commit)
client.write('/applications/omop_harvest/status/{0}/latest_deploy'.format(env.git_branch), 'http://{0}:{1}'.format(env.host_string, port))
print(green('Now Running at http://{0}:{1}'.format(env.host_string, port)))
@host_context
def setup_env():
"Sets up the initial environment."
parent, project = os.path.split(env.path)
if not exists(parent):
        run('mkdir -p {}'.format(parent))
with cd(parent):
if not exists(project):
run('git clone {repo_url} {project}'.format(project=project, **env))
with cd(project):
run('git checkout {git_branch}'.format(**env))
run('git pull origin {git_branch}'.format(**env))
else:
with cd(project):
run('git checkout {git_branch}'.format(**env))
run('git pull origin {git_branch}'.format(**env))
# ---------------------------------------------------------------
# Template Bootstrap Hooks
# ---------------------------------------------------------------
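# Template bootstrap hook: write the default config, make the test script executable, then syncdb, collectstatic and migrate.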
@virtualenv(full_env_path, venv_wrap_path)
def harvest_bootstrap():
# Handle Settings Configuration
# TODO:
# Perhaps at this point we go out to etcd and
# find the relavent DB connection settings if
# they exist then we use those here... otherwise
# we fall back to the default sqlite stuff
print('Setup default configuration file')
with hide(*hidden_output):
local('mv .project_config.json.sample .project_config.json')
print('Make test script executable')
mode = stat.S_IMODE(os.stat('run-tests.sh').st_mode)
executable = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
os.chmod('run-tests.sh', mode | executable)
# Sync DB
print(green('- Creating SQLiteDB.'))
with hide(*hidden_output):
local('./bin/manage.py syncdb --settings=omop_harvest.conf.local')
# Collect Static
print(green('- Collecting Static Files'))
with hide(*hidden_output):
local('./bin/manage.py collectstatic --noinput --settings=omop_harvest.conf.local')
# Migrations
print(green('- Run Migrations'))
with hide(*hidden_output):
local('./bin/manage.py migrate --noinput --settings=omop_harvest.conf.local')
# ---------------------------------------------------------------
# Testing and Continuous Integration Commands
# ---------------------------------------------------------------
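# Make sure project settings are available locally, offering to download them if missing.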
def check_for_config(noinput):
if 'project_settings' not in project_config.keys():
if noinput or confirm(red("No configuration found. Would you like to download this applications configuration?")):
get_configuration(noinput=True)
def check_for_pg(database):
'''
Check the current Docker host for an existing instance of the specified
database. If found returns the container ID.
'''
with hide('output', 'running', 'warnings'), settings(warn_only=True):
res = local("docker ps -a | awk '/{0}/ {{ print $1 }}'".format(database), capture=True)
if res:
return res.split("\n")
else:
return None
def check_for_mc():
'''
Check the current Docker host for an existing instance of memcache. If
found returns the container ID.
'''
with hide('output', 'running', 'warnings'), settings(warn_only=True):
res = local("docker ps | awk '/memcache/ { print $1 }'", capture=True)
if res:
print(green('Found Memcache running at {0}'.format(res)))
return res.split("\n")
else:
return None
def test_setup(noinput=False):
'''
Examine the project for a proper configuration file.
Examine the existing environment for Harvest app's service dependencies
    (Memcache and Postgres). If they do not exist, create them as containers,
    build the application container and run the ETL command from the application
    against the Postgres DB.
    After the data load is complete, attach the application to the Postgres
    container and to Memcache. Apply normal bootstrapping procedures (syncdb,
    migrations, collectstatic) and load a fixture containing the test user "cbmi"
    with the default password "chopchop".
'''
DB_CONTAINER_NAME = 'omop_harvest_test_db'
check_for_config(noinput)
dbs = check_for_pg(DB_CONTAINER_NAME)
if dbs:
if noinput or confirm(yellow('It looks like you might already have an instance running on this machine. Do you want to stop and remove the existing containers?')):
with hide('output', 'running'):
print(red('Stopping and removing associated Harvest application containers.'))
local("docker ps -a | awk '/(omop_harvest_test:|omop_harvest_test_db)/ { print $1 }' | xargs docker stop")
local("docker ps -a | awk '/(omop_harvest_test:|omop_harvest_test_db)/ { print $1 }' | xargs docker rm")
mc = check_for_mc()
if not mc:
with hide('output', 'running'):
print(green('Starting Memcached Container...'))
local("docker run -d --name memcache ehazlett/memcached")
with hide('output', 'running', 'warnings'):
# Spin up a fresh Postgres instance:
print(green('Starting Postgres Container...'))
        pg_container = local('docker run -p :5432 -d --name omop_harvest_test_db {registry_host}:5000/postgresql'.format(registry_host=project_config['registry_host']), capture=True)
port = local("docker inspect --format='{{{{range $p, $conf := .NetworkSettings.Ports}}}}{{{{(index $conf 0).HostPort}}}} {{{{end}}}}' {0}".format(pg_container), capture=True)
time.sleep(2)
# Create DB and User in fresh DB
print(green('Prepare Postgres DB...'))
local('export PGPASSWORD=docker && createdb -h localhost -p {port} -U docker omop_harvest'.format(port=port))
conn = psycopg2.connect(host='localhost', port=port, user='docker', password='docker', database='postgres')
conn.cursor().execute("create user omop_harvest with password 'docker'; ")
conn.commit()
conn.close()
# Build the Application Container to facilitate ETL:
print(green('Building Application Container...'))
local('docker build -t omop_harvest_test .')
# Run ETL on attached Postgres DB
print(green('Start ETL on attached DB'))
local('docker run --link omop_harvest_test_db:db -e APP_ENV=test --name omop_harvest_etl omop_harvest_test etl')
# Wait for ETL process to finish
local('docker wait omop_harvest_etl')
print(green('ETL Complete.'))
local('docker rm omop_harvest_etl')
# Start the application container
print(green('Start Application Container...'))
omop_harvest = local('docker run -d --link omop_harvest_test_db:db --link memcache:mc -p :8000 -e APP_ENV=test --name omop_harvest_test_app omop_harvest_test debug', capture=True)
omop_harvest_port = local("docker inspect --format='{{{{range $p, $conf := .NetworkSettings.Ports}}}}{{{{(index $conf 0).HostPort}}}} {{{{end}}}}' {0}".format(omop_harvest), capture=True)
# Sleep to give syncdb and migrations time to run.
time.sleep(10)
print(red('\n***\nomop_harvest Test Instance now running on: http://{0}:{1}'.format(socket.gethostname(), omop_harvest_port)))
def ci_setup(noinput=False):
"Copy down the production omop_harvest database to a fresh postgres container."
# TODO
# - Make sure configuration file exists.
    DB_CONTAINER_NAME = 'omop_harvest_ci_db'
check_for_config(noinput)
dbs = check_for_pg(DB_CONTAINER_NAME)
if dbs:
if noinput or confirm(yellow('It looks like you might already have an instance running on this machine. Do you want to stop and remove the existing containers?')):
with hide('output', 'running'):
print(red('Stopping and removing associated Harvest application containers.'))
local("docker ps -a | awk '/(omop_harvest_test:|omop_harvest_test_db)/ { print $1 }' | xargs docker stop")
local("docker ps -a | awk '/(omop_harvest_test:|omop_harvest_test_db)/ { print $1 }' | xargs docker rm")
# Spin up a fresh postgres instance:
with hide('output', 'running', 'warnings'):
print(green('Starting Postgres Container...'))
        pg_container = local('docker run -p :5432 -d --name omop_harvest_ci_db {registry_host}:5000/postgresql'.format(registry_host=project_config['registry_host']), capture=True)
port = local("docker inspect --format='{{{{range $p, $conf := .NetworkSettings.Ports}}}}{{{{(index $conf 0).HostPort}}}} {{{{end}}}}' {0}".format(pg_container), capture=True)
time.sleep(2)
print(green('Dump Production DB...'))
db = parse_db(project_config['project_settings']['production']['databases']['default'])
local('export PGPASSWORD={password} && pg_dump -h {host} -U {user} -Fc {database} > tmp.dump'.format(**db))
time.sleep(2)
print(green('Prepare Postgres DB...'))
local('export PGPASSWORD=docker && createdb -h localhost -p {port} -U docker omop_harvest'.format(port=port))
conn = psycopg2.connect(host='localhost', port=port, user='docker', password='docker', database='postgres')
conn.cursor().execute("create user omop_harvest with password 'docker'; ")
conn.commit()
conn.close()
print(green('Restoring Backup to Container...'))
local('export PGPASSWORD=docker && pg_restore -h localhost -p {port} -U docker -d omop_harvest tmp.dump'.format(port=port))
local('rm tmp.dump')
print(green('Building Application Container...'))
local('docker build -t omop_harvest_test .')
print(green('Start Application Container...'))
        omop_harvest = local('docker run -d --link omop_harvest_ci_db:db -p :8000 -e APP_ENV=ci --name omop_harvest_ci omop_harvest_test start', capture=True)
omop_harvest_port = local("docker inspect --format='{{{{range $p, $conf := .NetworkSettings.Ports}}}}{{{{(index $conf 0).HostPort}}}} {{{{end}}}}' {0}".format(omop_harvest), capture=True)
print(red('\n***\nomop_harvest Production Clone now running on: http://localhost:{0}'.format(omop_harvest_port)))
| bsd-2-clause | -3,427,390,257,368,080,400 | 42.146453 | 284 | 0.627208 | false | 3.764976 | true | false | false |
jkriege2/JKQtPlotter | lib/jkqtmathtext/resources/xits/tools/copy-math-from-amiri.py | 2 | 1252 | import sys
import fontforge
amiri = fontforge.open(sys.argv[1])
xits = fontforge.open(sys.argv[2])
amiri.em = 1000
amiri.layers[1].is_quadratic = 0
amiri.selection.all()
amiri.unlinkReferences()
names = []
alts = []
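# Collect the Arabic mathematical symbols and Arabic digit/punctuation glyphs to copy.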
for aglyph in amiri.glyphs():
u = aglyph.unicode
if (u in range(0x1EE00, 0x1EF00) or
u in range(0x0660, 0x066E) or
u in range(0x06F0, 0x06FA) or
u in range(0x0608, 0x060B) or
u == 0x060C):
names.append(aglyph.name)
for aglyph in amiri.glyphs():
for name in names:
if aglyph.name != name and aglyph.name.startswith(name + ".alt"):
alts.append(aglyph.name)
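# Copy each selected glyph's outline, width and hinting into the XITS font.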
for name in names + alts:
aglyph = amiri[name]
if aglyph.name not in xits:
xits.createChar(aglyph.unicode, aglyph.name)
xglyph = xits[aglyph.name]
aglyph.draw(xglyph.glyphPen())
xglyph.width = aglyph.width
xglyph.round()
xglyph.autoHint()
for name in alts:
base, ext = name.split(".")
if ext.startswith("alt"):
xits[base].addPosSub("'cv01' Alternate Arabic Math symbols-1", name)
elif ext.startswith("display"):
xits[base].verticalVariants = (xits[base], xits[name])
else:
print "Unknown alternate glyph:", name
xits.save()
| lgpl-2.1 | -6,084,916,335,181,398,000 | 24.55102 | 76 | 0.640575 | false | 2.904872 | false | false | false |
pcdummy/wzlobbyserver-ng | wzlobby/protocol/protocol4.py | 1 | 6682 | # -*- coding: utf-8 -*-
# vim: set et sts=4 sw=4 encoding=utf-8:
#
# This file is part of Warzone 2100.
# Copyright (C) 2011 Warzone 2100 Project
#
# Warzone 2100 is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Warzone 2100 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Warzone 2100; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
###############################################################################
# This is the V4 Variant of the Protocol - BSON.
__all__ = ['Protocol4']
from twisted.internet import defer
from twisted.python import log
from socketrpc.twisted_srpc import SocketRPCProtocol, set_serializer, Fault
from wzlobby import settings
set_serializer('jsonlib')
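# Fault codes returned to lobby clients.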
NO_GAME = -402
NOT_ACCEPTABLE = -403
WRONG_LOGIN = -404
LOGIN_REQUIRED = -405
SESSION_INVALID = -406
class Protocol4(SocketRPCProtocol):
game = None
lobbyVersion = 4
def connectionMade(self):
SocketRPCProtocol.connectionMade(self)
self.debug = settings.debug
self.gameDB = self.factory.gameDB
self.db = self.factory.db
self.authenticated = False
def dispatch_call(self, method, id, args, kwargs):
if not self.authenticated \
and settings.login_required \
and method != 'login':
log.msg('Not executing %s - login required' % method)
return defer.fail(
Fault(LOGIN_REQUIRED, "Please login first!")
)
log.msg('executing docall_%s' % method)
return SocketRPCProtocol.dispatch_call(self, method, id, args, kwargs)
def docall_login(self, username, password=None, token=None):
def check_pass_cb(result):
# Login ok
self.authenticated = True
return result
def check_pass_eb(failure):
self.authenticated = False
return defer.fail(Fault(WRONG_LOGIN, "Password login failed, unknown user or wrong password!"))
def check_token_cb(result):
# Token login ok
self.authenticated = True
return result
def check_token_eb(failure):
self.authenticated = False
return defer.fail(Fault(WRONG_LOGIN, "Token login failed, unknown user or wrong password!"))
if token is None:
d = self.db.check_user_password(username, password, self.transport.getPeer().host)
d.addCallbacks(check_pass_cb, check_pass_eb)
else:
d = self.db.check_user_token(username, token, self.transport.getPeer().host)
d.addCallbacks(check_token_cb, check_token_eb)
return d
def docall_logout(self):
self.authenticated = False
return defer.succeed("")
def docall_addGame(self, *args, **kwargs):
def checkFailed(reason):
return defer.fail(
Fault(
NOT_ACCEPTABLE,
reason.getErrorMessage()
)
)
def checkDone(result):
self.gameDB.register(game)
log.msg('new game %d: "%s" from "%s".' % (game['gameId'],
game['description'].encode('utf8'),
game['hostplayer'].encode('utf8')))
return {"gameId": game['gameId'],
"result": result}
game = self.gameDB.create(self.lobbyVersion)
# Update the game with the received data
for k, v in kwargs.iteritems():
try:
game[k] = v
except KeyError:
pass
# Add hosts ip
game['host'] = self.transport.getPeer().host
d = self.gameDB.check(game)
d.addCallback(checkDone)
d.addErrback(checkFailed)
return d
def docall_delGame(self, gameId):
game = self.gameDB.get(gameId, False)
if not game:
return defer.fail(
Fault(NO_GAME, 'Game %d does not exists' % gameId)
)
self.gameDB.remove(game)
return defer.succeed('')
def docall_addPlayer(self, gameId, slot, name, username, session):
def check_cb(result):
if result:
game['currentPlayers'] += 1
return defer.succeed('')
else:
return defer.fail(Fault(SESSION_INVALID, 'Users session is invalid!'))
game = self.gameDB.get(gameId, False)
if not game:
return defer.fail(
Fault(NO_GAME, 'Game %d does not exists' % gameId)
)
d = self.db.check_user_session(username, session)
d.addCallback(check_cb)
return d
def docall_delPlayer(self, gameId, slot):
game = self.gameDB.get(gameId, False)
if not game:
return defer.fail(
Fault(NO_GAME, 'Game %d does not exists' % gameId)
)
game['currentPlayers'] -= 1
return defer.succeed('')
def docall_updatePlayer(self, gameId, slot, name):
return defer.succeed('')
def docall_list(self, maxgames=9999):
        maxgames = int(maxgames)
games = []
for game in self.gameDB.itervalues():
# Skip empty games.
if not game['description']:
continue
games.append({
"host" : game["host"],
"port" : game["port"],
"description" : game["description"],
"currentPlayers" : game["currentPlayers"],
"maxPlayers" : game["maxPlayers"],
"multiVer" : game["multiVer"],
"wzVerMajor" : game["wzVerMajor"],
"wzVerMinor" : game["wzVerMinor"],
"isPrivate" : game["isPrivate"],
"modlist" : game["modlist"],
"mapname" : game["mapname"],
"hostplayer" : game["hostplayer"],
})
maxgames -= 1
if maxgames == 0:
                break
return defer.succeed(games)
| gpl-2.0 | 7,357,415,535,087,798,000 | 29.372727 | 107 | 0.550883 | false | 4.20516 | false | false | false |
RPGOne/Skynet | pytorch-master/torch/distributed/collectives.py | 1 | 2102 | import torch
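# Enum-like sentinel values for the supported reduction operations.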
class reduce_op(object):
SUM = object()
PRODUCT = object()
MAX = object()
MIN = object()
class group(object):
WORLD = object()
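# Thin wrapper around a low-level distributed request handle (completion check / wait).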
class _DistributedRequest(object):
def __init__(self, request):
self.request = request
def is_completed(self):
return torch._C._dist_request_is_completed(self.request)
def wait(self):
torch._C._dist_request_wait(self.request)
def get_rank():
return torch._C._dist_get_rank()
def get_num_processes():
return torch._C._dist_get_num_processes()
def isend(tensor, dst_rank):
return _DistributedRequest(torch._C._dist_isend(tensor, dst_rank))
def irecv(tensor, src_rank):
return _DistributedRequest(torch._C._dist_irecv(tensor, src_rank))
def send(tensor, dst_rank):
return torch._C._dist_send(tensor, dst_rank)
def recv(tensor, src_rank=None):
if src_rank is None:
return torch._C._dist_recv_any_source(tensor)
return torch._C._dist_recv(tensor, src_rank)
def broadcast(tensor, src_rank, group=group.WORLD):
return torch._C._dist_broadcast(tensor, src_rank, group)
def all_reduce(tensor, op=reduce_op.SUM, group=group.WORLD):
return torch._C._dist_all_reduce(tensor, op, group)
def reduce(tensor, dst_rank, op=reduce_op.SUM, group=group.WORLD):
return torch._C._dist_reduce(tensor, dst_rank, op, group)
def all_gather(tensors, tensor, group=group.WORLD):
return torch._C._dist_all_gather(tensors, tensor, group)
def gather_send(tensor, dst_rank, group=group.WORLD):
return torch._C._dist_gather_send(tensor, dst_rank, group)
def gather_recv(tensors, tensor, group=group.WORLD):
return torch._C._dist_gather_recv(tensors, tensor, group)
def scatter_send(tensors, tensor, group=group.WORLD):
return torch._C._dist_scatter_send(tensors, tensor, group)
def scatter_recv(tensor, src_rank, group=group.WORLD):
return torch._C._dist_scatter_recv(tensor, src_rank, group)
def barrier(group=group.WORLD):
return torch._C._dist_barrier(group)
def new_group(ranks):
return torch._C._dist_new_group(ranks)
| bsd-3-clause | 4,712,149,255,523,797,000 | 22.617978 | 70 | 0.685062 | false | 3.204268 | false | false | false |
getsentry/symsynd | symsynd/images.py | 1 | 4038 | import os
import bisect
from symsynd.libdebug import get_cpu_name, DebugInfo
from symsynd.exceptions import DebugInfoError
from symsynd.utils import timedsection, parse_addr
from symsynd._compat import string_types, itervalues
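# Resolve a binary image's CPU name, deriving it from cpu_type/cpu_subtype when not set explicitly.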
def get_image_cpu_name(image):
cpu_name = image.get('cpu_name')
if cpu_name is not None:
return cpu_name
return get_cpu_name(image['cpu_type'], image['cpu_subtype'])
def find_debug_images(dsym_paths, binary_images):
"""Given a list of paths and a list of binary images this returns a
dictionary of image addresses to the locations on the file system for
all found images.
"""
images_to_load = set()
with timedsection('iterimages0'):
for image in binary_images:
if get_image_cpu_name(image) is not None:
images_to_load.add(image['uuid'].lower())
images = {}
# Step one: load images that are named by their UUID
with timedsection('loadimages-fast'):
for uuid in list(images_to_load):
for dsym_path in dsym_paths:
fn = os.path.join(dsym_path, uuid)
if os.path.isfile(fn):
images[uuid] = fn
images_to_load.discard(uuid)
break
# Otherwise fall back to loading images from the dsym bundle. Because
    # this loading strategy is pretty slow we don't actually want to use it
# unless we have a path that looks like a bundle. As a result we
# find all the paths which are bundles and then only process those.
if images_to_load:
slow_paths = []
for dsym_path in dsym_paths:
if os.path.isdir(os.path.join(dsym_path, 'Contents')):
slow_paths.append(dsym_path)
with timedsection('loadimages-slow'):
for dsym_path in slow_paths:
dwarf_base = os.path.join(dsym_path, 'Contents',
'Resources', 'DWARF')
if os.path.isdir(dwarf_base):
for fn in os.listdir(dwarf_base):
# Looks like a UUID we loaded, skip it
if fn in images:
continue
full_fn = os.path.join(dwarf_base, fn)
try:
di = DebugInfo.open_path(full_fn)
except DebugInfoError:
continue
for variant in di.get_variants():
uuid = str(variant.uuid)
if uuid in images_to_load:
images[uuid] = full_fn
images_to_load.discard(uuid)
rv = {}
# Now resolve all the images.
with timedsection('resolveimages'):
for image in binary_images:
cpu_name = get_image_cpu_name(image)
if cpu_name is None:
continue
uid = image['uuid'].lower()
if uid not in images:
continue
rv[parse_addr(image['image_addr'])] = images[uid]
return rv
class ImageLookup(object):
"""Helper object to locate images."""
def __init__(self, images):
self._image_addresses = []
self.images = {}
for img in images:
img_addr = parse_addr(img['image_addr'])
self._image_addresses.append(img_addr)
self.images[img_addr] = img
self._image_addresses.sort()
def iter_images(self):
return itervalues(self.images)
def get_uuids(self):
return list(self.iter_uuids())
def iter_uuids(self):
for img in self.iter_images():
yield img['uuid']
def find_image(self, addr):
"""Given an instruction address this locates the image this address
is contained in.
"""
idx = bisect.bisect_left(self._image_addresses, parse_addr(addr))
if idx > 0:
return self.images[self._image_addresses[idx - 1]]
| bsd-3-clause | -2,597,764,121,753,902,000 | 34.113043 | 75 | 0.552006 | false | 4.167183 | false | false | false |
PieterMostert/Lipgloss | view/pretty_names.py | 1 | 1617 | # LIPGLOSS - Graphical user interface for constructing glaze recipes
# Copyright (C) 2017 Pieter Mostert
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# version 3 along with this program (see LICENCE.txt). If not, see
# <http://www.gnu.org/licenses/>.
# Contact: [email protected]
# Construct prettify function
pretty_dict = {'SiO2':'SiO\u2082',
'Al2O3':'Al\u2082O\u2083',
'B2O3':'B\u2082O\u2083',
'Li2O':'Li\u2082O',
'Na2O':'Na\u2082O',
'K2O':'K\u2082O',
'P2O5':'P\u2082O\u2085',
'Fe2O3':'Fe\u2082O\u2083',
'TiO2':'TiO\u2082',
'MnO2':'MnO\u2082',
'SiO2_Al2O3':'SiO\u2082 : Al\u2082O\u2083',
'cost':'Cost',
'mass_perc_':'% weight',
'mole_perc_':'% mole'}
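# Return the display form of a key, falling back to the raw text when no mapping exists.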
def prettify(text):
try:
return pretty_dict[text]
except:
return text
def pretty_entry_type(text):
if text == 'um':
return ' UMF'
elif text == 'ma':
return ' % weight'
elif text == 'mo':
return ' % mole'
else:
return ''
| gpl-3.0 | 344,815,602,470,841,860 | 32 | 70 | 0.594929 | false | 3.273279 | false | false | false |
gsnbng/erpnext | erpnext/patches/v4_2/fix_gl_entries_for_stock_transactions.py | 2 | 2129 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import print_function, unicode_literals
import frappe
from frappe.utils import flt
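# Repost stock balances, then rebuild GL entries for stock vouchers whose stock value and account balance have diverged.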
def execute():
from erpnext.stock.stock_balance import repost
repost(allow_zero_rate=True, only_actual=True)
frappe.reload_doctype("Account")
warehouse_account = frappe.db.sql("""select name, master_name from tabAccount
where ifnull(account_type, '') = 'Warehouse'""")
if warehouse_account:
warehouses = [d[1] for d in warehouse_account]
accounts = [d[0] for d in warehouse_account]
stock_vouchers = frappe.db.sql("""select distinct sle.voucher_type, sle.voucher_no
from `tabStock Ledger Entry` sle
where sle.warehouse in (%s)
order by sle.posting_date""" %
', '.join(['%s']*len(warehouses)), tuple(warehouses))
rejected = []
for voucher_type, voucher_no in stock_vouchers:
stock_bal = frappe.db.sql("""select sum(stock_value_difference) from `tabStock Ledger Entry`
where voucher_type=%s and voucher_no =%s and warehouse in (%s)""" %
('%s', '%s', ', '.join(['%s']*len(warehouses))), tuple([voucher_type, voucher_no] + warehouses))
account_bal = frappe.db.sql("""select ifnull(sum(ifnull(debit, 0) - ifnull(credit, 0)), 0)
from `tabGL Entry`
where voucher_type=%s and voucher_no =%s and account in (%s)
group by voucher_type, voucher_no""" %
('%s', '%s', ', '.join(['%s']*len(accounts))), tuple([voucher_type, voucher_no] + accounts))
if stock_bal and account_bal and abs(flt(stock_bal[0][0]) - flt(account_bal[0][0])) > 0.1:
try:
print(voucher_type, voucher_no, stock_bal[0][0], account_bal[0][0])
frappe.db.sql("""delete from `tabGL Entry`
where voucher_type=%s and voucher_no=%s""", (voucher_type, voucher_no))
voucher = frappe.get_doc(voucher_type, voucher_no)
voucher.make_gl_entries()
frappe.db.commit()
except Exception as e:
print(frappe.get_traceback())
rejected.append([voucher_type, voucher_no])
frappe.db.rollback()
print("Failed to repost: ")
print(rejected)
| agpl-3.0 | -6,838,263,425,813,662,000 | 38.425926 | 100 | 0.672147 | false | 3.126285 | false | false | false |
alessio/prey | platform/linux/prey-config.py | 1 | 21957 | #!/usr/bin/env python
################################################
# Prey Configurator for Linux
# By Tomas Pollak
# (c) 2010 - Fork Ltd. (usefork.com)
################################################
# if having trouble with the GTK theme as root, do this:
# sudo ln -s ~/.themes/ /root/.themes
################################################
# base includes
################################################
import pygtk
pygtk.require("2.0")
import gtk
import os
# from xml.dom.minidom import parseString
import re
import urllib
app_name = 'prey-config'
lang_path = 'lang'
script_path = os.sys.path[0]
################################################
# gettext localization
################################################
import locale
import gettext
# locale.setlocale(locale.LC_ALL, '')
# locale.bindtextdomain(app_name, lang_path)
gettext.bindtextdomain(app_name, lang_path)
gettext.textdomain(app_name)
_ = gettext.gettext
################################################
# vars and such
################################################
PREY_PATH = '/usr/share/prey'
CONFIG_FILE = PREY_PATH + '/config'
CONTROL_PANEL_URL = 'http://control.preyproject.com'
CONTROL_PANEL_URL_SSL = 'https://control.preyproject.com'
GUEST_ACCOUNT_NAME = 'guest_account'
VERSION = os.popen("cat " + PREY_PATH + "/version 2> /dev/null").read().strip().replace('version=', '').replace("'",'')
PAGES = ['report_options', 'control_panel_options', 'new_user', 'existing_user', 'existing_device', 'standalone_options']
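# PAGES mirrors the page order of the reporting-mode notebook defined in prey-config.glade.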
class PreyConfigurator(object):
################################################
# helper functions
################################################
def get(self, name):
return self.root.get_object(name)
def text(self, name):
return self.get(name).get_text()
def checkbox(self, name):
if self.get(name).get_active() == True:
return 'y'
else:
return 'n'
################################################
# validations
################################################
def validate_email(self, string):
if len(string) > 7:
if re.match("^.+\\@(\\[?)[a-zA-Z0-9\\-\\.]+\\.([a-zA-Z]{2,3}|[0-9]{1,3})(\\]?)$", string) != None:
return True
return False
def validate_fields(self):
if self.text('user_name') == '':
self.show_alert(_("Empty name!"), _("Please type in your name."))
return False
if self.validate_email(self.text('email')) == False:
self.show_alert(_("Invalid email"), _("Please make sure the email address you typed is valid."))
return False
if len(self.text('password')) < 6:
self.show_alert(_("Bad password"), _("Password should contain at least 6 chars. Please try again."))
return False
elif self.text('password') != self.text('password_confirm'):
self.show_alert(_("Passwords don't match"), _("Please make sure both passwords match!"))
return False
return True
################################################
# dialogs
################################################
def show_alert(self, title, message, quit = False):
dialog = gtk.MessageDialog(
parent = None,
flags = gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
type = gtk.MESSAGE_INFO,
buttons = gtk.BUTTONS_OK,
message_format = message)
dialog.set_title(title)
if quit == True:
dialog.connect('response', lambda dialog, response: gtk.main_quit())
else:
dialog.connect('response', lambda dialog, response: dialog.destroy())
self.center_dialog(dialog)
dialog.show()
def show_question(self, title, message):
dialog = gtk.MessageDialog(
parent = None,
flags = gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
type = gtk.MESSAGE_QUESTION,
buttons = gtk.BUTTONS_YES_NO,
message_format = message)
dialog.set_title(title)
self.center_dialog(dialog)
response = dialog.run()
dialog.destroy()
return response
def show_about(self):
dialog = self.get('about_prey_config')
self.center_dialog(dialog)
dialog.show()
def close_about(self, dialog, response):
dialog.hide()
def center_dialog(self, dialog):
if 'window' in self.__dict__:
dialog.set_transient_for(self.window)
dialog.set_position(gtk.WIN_POS_CENTER_ON_PARENT)
################################################
# window and widget management
################################################
def get_page_name(self):
return PAGES[self.pages.get_current_page()]
def toggle_pg3_next_apply(self, button):
button_next = self.get('button_next')
button_apply = self.get('button_apply')
if self.get('use_existing_device').get_active() == False:
button_next.hide()
button_apply.show()
button_apply.grab_default()
else:
button_apply.hide()
button_next.show()
button_next.grab_default()
def next_page(self, button):
page_name = self.get_page_name()
increment = 1
if page_name == 'control_panel_options' and self.get('new_user_option').get_active() == False:
increment = 2
if page_name == 'report_options':
if self.get('reporting_mode_cp').get_active() == True:
if self.current_api_key != '':
response = self.show_question(_("Hold your horses!"), _("Your device seems to be already synchronized with the Control Panel! Do you want to re-setup your account? (Not recommended)"))
if response == gtk.RESPONSE_NO:
return
else:
increment = 5
if page_name == 'existing_user': # then we are going to select an exising device
if not self.get_existing_user(True):
# login didn't work, so don't go to next page
return
self.pages.set_current_page(self.pages.get_current_page() + increment)
self.toggle_buttons(button, None, 1)
def prev_page(self, button):
page_name = self.get_page_name()
decrement = 1
if page_name == 'existing_user':
decrement = 2
elif page_name == 'standalone_options':
decrement = 5
if self.pages.get_current_page() != 0:
self.pages.set_current_page(self.pages.get_current_page() - decrement)
self.toggle_buttons(button, None, 1)
def toggle_buttons(self, button, tab, tab_number):
button_prev = self.get('button_prev')
button_next = self.get('button_next')
button_apply = self.get('button_apply')
if tab_number == 0: #main settings tab
button_prev.hide()
button_next.hide()
button_apply.show()
self.hide_ssl()
else:
page_name = self.get_page_name()
if page_name == 'report_options':
button_prev.hide()
else:
button_prev.show()
if page_name == 'report_options' or page_name == 'control_panel_options' or (page_name == 'existing_user' and self.get('use_existing_device').get_active() == True):
button_apply.hide()
button_next.show()
button_next.grab_default()
else:
button_next.hide()
button_apply.show()
button_apply.grab_default()
if self.get_page_name() == 'new_user' or self.get_page_name() == 'existing_user':
self.show_ssl()
else:
self.hide_ssl()
def hide_ssl(self):
self.get('icon_ssl').hide()
self.get('lbl_ssl').hide()
def show_ssl(self):
self.get('icon_ssl').show()
self.get('lbl_ssl').show()
def set_default_action(self,button,ctrl):
button_cancel = self.get('button_cancel')
cancel_has_default = button_cancel.flags() & gtk.HAS_DEFAULT
button_prev = self.get('button_prev')
prev_has_default = button_prev.flags() & gtk.HAS_DEFAULT
button_next = self.get('button_next')
button_apply = self.get('button_apply')
if not cancel_has_default and not prev_has_default:
if button_next.flags() & gtk.VISIBLE:
button_next.grab_default()
else:
button_apply.grab_default()
def ensure_visible(self,widget,event): #ensure the widget focused is visible in the scroll window
self.get('delay').set_name('delay')
self.get('extended_headers').set_name('extended_headers')
widget_name = widget.get_name()
scrollwindow = self.get('main_settings_scrollwindow')
internal_height = self.get('main_settings').get_size()[1]
port_height = scrollwindow.allocation.height
port_vadjust = scrollwindow.get_vadjustment()
port_posn = port_vadjust.value
widget_posn = widget.allocation.y
widget_height = widget.allocation.height
if (widget_posn - port_posn) >= 0 and (widget_posn + widget_height - port_posn) <= port_height:
#widget is fully visible (even if its description or icon is not), so do nothing
return False
# for now we know there are only two possible hidden widgets so we scroll all the way up or all the way down
# if we add options to this page we will have to scroll differently
if widget_name == 'delay':
#scroll to top
port_vadjust.set_value(0)
elif widget_name == 'extended_headers':
#scroll to bottom
port_vadjust.set_value(internal_height - port_height)
return True
def key_pressed(self, widget, event):
# show about dialog on F1 keypress
if (event.keyval == gtk.keysyms.F1) \
and (event.state & gtk.gdk.CONTROL_MASK) == 0 \
and (event.state & gtk.gdk.SHIFT_MASK) == 0:
self.show_about()
return True
return False
################################################
# setting getting
################################################
def prey_exists(self):
if not os.path.exists(PREY_PATH + '/core'):
self.show_alert(_("Prey not installed"), _("Couldn't find a Prey installation on this system. Sorry."), True)
else:
return True
def is_config_writable(self):
command = 'if [ ! -w "'+PREY_PATH+'/config" ]; then echo 1; fi'
no_access = os.popen(command).read().strip()
if no_access == '1':
self.show_alert(_("Unauthorized"), _("You don't have access to manage Prey's configuration. Sorry."), True)
else:
return True
def get_setting(self, var):
command = 'grep \''+var+'=\' '+CONFIG_FILE+' | sed "s/'+var+'=\'\(.*\)\'/\\1/"'
return os.popen(command).read().strip()
def get_current_settings(self):
self.current_delay = os.popen("crontab -l | grep prey | cut -c 3-4").read()
if not self.current_delay: self.current_delay = 20
self.current_auto_connect = self.get_setting('auto_connect')
self.current_extended_headers = self.get_setting('extended_headers')
self.current_guest_account = self.guest_account_exists()
self.current_lang = self.get_setting('lang')
self.current_check_url = self.get_setting('check_url')
self.current_post_method = self.get_setting('post_method')
self.current_api_key = self.get_setting('api_key')
self.current_device_key = self.get_setting('device_key')
self.current_mail_to = self.get_setting('mail_to')
self.current_smtp_server = self.get_setting('smtp_server')
self.current_smtp_username = self.get_setting('smtp_username')
def guest_account_exists(self):
result = os.popen('id ' + GUEST_ACCOUNT_NAME + ' 2> /dev/null').read()
if result.find("uid"):
return False
else:
return True
def toggle_guest_account(self, enabled):
if enabled:
# create user and leave password blank
os.system("useradd -m " + GUEST_ACCOUNT_NAME + "; passwd -d " + GUEST_ACCOUNT_NAME)
# Authorize login with no passwords in gdm
os.system("sed -i 's/PasswordRequired=false/#PasswordRequired=false/' /etc/gdm/gdm.conf")
# Authorize login with no passwords in pam
os.system("sed -i 's/nullok_secure/nullok/' /etc/pam.d/common-auth")
else:
os.system("userdel -r " + GUEST_ACCOUNT_NAME)
os.system("sed -i 's/#PasswordRequired=false/PasswordRequired=false/' /etc/gdm/gdm.conf")
os.system("sed -i 's/nullok/nullok_secure/' /etc/pam.d/common-auth")
def display_real_settings(self):
self.get('delay').set_value(int(self.current_delay))
self.get('guest_account').set_active(self.current_guest_account)
if self.current_auto_connect == 'y':
self.get('auto_connect').set_active(True)
if self.current_extended_headers == 'y':
self.get('extended_headers').set_active(True)
self.get('check_url').set_text(self.current_check_url)
self.get('mail_to').set_text(self.current_mail_to)
self.get('smtp_server').set_text(self.current_smtp_server)
self.get('smtp_username').set_text(self.current_smtp_username)
if self.current_post_method == 'email':
self.get('reporting_mode_standalone').set_active(True)
def check_if_configured(self):
if self.current_post_method == 'http' and self.current_api_key == '':
self.show_alert(_('Welcome!'), _("It seems this is the first time you run this setup. Please set up your reporting method now, otherwise Prey won't work!"))
################################################
# setting settings
################################################
def save(self, param, value):
if param == 'check_url': value = value.replace('/', '\/')
command = 'sed -i -e "s/'+param+'=\'.*\'/'+param+'=\''+value+'\'/" '+ CONFIG_FILE
os.system(command)
def apply_settings(self, button):
self.get('button_apply').set_label(_("Saving..."))
if self.get("main_tabs").get_current_page() == 0: # main settings page
self.apply_main_settings()
else:
page_name = self.get_page_name()
if page_name == 'new_user':
if self.validate_fields():
self.create_user()
elif page_name == "existing_user": # this is an apply event, so we are creating a new device (no "advanced" device selection)
self.get_existing_user(False)
elif page_name == "existing_device":
self.apply_device_settings()
elif page_name == "standalone_options":
self.apply_standalone_settings()
self.get('button_apply').set_label('gtk-apply')
def apply_main_settings(self):
# save('lang', text('lang'))
self.save('auto_connect', self.checkbox('auto_connect'))
self.save('extended_headers', self.checkbox('extended_headers'))
if((self.checkbox('guest_account') == 'y') != self.current_guest_account):
self.toggle_guest_account(self.checkbox('guest_account') == 'y')
# check and change the crontab interval
new_delay = self.get('delay').get_value_as_int()
if new_delay != int(self.current_delay):
# print 'Updating delay in crontab...'
os.system('(crontab -l | grep -v prey; echo "*/'+str(new_delay)+' * * * * /usr/share/prey/prey.sh > /var/log/prey.log") | crontab -')
if self.check_if_configured == False:
self.show_alert(_("All good."), _("Configuration saved. Remember you still need to set up your posting method, otherwise Prey won't work!"))
else:
self.show_alert(_("All good."), _("Configuration saved!"), True)
def apply_control_panel_settings(self):
if self.current_post_method != 'http':
self.save('post_method', 'http')
if self.current_check_url != CONTROL_PANEL_URL:
self.save('check_url', CONTROL_PANEL_URL)
# we could eventually use the email as a checking method to remove prey
# i.e. "under which email was this account set up?"
# self.save('mail_to', self.email)
self.save('api_key', self.api_key)
if self.device_key != "":
self.save('device_key', self.device_key)
def apply_standalone_settings(self):
if self.current_post_method != 'email':
self.save('post_method', 'email')
self.save('check_url', self.text('check_url'))
self.save('mail_to', self.text('mail_to'))
self.save('smtp_server', self.text('smtp_server'))
self.save('smtp_username', self.text('smtp_username'))
smtp_password = self.text('smtp_password')
if smtp_password != '':
encoded_pass = os.popen('echo -n "'+ smtp_password +'" | openssl enc -base64').read().strip()
self.save('smtp_password', encoded_pass)
self.exit_configurator()
def exit_configurator(self):
self.run_prey()
self.show_alert(_("Success"), _("Configuration saved! Your device is now setup and being tracked by Prey. Happy hunting!"), True)
def run_prey(self):
os.system(PREY_PATH + '/prey.sh > /var/log/prey.log &')
################################################
# control panel api
################################################
def report_connection_issue(self):
self.show_alert(_("Problem connecting"), _("We seem to be having a problem connecting to your Control Panel. This is likely a temporary issue. Please try again in a few moments."))
def user_has_available_slots(self, string):
matches = re.search(r"<available_slots>(\w*)</available_slots>", string)
if matches and int(matches.groups()[0]) > 0:
return True
else:
return False
def get_api_key(self, string):
matches = re.search(r"<key>(\w*)</key>", string)
if matches:
self.api_key = matches.groups()[0]
def get_device_keys(self, string, has_available_slots):
hostname = os.popen("hostname").read().strip()
devices = self.get('device')
index = -1
chosen = index
liststore = gtk.ListStore(str,str)
devices.clear()
matches = re.findall(r"<device>\s*<key>(\w*)</key>.*?<title>([\s\w]*)</title>\s*</device>", string, re.DOTALL)
for match in matches:
index += 1
key = match[0]
title = match[1]
liststore.append([title,key])
if key == self.current_device_key: #set the choice because we have a matching device key
chosen = index
			elif title.lower() == hostname.lower() and chosen < 0: #set the choice because we likely have a matching title (but device key takes precedence)
chosen = index
if index < 0:
#self.get('create_new_device').set_active(True)
self.show_alert(_("No devices exist"), _("There are no devices currently defined in your Control Panel.\n\nPlease select the option to create a new device."))
return False
devices.set_model(liststore)
cell = gtk.CellRendererText()
devices.pack_start(cell, True)
devices.add_attribute(cell, 'text', 0)
devices.set_active(chosen)
return True
def create_user(self):
self.email = self.text('email')
params = urllib.urlencode({'user[name]': self.text('user_name'), 'user[email]': self.email, 'user[password]': self.text('password'), 'user[password_confirmation]' : self.text('password_confirm')})
# params = 'user[name]='+self.text('user_name')+'&user[email]='+self.email+'&user[password]='+self.text('password')+'&user[password_confirmation]='+self.text('password_confirm')
result = os.popen('curl -i -s -k --connect-timeout 5 '+ CONTROL_PANEL_URL_SSL + '/users.xml -d \"'+params+'\"').read().strip()
if result.find("<key>") != -1:
self.get_api_key(result)
self.device_key = ""
elif result.find("Email has already been taken") != -1:
self.show_alert(_("Email has already been taken"), _("That email address already exists! If you signed up previously, please go back and select the Existing User option."))
return
else:
self.show_alert(_("Couldn't create user!"), _("There was a problem creating your account. Please make sure the email address you entered is valid, as well as your password."))
return
self.apply_control_panel_settings()
self.run_prey()
self.show_alert(_("Account created!"), _("Your account has been succesfully created and configured in Prey's Control Panel.\n\nPlease check your inbox now, you should have received a verification email."), True)
def get_existing_user(self, show_devices):
self.email = self.text('existing_email')
password = self.text('existing_password')
result = os.popen('curl -i -s -k --connect-timeout 5 '+ CONTROL_PANEL_URL_SSL + '/profile.xml -u '+self.email+":'"+password+"'").read().strip()
if result.find('401 Unauthorized') != -1:
self.show_alert(_("User does not exist"), _("Couldn't log you in. Remember you need to activate your account opening the link we emailed you.\n\nIf you forgot your password please visit preyproject.com."))
return
if result.find("<user>") != -1:
self.get_api_key(result)
else:
self.report_connection_issue()
return False
has_available_slots = self.user_has_available_slots(result)
if not has_available_slots and not show_devices:
self.show_alert(_("Not allowed"), _("It seems you've reached your limit for devices!\n\nIf you had previously added this PC, you should select the \"Device already exists\" option to select the device from a list of devices you have already defined.\n\nIf this is a new device, you can also upgrade to a Pro Account to increase your slot count and get access to additional features. For more information, please check\nhttp://preyproject.com/plans."))
return False
if show_devices:
result = os.popen('curl -i -s -k --connect-timeout 5 '+ CONTROL_PANEL_URL_SSL + '/devices.xml -u '+self.email+":'"+password+"'").read().strip()
if result.find("</devices>") != -1:
return self.get_device_keys(result,has_available_slots)
else:
self.report_connection_issue()
return False
else:
self.device_key = ""
self.apply_control_panel_settings()
self.exit_configurator()
def apply_device_settings(self):
devices = self.get('device')
model = devices.get_model()
self.device_key = model.get_value(devices.get_active_iter(),1)
self.apply_control_panel_settings()
self.exit_configurator()
def __init__(self):
if not self.prey_exists() or not self.is_config_writable():
gtk.main()
exit(1)
self.get_current_settings()
builder = gtk.Builder()
builder.set_translation_domain(app_name)
builder.add_from_file(script_path + "/prey-config.glade")
builder.connect_signals({
"on_window_destroy" : gtk.main_quit,
"prev_page" : self.prev_page,
"next_page" : self.next_page,
"toggle_buttons" : self.toggle_buttons,
"apply_settings" : self.apply_settings,
"toggle_pg3_next_apply" : self.toggle_pg3_next_apply,
"set_default_action" : self.set_default_action,
"ensure_visible" : self.ensure_visible,
"key_pressed" : self.key_pressed,
"close_about" : self.close_about
})
self.window = builder.get_object("window")
self.window.set_title(self.window.get_title() + " (v" + VERSION + ")")
# self.window.get_settings().set_string_property('gtk-font-name', 'sans normal 11','');
self.pages = builder.get_object("reporting_mode_tabs")
self.root = builder
self.get('delay').grab_focus()
about = self.get('about_prey_config')
about.set_version(VERSION)
self.display_real_settings()
self.check_if_configured()
if __name__ == "__main__":
app = PreyConfigurator()
gtk.main()
| gpl-3.0 | -2,701,603,045,709,129,000 | 35.53411 | 455 | 0.644851 | false | 3.212436 | true | false | false |
xrmx/pylokit | setup.py | 1 | 1072 | from setuptools import setup, find_packages
import os
VERSION = "0.8.1"
CLASSIFIERS = [
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Office/Business :: Office Suites',
]
setup(
author="Riccardo Magliocchetti",
author_email="[email protected]",
name='pylokit',
version=VERSION,
description='Python CFFI wrapper for LibreOfficeKit',
long_description=open(os.path.join(os.path.dirname(__file__), 'README.rst')).read(),
url="https://github.com/xrmx/pylokit",
license='MPL 2.0',
platforms=['OS Independent'],
classifiers=CLASSIFIERS,
install_requires=[
'cffi',
'six',
],
test_suite='pylokit.tests',
packages=find_packages(),
include_package_data=True,
zip_safe = False,
)
| mpl-2.0 | -7,979,361,998,995,849,000 | 28.777778 | 88 | 0.655784 | false | 3.549669 | false | false | false |
hroark13/android_kernel_zte_draconis | scripts/gcc-wrapper.py | 2 | 3383 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
allowed_warnings = set([
"return_address.c:62",
])
# Capture the name of the object file, can find it.
ofile = None
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
def interpret_warning(line):
"""Decode the message from gcc. The messages we care about have a filename, and a warning"""
line = line.rstrip('\n')
m = warning_re.match(line)
if m and m.group(2) not in allowed_warnings:
print "error, forbidden warning:", m.group(2)
# If there is a warning, remove any object if it exists.
if ofile:
try:
os.remove(ofile)
except OSError:
pass
sys.exit(1)
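# Run the real compiler, echoing its stderr, and return its exit status (or the errno if it could not be launched).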
def run_gcc():
args = sys.argv[1:]
# Look for -o
try:
i = args.index('-o')
global ofile
ofile = args[i+1]
except (ValueError, IndexError):
pass
compiler = sys.argv[0]
try:
proc = subprocess.Popen(args, stderr=subprocess.PIPE)
for line in proc.stderr:
print line,
# interpret_warning(line)
result = proc.wait()
except OSError as e:
result = e.errno
if result == errno.ENOENT:
print args[0] + ':',e.strerror
print 'Is your PATH set correctly?'
else:
print ' '.join(args), str(e)
return result
if __name__ == '__main__':
status = run_gcc()
sys.exit(status)
| gpl-2.0 | 7,115,874,685,368,545,000 | 34.239583 | 97 | 0.668342 | false | 4.105583 | false | false | false |
rouxcode/django-cms-plugins | cmsplugins/baseplugin/utils.py | 1 | 1125 | from __future__ import unicode_literals
from importlib import import_module
from django.utils import six
from django.utils.html import mark_safe
from django.utils.translation import ugettext_lazy as _
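# Render a "hidden" badge for invisible plugin instances while the CMS toolbar is in edit mode.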
def get_indicator_hidden(request, instance):
html = ''
is_visible = getattr(instance, 'is_visible', True)
if request.toolbar.edit_mode_active and not is_visible:
name = _('hidden')
css_class = 'plugin-indicator-hidden'
html = '<span class="{}">{}</span>'.format(
css_class,
name,
)
return mark_safe(html)
def get_str_from_tuple(key='', properties=()):
return dict((k, v) for k, v in properties).get(key, '')
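# Import and return an object from a dotted path such as "package.module.attribute".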
def load_object(import_path):
if not isinstance(import_path, six.string_types):
return import_path
if '.' not in import_path:
raise TypeError(
"'import_path' argument to 'django_load.core.load_object'"
" must contain at least one dot."
)
module_name, object_name = import_path.rsplit('.', 1)
module = import_module(module_name)
return getattr(module, object_name)
| mit | 25,095,546,226,057,910 | 29.405405 | 70 | 0.636444 | false | 3.800676 | false | false | false |
bsandrow/hn-saved-stories | hn_saved_stories/__init__.py | 1 | 8406 |
import os
import json
import re
import sys
import requests
import lxml.html
from datetime import datetime, timedelta
from pprint import pprint as PP
from time import sleep
from urlparse import urljoin
from .utils import hn_relatime_to_datetime, get_story_id
from .logger import logger
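# Parse an HTTP Date header, accepting both full and abbreviated month-name formats.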
def parse_date_header(date):
errors = []
formats = [
"%a, %d %B %Y %H:%M:%S %Z",
"%a, %d %b %Y %H:%M:%S %Z",
]
for format in formats:
try:
return datetime.strptime(date, format)
except ValueError as e:
errors.append(e)
raise errors[0]
class HNSession(object):
user_agent = 'hn-saved-stories/0.2 (https://github.com/bsandrow/hn-saved-stories/)'
max_retries = 2
retry_delay = 30
def __init__(self, headers=None):
headers = headers or {}
headers['User-Agent'] = headers.get('User-Agent', self.user_agent)
self.session = requests.Session()
self.session.headers = headers
self.last_response = None
def last_response_time(self):
""" Return the time of the last response """
if 'last_response' in self.__dict__ and self.last_response.headers.get('date'):
return parse_date_header(self.last_response.headers.get('date'))
else:
return None
def last_response_url(self):
""" Return the url of the last response """
if 'last_response' in self.__dict__:
return self.last_response.url
else:
return None
def get(self, *args, **kwargs):
""" requests.get() within the session
Wraps requests.get() within the session (so it has access to session
cookies), and also retries on failures, because timeouts seem to
happen randomly.
"""
if 'timeout' not in kwargs:
kwargs['timeout'] = 10
retries = 0
while True:
try:
request = self.session.get(*args, **kwargs)
request.raise_for_status()
return request
except requests.exceptions.RequestException as e:
if retries < self.max_retries:
retries += 1
sleep(self.retry_delay)
logger.info("[Sleeping between requests (%ss)]" % self.retry_delay)
else:
raise
def resolve_url(self, url):
""" Resolve :url: using the most appropriate base url """
base_url = self.last_response_url() or 'https://news.ycombinator.com'
return urljoin(base_url, url)
def login(self, username, password, debug=False):
""" Log into the session using provided credentials """
try:
response = self.get('https://news.ycombinator.com/newslogin')
except requests.exceptions.HTTPError:
raise Exception("Error: Unable to retrieve login page")
doc = lxml.html.fromstring(response.text)
fields = doc.xpath('.//form[1]/input')
form_data = { x.get('name'): x.get('value') for x in fields }
form_data['u'] = username
form_data['p'] = password
if debug:
print "Login Form Data: ",
import pprint
pprint.pprint(form_data)
response = self.session.post('https://news.ycombinator.com/y', data=form_data, timeout=10)
if response.status_code != requests.codes.ok:
raise Exception("Error: Unable to successfully login.")
self.username = username
self.last_response = response
def get_saved_stories(self, max_pages=None, break_func=None):
""" Fetch the list of 'saved stories' from a profile
Fetch the list of saved stories for a Hacker News user account. The
session needs to be logged into an account for this to work.
break_func - A function that takes the current page's story list, and
returns True if we should break out of the loop.
max_pages - The maximum number of pages that we should go through
before aborting. A value of None goes through all pages.
"""
def parse_story(title, subtext):
""" Parse a story from title + subtext """
url_keys = ['url', 'comments', 'submitter_link']
story = {}
title_anchor = title.xpath('./a')[0]
comments_anchor = subtext.xpath('.//a[contains(text(), "comments") or contains(text(), "discuss")]')[0] # See Footnote [1]
story['url'] = title_anchor.get('href')
story['title'] = title_anchor.text
story['comments'] = comments_anchor.get('href')
story['submitter'] = subtext.xpath('.//a[1]//text()')[0] # See Footnote [4]
story['submitter_link'] = subtext.xpath('.//a[1]/@href')[0]
story['submitted_at'] = str( hn_relatime_to_datetime(self.last_response_time(), subtext.xpath('./text()')[1]) )
# Resolve all relative URLs
for key in story.keys():
if key in url_keys and story.get(key):
story[key] = self.resolve_url(story[key])
return get_story_id(story), story
page = 1
stories = {}
url = 'https://news.ycombinator.com/saved?id=%s' % self.username
while max_pages is None or page <= max_pages:
html = None
try:
logger.info("Page %d:" % page)
logger.debug(" url = %s" % url)
logger.info(" Fetching...")
try:
response = self.get(url)
except requests.exceptions.HTTPError as e:
raise Exception("Error: Failed to retrieve page %d, error:'%s', rurl: %s" % (page, str(e), url))
if response.text == "Can't display that.":
raise Exception("Error: Got \"Can't display that\" response.")
logger.info(" Parsing...")
html = lxml.html.fromstring(response.text)
basetime = parse_date_header(response.headers['date'])
title = html.cssselect('td.title') # See Footnote [3]
subtext = html.cssselect('td.subtext')
page_stories = dict([ parse_story(*s) for s in zip(title[1::2], subtext) ])
try:
next_link = title[-1].xpath('.//a[text() = "More"]/@href')
except IndexError:
sys.exit("Caught IndexError. Dumping HTML:" + lxml.html.tostring(html))
next_link = next_link[0] if next_link else None
stories.update(page_stories)
should_break = (break_func and break_func(page_stories)) or next_link is None
if should_break:
break
url = self.resolve_url(next_link)
page += 1
logger.info(" Sleeping (1s)...")
sleep(1)
except Exception as e:
if html:
logger.debug("Caught exception. Dumping page...")
logger.debug("______________")
logger.debug(lxml.html.tostring(html, pretty_print=True))
logger.debug("______________")
raise
logger.info("Done.")
return stories
# Footnotes
# ~~~~~~~~~
# [1] The anchor is matched on its text ('comments' or 'discuss') because, for
#     Polls you submitted yourself, there is also an 'add choice' link. If a
#     story has no comments yet, its anchor text is just 'discuss.'
#
# [2] '[Dead]' links remove the 'href' attribute from the anchor, so you end up
# with None as a URL.
#
# [3] 'td.title' selects 3 different things:
# 1) the number of the story (in reverse, story #1 is
# the most recently saved)
# 2) the title + link of the story
# 3) the 'More' link at the bottom of the page, which
# goes to the next page in the series.
# The series should look something like [1,2,1,2,1,2,1,2,3], #1 and #2
# alternating with #3 being the last in the list. #3 will be missing on the
# final page.
#
# [4] The '//text()' part is needed because sometimes the submitter has a
# <font> element colouring it, so text() is not a direct child of the
# anchor. E.g.:
#
# <a href="user?id=foofoobar"><font color="#3c963c">foofoobar</font></a>
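#
# Illustrative usage sketch (not executed here; the session class name and the
# credentials below are placeholders for whatever this module actually defines):
#
#     session = HNSession()                      # hypothetical constructor name
#     session.login('someuser', 'somepassword')
#     seen_ids = set(['some-story-id'])          # hypothetical ids
#     stories = session.get_saved_stories(
#         max_pages=5,
#         break_func=lambda page_stories: any(k in seen_ids for k in page_stories))
#     for story_id, story in stories.items():
#         print story['title'], story['url']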
| mit | 7,238,045,857,086,352,000 | 35.868421 | 134 | 0.557935 | false | 4.023935 | false | false | false |
rosarior/mayan | apps/main/__init__.py | 1 | 2420 | from __future__ import absolute_import
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from navigation.api import register_top_menu
from navigation.api import register_links
from project_setup.api import register_setup
from project_tools.api import register_tool
from .conf.settings import SIDE_BAR_SEARCH, DISABLE_HOME_VIEW
__author__ = 'Roberto Rosario'
__copyright__ = 'Copyright 2012 Roberto Rosario'
__credits__ = ['Roberto Rosario',]
__license__ = 'GPL'
__maintainer__ = 'Roberto Rosario'
__email__ = '[email protected]'
__status__ = 'Production'
__version_info__ = {
'major': 1,
'minor': 0,
'micro': 0,
'releaselevel': 'alpha',
'serial': 0
}
def is_superuser(context):
return context['request'].user.is_staff or context['request'].user.is_superuser
maintenance_menu = {'text': _(u'maintenance'), 'view': 'maintenance_menu', 'famfam': 'wrench', 'icon': 'wrench.png'}
statistics = {'text': _(u'statistics'), 'view': 'statistics', 'famfam': 'table', 'icon': 'blackboard_sum.png', 'condition': is_superuser, 'children_view_regex': [r'statistics']}
diagnostics = {'text': _(u'diagnostics'), 'view': 'diagnostics', 'famfam': 'pill', 'icon': 'pill.png'}
sentry = {'text': _(u'sentry'), 'view': 'sentry', 'famfam': 'bug', 'icon': 'bug.png', 'condition': is_superuser}
admin_site = {'text': _(u'admin site'), 'view': 'admin:index', 'famfam': 'keyboard', 'icon': 'keyboard.png', 'condition': is_superuser}
if not DISABLE_HOME_VIEW:
register_top_menu('home', link={'text': _(u'home'), 'view': 'home', 'famfam': 'house'}, position=0)
if not SIDE_BAR_SEARCH:
register_top_menu('search', link={'text': _(u'search'), 'view': 'search', 'famfam': 'zoom'}, children_path_regex=[r'^search/'])
def get_version():
'''
Return the formatted version information
'''
vers = ['%(major)i.%(minor)i' % __version_info__, ]
if __version_info__['micro']:
vers.append('.%(micro)i' % __version_info__)
if __version_info__['releaselevel'] != 'final':
vers.append('%(releaselevel)s%(serial)i' % __version_info__)
return ''.join(vers)
__version__ = get_version()
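# With the __version_info__ defined above (major=1, minor=0, micro=0,
# releaselevel='alpha', serial=0), get_version() returns '1.0alpha0', so
# __version__ == '1.0alpha0'.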
if 'django.contrib.admin' in settings.INSTALLED_APPS:
register_setup(admin_site)
register_tool(maintenance_menu)
register_tool(statistics)
register_tool(diagnostics)
if 'sentry' in settings.INSTALLED_APPS:
register_tool(sentry)
| gpl-3.0 | 7,616,351,844,105,236,000 | 35.119403 | 177 | 0.659504 | false | 3.180026 | false | false | false |
annarev/tensorflow | tensorflow/python/distribute/input_lib.py | 1 | 101510 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various classes representing distributed inputs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import sys
import six
from tensorflow.python import tf2
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.experimental.ops import distribute
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.ops import multi_device_iterator_ops
from tensorflow.python.data.ops import optional_ops
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_utils
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import input_ops
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import values
from tensorflow.python.distribute.distribute_lib import InputReplicationMode
from tensorflow.python.eager import context
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as tf_device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.types import distribute as distribute_types
from tensorflow.python.util import nest
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.tf_export import tf_export
from tensorflow.tools.docs import doc_controls
def get_distributed_dataset(dataset,
input_workers,
strategy,
num_replicas_in_sync=None,
input_context=None,
options=None):
"""Returns a distributed dataset from the given tf.data.Dataset instance.
This is a common function that is used by all strategies to return a
distributed dataset. The distributed dataset instance returned is different
depending on if we are in a TF 1 or TF 2 context. The distributed dataset
instances returned differ from each other in the APIs supported by each of
them.
Args:
dataset: a tf.data.Dataset instance.
input_workers: an InputWorkers object which specifies devices on which
iterators should be created.
strategy: a `tf.distribute.Strategy` object, used to run all-reduce to
handle last partial batch.
num_replicas_in_sync: Optional integer. If this is not None, the value is
used to decide how to rebatch datasets into smaller batches so that
the total batch size for each step (across all workers and replicas)
adds up to `dataset`'s batch size.
input_context: `InputContext` for sharding. Only pass this in for between
graph multi-worker cases where there is only one `input_worker`. In
these cases, we will shard based on the `input_pipeline_id` and
`num_input_pipelines` in the `InputContext`.
options: Default is None. `tf.distribute.InputOptions` used to control
options on how this dataset is distributed.
Returns:
A distributed dataset instance.
"""
if tf2.enabled():
return DistributedDataset(
input_workers,
strategy,
dataset,
num_replicas_in_sync=num_replicas_in_sync,
input_context=input_context,
options=options)
else:
return DistributedDatasetV1(
dataset,
input_workers,
strategy,
num_replicas_in_sync=num_replicas_in_sync,
input_context=input_context,
options=options)
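# Illustrative sketch (hypothetical user code, not part of this module): the
# helper above is reached indirectly through the public API, roughly:
#
#   strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
#   dataset = tf.data.Dataset.range(8).batch(4)  # global batch size 4
#   dist_dataset = strategy.experimental_distribute_dataset(dataset)
#   for per_replica_batch in dist_dataset:
#     strategy.run(lambda x: x + 1, args=(per_replica_batch,))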
def get_distributed_datasets_from_function(dataset_fn,
input_workers,
input_contexts,
strategy,
options=None):
"""Returns a distributed dataset from the given input function.
This is a common function that is used by all strategies to return a
distributed dataset. The distributed dataset instance returned is different
depending on if we are in a TF 1 or TF 2 context. The distributed dataset
instances returned differ from each other in the APIs supported by each of
them.
Args:
dataset_fn: a function that returns a tf.data.Dataset instance.
input_workers: an InputWorkers object which specifies devices on which
iterators should be created.
input_contexts: A list of `InputContext` instances to be passed to call(s)
to `dataset_fn`. Length and order should match worker order in
`worker_device_pairs`.
strategy: a `tf.distribute.Strategy` object, used to run all-reduce to
handle last partial batch.
options: Default is None. `tf.distribute.InputOptions` used to control
options on how this dataset is distributed.
Returns:
A distributed dataset instance.
Raises:
ValueError: if `options.experimental_replication_mode` and
`options.experimental_place_dataset_on_device` are not consistent
"""
if (options is not None and
options.experimental_replication_mode != InputReplicationMode.PER_REPLICA
and options.experimental_place_dataset_on_device):
raise ValueError(
"When `experimental_place_dataset_on_device` is set for dataset "
"placement, you must also specify `PER_REPLICA` for the "
"replication mode")
if (options is not None and
options.experimental_replication_mode == InputReplicationMode.PER_REPLICA
and options.experimental_fetch_to_device and
options.experimental_place_dataset_on_device):
raise ValueError(
"`experimental_place_dataset_on_device` can not be set to True "
"when experimental_fetch_to_device is True and "
"replication mode is set to `PER_REPLICA`")
if tf2.enabled():
return DistributedDatasetsFromFunction(input_workers, strategy,
input_contexts, dataset_fn, options)
else:
return DistributedDatasetsFromFunctionV1(
input_workers,
strategy,
input_contexts,
dataset_fn,
options)
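# Illustrative sketch (hypothetical user code; `strategy` and
# `global_batch_size` are assumed to exist): the from-function path above backs
# `tf.distribute.Strategy.distribute_datasets_from_function`, where a typical
# `dataset_fn` shards and batches per replica using the `InputContext` it gets:
#
#   def dataset_fn(input_context):
#     batch_size = input_context.get_per_replica_batch_size(global_batch_size)
#     d = tf.data.Dataset.range(100).shard(
#         input_context.num_input_pipelines, input_context.input_pipeline_id)
#     return d.batch(batch_size)
#
#   dist_dataset = strategy.distribute_datasets_from_function(dataset_fn)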
@tf_export("distribute.DistributedIterator", v1=[])
class DistributedIteratorInterface(collections_abc.Iterator,
distribute_types.Iterator):
"""An iterator over `tf.distribute.DistributedDataset`.
`tf.distribute.DistributedIterator` is the primary mechanism for enumerating
elements of a `tf.distribute.DistributedDataset`. It supports the Python
Iterator protocol, which means it can be iterated over using a for-loop or by
fetching individual elements explicitly via `get_next()`.
You can create a `tf.distribute.DistributedIterator` by calling `iter` on
a `tf.distribute.DistributedDataset` or creating a python loop over a
`tf.distribute.DistributedDataset`.
Visit the [tutorial](https://www.tensorflow.org/tutorials/distribute/input)
on distributed input for more examples and caveats.
"""
def get_next(self):
"""Returns the next input from the iterator for all replicas.
Example use:
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> dataset = tf.data.Dataset.range(100).batch(2)
>>> dist_dataset = strategy.experimental_distribute_dataset(dataset)
>>> dist_dataset_iterator = iter(dist_dataset)
>>> @tf.function
... def one_step(input):
... return input
>>> step_num = 5
>>> for _ in range(step_num):
... strategy.run(one_step, args=(dist_dataset_iterator.get_next(),))
>>> strategy.experimental_local_results(dist_dataset_iterator.get_next())
(<tf.Tensor: shape=(1,), dtype=int64, numpy=array([10])>,
<tf.Tensor: shape=(1,), dtype=int64, numpy=array([11])>)
Returns:
A single `tf.Tensor` or a `tf.distribute.DistributedValues` which contains
the next input for all replicas.
Raises:
`tf.errors.OutOfRangeError`: If the end of the iterator has been reached.
"""
raise NotImplementedError(
"DistributedIterator.get_next() must be implemented in descendants.")
@property
def element_spec(self):
# pylint: disable=line-too-long
"""The type specification of an element of `tf.distribute.DistributedIterator`.
Example usage:
>>> global_batch_size = 16
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> dataset = tf.data.Dataset.from_tensors(([1.],[2])).repeat(100).batch(global_batch_size)
>>> distributed_iterator = iter(strategy.experimental_distribute_dataset(dataset))
>>> distributed_iterator.element_spec
(PerReplicaSpec(TensorSpec(shape=(None, 1), dtype=tf.float32, name=None),
TensorSpec(shape=(None, 1), dtype=tf.float32, name=None)),
PerReplicaSpec(TensorSpec(shape=(None, 1), dtype=tf.int32, name=None),
TensorSpec(shape=(None, 1), dtype=tf.int32, name=None)))
Returns:
A nested structure of `tf.TypeSpec` objects matching the structure of an
element of this `tf.distribute.DistributedIterator`. This returned value
is typically a `tf.distribute.DistributedValues` object and specifies the
`tf.TensorSpec` of individual components.
"""
raise NotImplementedError(
"DistributedIterator.element_spec() must be implemented in descendants")
def get_next_as_optional(self):
# pylint: disable=line-too-long
"""Returns a `tf.experimental.Optional` that contains the next value for all replicas.
If the `tf.distribute.DistributedIterator` has reached the end of the
sequence, the returned `tf.experimental.Optional` will have no value.
Example usage:
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> global_batch_size = 2
>>> steps_per_loop = 2
>>> dataset = tf.data.Dataset.range(10).batch(global_batch_size)
>>> distributed_iterator = iter(
... strategy.experimental_distribute_dataset(dataset))
>>> def step_fn(x):
... # train the model with inputs
... return x
>>> @tf.function
... def train_fn(distributed_iterator):
... for _ in tf.range(steps_per_loop):
... optional_data = distributed_iterator.get_next_as_optional()
... if not optional_data.has_value():
... break
... per_replica_results = strategy.run(step_fn, args=(optional_data.get_value(),))
... tf.print(strategy.experimental_local_results(per_replica_results))
>>> train_fn(distributed_iterator)
... # ([0 1], [2 3])
... # ([4], [])
Returns:
      A `tf.experimental.Optional` object representing the next value from the
`tf.distribute.DistributedIterator` (if it has one) or no value.
"""
# pylint: enable=line-too-long
raise NotImplementedError(
"get_next_as_optional() not implemented in descendants")
@tf_export("distribute.DistributedDataset", v1=[])
class DistributedDatasetInterface(collections_abc.Iterable,
distribute_types.Iterable):
# pylint: disable=line-too-long
"""Represents a dataset distributed among devices and machines.
A `tf.distribute.DistributedDataset` could be thought of as a "distributed"
dataset. When you use `tf.distribute` API to scale training to multiple
devices or machines, you also need to distribute the input data, which leads
to a `tf.distribute.DistributedDataset` instance, instead of a
`tf.data.Dataset` instance in the non-distributed case. In TF 2.x,
`tf.distribute.DistributedDataset` objects are Python iterables.
Note: `tf.distribute.DistributedDataset` instances are *not* of type
`tf.data.Dataset`. It only supports two usages we will mention below:
iteration and `element_spec`. We don't support any other APIs to transform or
inspect the dataset.
There are two APIs to create a `tf.distribute.DistributedDataset` object:
`tf.distribute.Strategy.experimental_distribute_dataset(dataset)`and
`tf.distribute.Strategy.distribute_datasets_from_function(dataset_fn)`.
*When to use which?* When you have a `tf.data.Dataset` instance, and the
regular batch splitting (i.e. re-batch the input `tf.data.Dataset` instance
with a new batch size that is equal to the global batch size divided by the
number of replicas in sync) and autosharding (i.e. the
`tf.data.experimental.AutoShardPolicy` options) work for you, use the former
API. Otherwise, if you are *not* using a canonical `tf.data.Dataset` instance,
or you would like to customize the batch splitting or sharding, you can wrap
these logic in a `dataset_fn` and use the latter API. Both API handles
prefetch to device for the user. For more details and examples, follow the
links to the APIs.
There are two main usages of a `DistributedDataset` object:
1. Iterate over it to generate the input for a single device or multiple
devices, which is a `tf.distribute.DistributedValues` instance. To do this,
you can:
* use a pythonic for-loop construct:
>>> global_batch_size = 4
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> dataset = tf.data.Dataset.from_tensors(([1.],[1.])).repeat(4).batch(global_batch_size)
>>> dist_dataset = strategy.experimental_distribute_dataset(dataset)
>>> @tf.function
... def train_step(input):
... features, labels = input
... return labels - 0.3 * features
>>> for x in dist_dataset:
... # train_step trains the model using the dataset elements
... loss = strategy.run(train_step, args=(x,))
... print("Loss is", loss)
Loss is PerReplica:{
0: tf.Tensor(
[[0.7]
[0.7]], shape=(2, 1), dtype=float32),
1: tf.Tensor(
[[0.7]
[0.7]], shape=(2, 1), dtype=float32)
}
Placing the loop inside a `tf.function` will give a performance boost.
However `break` and `return` are currently not supported if the loop is
placed inside a `tf.function`. We also don't support placing the loop
inside a `tf.function` when using
`tf.distribute.experimental.MultiWorkerMirroredStrategy` or
`tf.distribute.experimental.TPUStrategy` with multiple workers.
* use `__iter__` to create an explicit iterator, which is of type
`tf.distribute.DistributedIterator`
>>> global_batch_size = 4
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> train_dataset = tf.data.Dataset.from_tensors(([1.],[1.])).repeat(50).batch(global_batch_size)
>>> train_dist_dataset = strategy.experimental_distribute_dataset(train_dataset)
>>> @tf.function
... def distributed_train_step(dataset_inputs):
... def train_step(input):
... loss = tf.constant(0.1)
... return loss
... per_replica_losses = strategy.run(train_step, args=(dataset_inputs,))
... return strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica_losses,axis=None)
>>> EPOCHS = 2
>>> STEPS = 3
>>> for epoch in range(EPOCHS):
... total_loss = 0.0
... num_batches = 0
... dist_dataset_iterator = iter(train_dist_dataset)
... for _ in range(STEPS):
... total_loss += distributed_train_step(next(dist_dataset_iterator))
... num_batches += 1
... average_train_loss = total_loss / num_batches
... template = ("Epoch {}, Loss: {:.4f}")
... print (template.format(epoch+1, average_train_loss))
Epoch 1, Loss: 0.2000
Epoch 2, Loss: 0.2000
To achieve a performance improvement, you can also wrap the `strategy.run`
call with a `tf.range` inside a `tf.function`. This runs multiple steps in a
`tf.function`. Autograph will convert it to a `tf.while_loop` on the worker.
However, it is less flexible comparing with running a single step inside
`tf.function`. For example, you cannot run things eagerly or arbitrary
python code within the steps.
2. Inspect the `tf.TypeSpec` of the data generated by `DistributedDataset`.
`tf.distribute.DistributedDataset` generates
`tf.distribute.DistributedValues` as input to the devices. If you pass the
input to a `tf.function` and would like to specify the shape and type of
each Tensor argument to the function, you can pass a `tf.TypeSpec` object to
the `input_signature` argument of the `tf.function`. To get the
`tf.TypeSpec` of the input, you can use the `element_spec` property of the
`tf.distribute.DistributedDataset` or `tf.distribute.DistributedIterator`
object.
For example:
>>> global_batch_size = 4
>>> epochs = 1
>>> steps_per_epoch = 1
>>> mirrored_strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> dataset = tf.data.Dataset.from_tensors(([2.])).repeat(100).batch(global_batch_size)
>>> dist_dataset = mirrored_strategy.experimental_distribute_dataset(dataset)
>>> @tf.function(input_signature=[dist_dataset.element_spec])
... def train_step(per_replica_inputs):
... def step_fn(inputs):
... return tf.square(inputs)
... return mirrored_strategy.run(step_fn, args=(per_replica_inputs,))
>>> for _ in range(epochs):
... iterator = iter(dist_dataset)
... for _ in range(steps_per_epoch):
... output = train_step(next(iterator))
... print(output)
PerReplica:{
0: tf.Tensor(
[[4.]
[4.]], shape=(2, 1), dtype=float32),
1: tf.Tensor(
[[4.]
[4.]], shape=(2, 1), dtype=float32)
}
Visit the [tutorial](https://www.tensorflow.org/tutorials/distribute/input)
on distributed input for more examples and caveats.
"""
def __iter__(self):
"""Creates an iterator for the `tf.distribute.DistributedDataset`.
The returned iterator implements the Python Iterator protocol.
Example usage:
>>> global_batch_size = 4
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3, 4]).repeat().batch(global_batch_size)
>>> distributed_iterator = iter(strategy.experimental_distribute_dataset(dataset))
>>> print(next(distributed_iterator))
PerReplica:{
0: tf.Tensor([1 2], shape=(2,), dtype=int32),
1: tf.Tensor([3 4], shape=(2,), dtype=int32)
}
Returns:
      A `tf.distribute.DistributedIterator` instance for the given
`tf.distribute.DistributedDataset` object to enumerate over the
distributed data.
"""
raise NotImplementedError("Must be implemented in descendants")
@property
def element_spec(self):
"""The type specification of an element of this `tf.distribute.DistributedDataset`.
Example usage:
>>> global_batch_size = 16
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> dataset = tf.data.Dataset.from_tensors(([1.],[2])).repeat(100).batch(global_batch_size)
>>> dist_dataset = strategy.experimental_distribute_dataset(dataset)
>>> dist_dataset.element_spec
(PerReplicaSpec(TensorSpec(shape=(None, 1), dtype=tf.float32, name=None),
TensorSpec(shape=(None, 1), dtype=tf.float32, name=None)),
PerReplicaSpec(TensorSpec(shape=(None, 1), dtype=tf.int32, name=None),
TensorSpec(shape=(None, 1), dtype=tf.int32, name=None)))
Returns:
A nested structure of `tf.TypeSpec` objects matching the structure of an
element of this `tf.distribute.DistributedDataset`. This returned value is
typically a `tf.distribute.DistributedValues` object and specifies the
`tf.TensorSpec` of individual components.
"""
raise NotImplementedError(
"DistributedDataset.element_spec must be implemented in descendants.")
@doc_controls.do_not_generate_docs
def reduce(self, initial_state, reduce_func):
raise NotImplementedError(
"DistributedDataset.reduce must be implemented in descendants.")
class InputWorkers(object):
"""A 1-to-many mapping from input worker devices to compute devices."""
def __init__(self, worker_device_pairs):
"""Initialize an `InputWorkers` object.
Args:
worker_device_pairs: A sequence of pairs:
`(input device, a tuple of compute devices fed by that input device)`.
"""
self._worker_device_pairs = worker_device_pairs
self._input_worker_devices = tuple(d for d, _ in self._worker_device_pairs)
self._fed_devices = tuple(tuple(device_util.canonicalize(d) for d in f)
for _, f in self._worker_device_pairs)
@property
def num_workers(self):
return len(self._input_worker_devices)
@property
def worker_devices(self):
return self._input_worker_devices
def compute_devices_for_worker(self, worker_index):
return self._fed_devices[worker_index]
def __repr__(self):
devices = self.worker_devices
debug_repr = ",\n".join(" %d %s: %s" %
(i, devices[i], self._fed_devices[i])
for i in range(len(devices)))
return "%s:{\n%s}" % (self.__class__.__name__, debug_repr)
def serialize(self):
return self._worker_device_pairs
def deserialize(self, worker_device_pairs):
return InputWorkers(worker_device_pairs)
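# Illustrative sketch (the device strings below are placeholders): one input
# worker on the host CPU feeding two local GPUs.
#
#   workers = InputWorkers(
#       [("/job:localhost/replica:0/task:0/device:CPU:0",
#         ("/job:localhost/replica:0/task:0/device:GPU:0",
#          "/job:localhost/replica:0/task:0/device:GPU:1"))])
#   workers.num_workers                    # -> 1
#   workers.compute_devices_for_worker(0)  # -> the two GPUs (canonicalized)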
def _get_next_as_optional(iterator, strategy, return_per_replica=False):
"""Returns an empty dataset indicator and the next input from the iterator.
Args:
iterator: a DistributedIterator object.
strategy: the `tf.distribute.Strategy` instance.
return_per_replica: a boolean. If True, the returned data will be wrapped
with `PerReplica` structure. Otherwise it is a 2D
num_input_workers*num_replicas_per_worker list.
Returns:
A tuple (a boolean tensor indicating whether the next batch has value
globally, data from all replicas).
"""
replicas = []
worker_has_values = []
worker_devices = []
for i, worker in enumerate(iterator._input_workers.worker_devices): # pylint: disable=protected-access
with ops.device(worker):
worker_has_value, next_element = (
iterator._iterators[i].get_next_as_list()) # pylint: disable=protected-access
# Collective all-reduce requires explicit devices for inputs.
with ops.device("/cpu:0"):
# Converting to integers for all-reduce.
worker_has_value = math_ops.cast(worker_has_value, dtypes.int64)
worker_devices.append(worker_has_value.device)
worker_has_values.append(worker_has_value)
# Make `replicas` a flat list of values across all replicas.
replicas.append(next_element)
if return_per_replica:
flattened_data = []
for per_worker_data in replicas:
flattened_data.extend(per_worker_data)
replicas = _create_per_replica(flattened_data, strategy)
# Run an all-reduce to see whether any worker has values.
# TODO(b/131423105): we should be able to short-cut the all-reduce in some
# cases.
if getattr(strategy.extended, "_support_per_replica_values", True):
# `reduce` expects a `PerReplica`, so we pass it one, even
# though it doesn't actually have a value per replica
worker_has_values = values.PerReplica(worker_has_values)
global_has_value = strategy.reduce(
reduce_util.ReduceOp.SUM, worker_has_values, axis=None)
else:
assert len(worker_has_values) == 1
global_has_value = worker_has_values[0]
global_has_value = array_ops.reshape(
math_ops.cast(global_has_value, dtypes.bool), [])
return global_has_value, replicas
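# Note: when return_per_replica=False the second return value is a nested
# num_input_workers x num_replicas_per_worker list, which callers index as
# replicas[worker_index][replica_index] (see DistributedIteratorBase.get_next
# below); when return_per_replica=True it is a single PerReplica value.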
def _is_statically_shaped(element_spec):
"""Test if an iterator output is statically shaped.
For sparse and ragged tensors this only tests the batch dimension.
Args:
element_spec: a nest structure of `tf.TypeSpec`. The element spec of the
dataset of the iterator.
Returns:
True if the shape is static, false otherwise.
"""
for spec in nest.flatten(element_spec):
if isinstance(
spec, (sparse_tensor.SparseTensorSpec, ragged_tensor.RaggedTensorSpec)):
# For sparse or ragged tensor, we should only check the first
# dimension in order to get_next_as_optional. This is because
# when these tensors get batched by dataset only the batch dimension
# is set.
if spec.shape.rank > 0 and spec.shape.as_list()[0] is None:
return False
else:
for component in nest.flatten(spec._component_specs): # pylint: disable=protected-access
if not component.shape.is_fully_defined():
return False
return True
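# For example, a dense tf.TensorSpec with a fully defined shape such as
# (32, 10) is treated as statically shaped, while one whose batch dimension is
# None (e.g. from a dataset batched without drop_remainder=True) is not; for
# sparse/ragged specs only the leading (batch) dimension is inspected.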
class DistributedIteratorBase(DistributedIteratorInterface):
"""Common implementation for all input iterators."""
# pylint: disable=super-init-not-called
def __init__(self, input_workers, iterators, strategy,
enable_get_next_as_optional):
assert isinstance(input_workers, InputWorkers)
if not input_workers.worker_devices:
raise ValueError("Should have at least one worker for input iterator.")
self._iterators = iterators
self._input_workers = input_workers
self._strategy = strategy
self._enable_get_next_as_optional = enable_get_next_as_optional
def next(self):
return self.__next__()
def __next__(self):
try:
return self.get_next()
except errors.OutOfRangeError:
raise StopIteration
def __iter__(self):
return self
def get_next_as_optional(self):
global_has_value, replicas = _get_next_as_optional(
self, self._strategy, return_per_replica=True)
def return_none():
return optional_ops.Optional.empty(self._element_spec)
return control_flow_ops.cond(
global_has_value, lambda: optional_ops.Optional.from_value(replicas),
return_none)
def get_next(self, name=None):
"""Returns the next input from the iterator for all replicas."""
if not self._enable_get_next_as_optional:
replicas = []
for i, worker in enumerate(self._input_workers.worker_devices):
if name is not None:
d = tf_device.DeviceSpec.from_string(worker)
new_name = "%s_%s_%d" % (name, d.job, d.task)
else:
new_name = None
with ops.device(worker):
# Make `replicas` a flat list of values across all replicas.
replicas.extend(
self._iterators[i].get_next_as_list_static_shapes(new_name))
return _create_per_replica(replicas, self._strategy)
out_of_range_replicas = []
def out_of_range_fn(worker_index, device):
"""This function will throw an OutOfRange error."""
# As this will be only called when there is no data left, so calling
# get_next() will trigger an OutOfRange error.
data = self._iterators[worker_index].get_next(device)
out_of_range_replicas.append(data)
return data
global_has_value, replicas = _get_next_as_optional(
self, self._strategy, return_per_replica=False)
results = []
for i, worker in enumerate(self._input_workers.worker_devices):
with ops.device(worker):
devices = self._input_workers.compute_devices_for_worker(i)
for j, device in enumerate(devices):
with ops.device(device):
# pylint: disable=undefined-loop-variable
# pylint: disable=cell-var-from-loop
# It is fine for the lambda to capture variables from the loop as
# the lambda is executed in the loop as well.
result = control_flow_ops.cond(
global_has_value,
lambda: replicas[i][j],
lambda: out_of_range_fn(i, device),
strict=True,
)
# pylint: enable=cell-var-from-loop
# pylint: enable=undefined-loop-variable
results.append(result)
replicas = results
return _create_per_replica(replicas, self._strategy)
class DistributedIteratorV1(DistributedIteratorBase):
"""Input Iterator for a distributed dataset."""
# We need a private initializer method for re-initializing multidevice
# iterators when used with Keras training loops. If we don't reinitialize the
# iterator we run into memory leak issues (b/123315763).
@property
def _initializer(self):
init_ops = []
for it in self._iterators:
init_ops.extend(it.initialize())
return control_flow_ops.group(init_ops)
@deprecated(None, "Use the iterator's `initializer` property instead.")
def initialize(self):
"""Initialize underlying iterators.
Returns:
A list of any initializer ops that should be run.
"""
return self._initializer
@property
def initializer(self):
"""Returns a list of ops that initialize the iterator."""
return self.initialize()
# TODO(priyag): Remove when we switch to using `MultiDeviceIterator` for TPUs.
@property
def output_classes(self):
return self._iterators[0].output_classes
# TODO(priyag): Remove when we switch to using `MultiDeviceIterator` for TPUs.
@property
def output_shapes(self):
return self._iterators[0].output_shapes
# TODO(priyag): Remove when we switch to using `MultiDeviceIterator` for TPUs.
@property
def output_types(self):
return self._iterators[0].output_types
# TODO(priyag): Remove when we switch to using `MultiDeviceIterator` for TPUs.
def get_iterator(self, worker):
for i, w in enumerate(self._input_workers.worker_devices):
if worker == w:
return self._iterators[i]
return None
@property
def element_spec(self):
"""The type specification of an element of this iterator."""
return self._element_spec
class DistributedDatasetAndIteratorSpec(type_spec.TypeSpec):
  """Common type specification for `DistributedDataset` and `DistributedDatasetsFromFunction`."""
__slots__ = [
"_input_workers", "_element_spec", "_strategy",
"_enable_get_next_as_optional", "_options"
]
def __init__(self,
input_workers,
element_spec,
strategy,
options,
enable_get_next_as_optional=None):
# We don't want to allow deserialization of this class because we don't
    # serialize the strategy object. Currently the only place where
# _deserialize is called is when we save/restore using SavedModels.
if isinstance(input_workers, tuple):
raise NotImplementedError("DistributedIteratorSpec does not have support "
"for deserialization.")
else:
self._input_workers = input_workers
self._element_spec = element_spec
self._strategy = strategy
self._enable_get_next_as_optional = enable_get_next_as_optional
self._options = options
def _serialize(self):
# We cannot serialize the strategy object so we convert it to an id that we
# can use for comparison.
return (self._input_workers.serialize(), self._element_spec,
id(self._strategy), id(self._options))
def _deserialize(self):
raise ValueError(
f"Deserialization is currently unsupported for {type(self)}.")
  def sanity_check_type(self, other):
    """Checks that `other` is a TypeSpec compatible with `self`.
Args:
other: A `TypeSpec`.
Raises:
ValueError: If there is no TypeSpec that is compatible with both `self`
and `other`.
"""
# pylint: disable=protected-access
if type(self) is not type(other):
raise ValueError("No TypeSpec is compatible with both %s and %s" %
(self, other))
if self._input_workers.serialize() != other._input_workers.serialize():
raise ValueError("_input_workers is not compatible with both %s "
"and %s" % (self, other))
if self._strategy is not other._strategy:
raise ValueError("tf.distribute strategy is not compatible with both %s "
"and %s" % (self, other))
class DistributedIteratorSpec(DistributedDatasetAndIteratorSpec):
"""Type specification for `DistributedIterator`."""
def __init__(self, input_workers, element_spec, strategy,
enable_get_next_as_optional, options):
super(DistributedIteratorSpec,
self).__init__(input_workers, element_spec, strategy, options,
enable_get_next_as_optional)
@property
def value_type(self):
return DistributedIterator
# Overriding this method so that we can merge and reconstruct the spec object
def most_specific_compatible_type(self, other):
"""Returns the most specific TypeSpec compatible with `self` and `other`.
Args:
other: A `TypeSpec`.
Raises:
ValueError: If there is no TypeSpec that is compatible with both `self`
and `other`.
"""
# pylint: disable=protected-access
self.sanity_check_type(other)
element_spec = nest.map_structure(
lambda a, b: a.most_specific_compatible_type(b), self._element_spec,
other._element_spec)
return DistributedIteratorSpec(self._input_workers, element_spec,
self._strategy,
self._enable_get_next_as_optional,
self._options)
@property
def _component_specs(self):
specs = []
worker_device_pairs = self._input_workers._worker_device_pairs # pylint: disable=protected-access
for i, (input_device, compute_devices) in enumerate(worker_device_pairs):
element_spec = nest.map_structure(
functools.partial(_replace_per_replica_spec, i=i), self._element_spec)
specs.append(
_SingleWorkerDatasetIteratorSpec(input_device, compute_devices,
element_spec, self._options))
return specs
def _to_components(self, value):
return value._iterators # pylint: disable=protected-access
def _from_components(self, components):
return DistributedIterator(
input_workers=self._input_workers,
iterators=None,
components=components,
element_spec=self._element_spec,
strategy=self._strategy,
enable_get_next_as_optional=self._enable_get_next_as_optional,
options=self._options)
@staticmethod
def from_value(value):
# pylint: disable=protected-access
return DistributedIteratorSpec(value._input_workers, value._element_spec,
value._strategy,
value._enable_get_next_as_optional,
value._options)
def _with_tensor_ranks_only(self):
element_spec = nest.map_structure(
lambda s: s._with_tensor_ranks_only(), # pylint: disable=protected-access
self._element_spec)
return DistributedIteratorSpec(self._input_workers, element_spec,
self._strategy,
self._enable_get_next_as_optional,
self._options)
class DistributedIterator(DistributedIteratorBase,
composite_tensor.CompositeTensor):
"""Input Iterator for a distributed dataset."""
def __init__(self,
input_workers=None,
iterators=None,
strategy=None,
components=None,
element_spec=None,
enable_get_next_as_optional=False,
options=None):
if input_workers is None:
raise ValueError("`input_workers` should be "
"provided.")
    error_message = ("Either `iterators` or "
                     "both `components` and `element_spec` need to be "
                     "provided.")
self._options = options
if iterators is None:
if (components is None or element_spec is None):
raise ValueError(error_message)
self._element_spec = element_spec
self._input_workers = input_workers
self._iterators = components
self._strategy = strategy
self._enable_get_next_as_optional = enable_get_next_as_optional
else:
if (components is not None and element_spec is not None):
raise ValueError(error_message)
super(DistributedIterator,
self).__init__(input_workers, iterators, strategy,
enable_get_next_as_optional)
@property
def element_spec(self):
# When partial batch handling is enabled, always set the batch dimension to
# None, otherwise we just follow element_spec of the underlying dataset
# (whose batch dimension may also be None). This is because with partial
    # batch handling we could always produce empty batches.
if (self._enable_get_next_as_optional and
self._strategy.extended._in_multi_worker_mode()): # pylint: disable=protected-access
return nest.map_structure(
_rebatch_as_dynamic, self._element_spec, expand_composites=False)
return self._element_spec
@property
def _type_spec(self):
# Note that we use actual element_spec instead of the rebatched-as-dynamic
# one to create DistributedIteratorSpec, to be consistent with the
# underlying iterators' specs.
return DistributedIteratorSpec(self._input_workers, self._element_spec,
self._strategy,
self._enable_get_next_as_optional,
self._options)
class _IterableInput(DistributedDatasetInterface):
"""Base class for iterable inputs for distribution strategies."""
# pylint: disable=super-init-not-called
def __init__(self, input_workers):
assert isinstance(input_workers, InputWorkers)
self._input_workers = input_workers
def __iter__(self):
raise NotImplementedError("must be implemented in descendants")
def reduce(self, initial_state, reduce_fn):
"""Execute a `reduce_fn` over all the elements of the input."""
iterator = iter(self)
has_data, data = _get_next_as_optional(
iterator, self._strategy, return_per_replica=True)
def cond(has_data, data, state):
del data, state # Unused.
return has_data
def loop_body(has_data, data, state):
"""Executes `reduce_fn` in a loop till the dataset is empty."""
del has_data # Unused.
state = reduce_fn(state, data)
has_data, data = _get_next_as_optional(
iterator, self._strategy, return_per_replica=True)
return has_data, data, state
has_data, data, final_state = control_flow_ops.while_loop(
cond, loop_body, [has_data, data, initial_state], parallel_iterations=1)
return final_state
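# Illustrative sketch of the reduce() contract above (hypothetical user code;
# `strategy` and a distributed dataset `dist_dataset` built from it are
# assumed): counting the number of global steps the dataset yields.
#
#   def count_fn(state, per_replica_batch):
#     del per_replica_batch  # Only the step count matters here.
#     return state + 1
#
#   num_batches = dist_dataset.reduce(tf.constant(0), count_fn)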
class DistributedDatasetSpec(DistributedDatasetAndIteratorSpec):
  """Type specification for `DistributedDataset`."""
def __init__(self, input_workers, element_spec, strategy,
enable_get_next_as_optional, options):
super(DistributedDatasetSpec,
self).__init__(input_workers, element_spec, strategy, options,
enable_get_next_as_optional)
@property
def value_type(self):
return DistributedDataset
# Overriding this method so that we can merge and reconstruct the spec object
def most_specific_compatible_type(self, other):
"""Returns the most specific TypeSpec compatible with `self` and `other`.
Args:
other: A `TypeSpec`.
Raises:
ValueError: If there is no TypeSpec that is compatible with both `self`
and `other`.
"""
# pylint: disable=protected-access
self.sanity_check_type(other)
element_spec = nest.map_structure(
lambda a, b: a.most_specific_compatible_type(b), self._element_spec,
other._element_spec)
return DistributedDatasetSpec(self._input_workers, element_spec,
self._strategy,
self._enable_get_next_as_optional,
self._options)
@property
def _component_specs(self):
specs = []
worker_device_pairs = self._input_workers._worker_device_pairs # pylint: disable=protected-access
for i, _ in enumerate(worker_device_pairs):
element_spec = nest.map_structure(
functools.partial(_replace_per_replica_spec, i=i), self._element_spec)
specs.append(dataset_ops.DatasetSpec(element_spec))
return specs
def _to_components(self, value):
return value._cloned_datasets # pylint: disable=protected-access
def _from_components(self, components):
return DistributedDataset(
input_workers=self._input_workers,
strategy=self._strategy,
components=components,
element_spec=self._element_spec,
enable_get_next_as_optional=self._enable_get_next_as_optional,
options=self._options)
@staticmethod
def from_value(value):
# pylint: disable=protected-access
return DistributedDatasetSpec(value._input_workers, value._element_spec,
value._strategy,
value._enable_get_next_as_optional,
value._options)
class DistributedDataset(_IterableInput, composite_tensor.CompositeTensor):
"""Distributed dataset that supports prefetching to multiple devices."""
def __init__(self,
input_workers,
strategy,
dataset=None,
num_replicas_in_sync=None,
input_context=None,
components=None,
element_spec=None,
enable_get_next_as_optional=None,
options=None):
"""Distribute the dataset on all workers.
If `num_replicas_in_sync` is not None, we split each batch of the dataset
into `num_replicas_in_sync` smaller batches, to be distributed among that
worker's replicas, so that the batch size for a global step (across all
workers and replicas) is as expected.
Args:
input_workers: an `InputWorkers` object.
strategy: a `tf.distribute.Strategy` object, used to run all-reduce to
handle last partial batch.
dataset: `tf.data.Dataset` that will be used as the input source. Either
dataset or components field should be passed when constructing
        DistributedDataset. Use this when constructing DistributedDataset from a
new `tf.data.Dataset`. Use components when constructing using
DistributedDatasetSpec.
num_replicas_in_sync: Optional integer. If this is not None, the value
is used to decide how to rebatch datasets into smaller batches so that
the total batch size for each step (across all workers and replicas)
adds up to `dataset`'s batch size.
input_context: `InputContext` for sharding. Only pass this in for between
graph multi-worker cases where there is only one `input_worker`. In
these cases, we will shard based on the `input_pipeline_id` and
`num_input_pipelines` in the `InputContext`.
components: datasets when DistributedDataset is constructed from
        DistributedDatasetSpec. Either `dataset` or `components` should be
        passed.
element_spec: element spec for DistributedDataset when constructing from
DistributedDatasetSpec. This will be used to set the element_spec for
DistributedDataset and verified against element_spec from components.
enable_get_next_as_optional: this is required when components is passed
instead of dataset.
options: `tf.distribute.InputOptions` used to control options on how this
dataset is distributed.
"""
super(DistributedDataset, self).__init__(input_workers=input_workers)
if input_workers is None or strategy is None:
raise ValueError("input_workers and strategy are required arguments")
if dataset is not None and components is not None:
raise ValueError("Only one of dataset or components should be present")
if dataset is None and components is None:
raise ValueError("At least one of dataset or components should be passed")
if dataset is not None:
self._create_cloned_datasets_from_dataset(dataset, input_context,
input_workers, strategy,
num_replicas_in_sync)
else:
if enable_get_next_as_optional is None:
raise ValueError(
"When constructing DistributedDataset with components, " +
"enable_get_next_as_optional should also be passed")
self._cloned_datasets = components
self._enable_get_next_as_optional = enable_get_next_as_optional
self._input_workers = input_workers
self._strategy = strategy
self._options = options
if element_spec is not None:
if element_spec != _create_distributed_tensor_spec(
self._strategy, self._cloned_datasets[0].element_spec):
raise ValueError("Mismatched element_spec from the passed components")
self._element_spec = element_spec
else:
self._element_spec = _create_distributed_tensor_spec(
self._strategy, self._cloned_datasets[0].element_spec)
def _create_cloned_datasets_from_dataset(self, dataset, input_context,
input_workers, strategy,
num_replicas_in_sync):
# We clone and shard the dataset on each worker. The current setup tries to
# shard the dataset by files if possible so that each worker sees a
# different subset of files. If that is not possible, will attempt to shard
# the final input such that each worker will run the entire preprocessing
# pipeline and only receive its own shard of the dataset.
# Additionally, we rebatch the dataset on each worker into
# `num_replicas_in_sync` smaller batches to be distributed among that
# worker's replicas, so that the batch size for a global step (across all
# workers and replicas) adds up to the original dataset's batch size.
if num_replicas_in_sync is not None:
num_workers = input_context.num_input_pipelines if input_context else len(
input_workers.worker_devices)
rebatch_fn = self._make_rebatch_fn(dataset, num_workers,
num_replicas_in_sync)
else:
rebatch_fn = None
self._cloned_datasets = []
if input_context:
# Between-graph where we rely on the input_context for sharding
assert input_workers.num_workers == 1
if rebatch_fn is not None:
dataset = rebatch_fn(dataset, input_context.input_pipeline_id)
dataset = input_ops.auto_shard_dataset(dataset,
input_context.num_input_pipelines,
input_context.input_pipeline_id,
num_replicas_in_sync)
self._cloned_datasets.append(dataset)
else:
replicated_ds = distribute.replicate(dataset,
input_workers.worker_devices)
for i, worker in enumerate(input_workers.worker_devices):
with ops.device(worker):
cloned_dataset = replicated_ds[worker]
cloned_dataset = cloned_dataset.with_options(dataset.options())
if rebatch_fn is not None:
cloned_dataset = rebatch_fn(cloned_dataset, i)
cloned_dataset = input_ops.auto_shard_dataset(
cloned_dataset, len(input_workers.worker_devices), i,
num_replicas_in_sync)
self._cloned_datasets.append(cloned_dataset)
self._enable_get_next_as_optional = _enable_get_next_as_optional(
strategy, dataset)
def _make_rebatch_fn(self, dataset, num_workers, num_replicas_in_sync):
"""Returns a callable that rebatches the input dataset.
Args:
dataset: A `tf.data.Dataset` representing the dataset to be distributed.
num_workers: An integer representing the number of workers to distribute
`dataset` among.
num_replicas_in_sync: An integer representing the number of replicas in
sync across all workers.
"""
if num_replicas_in_sync % num_workers:
raise ValueError(
"tf.distribute expects every worker to have the same number of "
"replicas. However, encountered `num_replicas_in_sync` ({}) that "
"cannot be divided by `num_workers` ({})".format(
num_replicas_in_sync, num_workers))
num_replicas_per_worker = num_replicas_in_sync // num_workers
with ops.colocate_with(dataset._variant_tensor): # pylint: disable=protected-access
batch_size = distribute.compute_batch_size(dataset)
def rebatch_fn(dataset, worker_index):
try:
# pylint: disable=protected-access
def apply_rebatch():
batch_sizes = distribute.batch_sizes_for_worker(
batch_size, num_workers, num_replicas_per_worker, worker_index)
return distribute._RebatchDataset(
dataset, batch_sizes).prefetch(num_replicas_per_worker)
def apply_legacy_rebatch():
return distribute._LegacyRebatchDataset(
dataset, num_replicas_in_sync).prefetch(num_replicas_per_worker)
with ops.colocate_with(dataset._variant_tensor):
return control_flow_ops.cond(
math_ops.not_equal(batch_size, -1),
true_fn=apply_rebatch,
false_fn=apply_legacy_rebatch)
except errors.InvalidArgumentError as e:
if "without encountering a batch" in str(e):
six.reraise(
ValueError,
ValueError(
"Call the `batch` method on the input Dataset in order to be "
"able to split your input across {} replicas.\n Please see "
"the tf.distribute.Strategy guide. {}".format(
num_replicas_in_sync, e)),
sys.exc_info()[2])
else:
raise
return rebatch_fn
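  # Worked example of the arithmetic above (illustrative numbers only): with a
  # global batch size of 16, num_workers=2 and num_replicas_in_sync=4, each
  # worker hosts num_replicas_per_worker=2 replicas, and the per-replica
  # batches produced by rebatch_fn add back up to 16 across all replicas per
  # global step.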
def __iter__(self):
if not (context.executing_eagerly() or
ops.get_default_graph().building_function):
raise RuntimeError("__iter__() is only supported inside of tf.function "
"or when eager execution is enabled.")
# This is an optional flag that can be used to turn off using
# OwnedMultiDeviceIterators and instead use the legacy MultiDeviceIterators
# as a stop gap solution that will allow us to roll out this change.
enable_legacy_iterators = getattr(self._strategy,
"_enable_legacy_iterators", False)
worker_iterators = _create_iterators_per_worker(self._cloned_datasets,
self._input_workers,
enable_legacy_iterators,
self._options)
if enable_legacy_iterators:
iterator = DistributedIteratorV1(
self._input_workers,
worker_iterators,
self._strategy,
enable_get_next_as_optional=self._enable_get_next_as_optional)
else:
iterator = DistributedIterator(
self._input_workers,
worker_iterators,
self._strategy,
enable_get_next_as_optional=self._enable_get_next_as_optional,
options=self._options)
iterator._element_spec = self._element_spec # pylint: disable=protected-access
# When async eager is enabled, sometimes the iterator may not finish
# initialization before passing to a multi device function, add a sync point
# here to make sure all underlying iterators are initialized.
if context.executing_eagerly():
context.async_wait()
return iterator
@property
def element_spec(self):
"""The type specification of an element of this dataset."""
# When partial batch handling is enabled, always set the batch dimension to
# None, otherwise we just follow element_spec of the underlying dataset
# (whose batch dimension may also be None). This is because with partial
    # batch handling we could always produce empty batches.
if (self._enable_get_next_as_optional and
self._strategy.extended._in_multi_worker_mode()): # pylint: disable=protected-access
return nest.map_structure(
_rebatch_as_dynamic, self._element_spec, expand_composites=False)
return self._element_spec
@property
def _type_spec(self):
return DistributedDatasetSpec(self._input_workers, self._element_spec,
self._strategy,
self._enable_get_next_as_optional,
self._options)
class DistributedDatasetV1(DistributedDataset):
"""Distributed dataset that supports prefetching to multiple devices."""
def __init__(self,
dataset,
input_workers,
strategy,
num_replicas_in_sync=None,
input_context=None,
options=None):
self._input_workers = input_workers
super(DistributedDatasetV1, self).__init__(
input_workers,
strategy,
dataset,
num_replicas_in_sync=num_replicas_in_sync,
input_context=input_context,
options=options)
def make_one_shot_iterator(self):
"""Get a one time use iterator for DistributedDatasetV1.
Note: This API is deprecated. Please use `for ... in dataset:` to iterate
over the dataset or `iter` to create an iterator.
Returns:
A DistributedIteratorV1 instance.
"""
return self._make_one_shot_iterator()
def _make_one_shot_iterator(self):
"""Get an iterator for DistributedDatasetV1."""
# Graph mode with one shot iterator is disabled because we have to call
# `initialize` on the iterator which is only required if we are using a
# tf.distribute strategy.
if not context.executing_eagerly():
raise ValueError("Cannot create a one shot iterator. Please use "
"`make_initializable_iterator()` instead.")
return self._get_iterator()
def make_initializable_iterator(self):
"""Get an initializable iterator for DistributedDatasetV1.
Note: This API is deprecated. Please use
`tf.compat.v1.data.make_initializable_iterator(dataset)` to create an
initializable iterator.
Returns:
A DistributedIteratorV1 instance.
"""
return self._make_initializable_iterator()
def _make_initializable_iterator(self, shared_name=None): # pylint: disable=unused-argument
"""Get an initializable iterator for DistributedDatasetV1."""
# Eager mode generates already initialized iterators. Hence we cannot create
# an initializable iterator.
if context.executing_eagerly():
raise ValueError("Cannot create initializable iterator in Eager mode. "
"Please use `iter()` instead.")
return self._get_iterator()
def _get_iterator(self):
worker_iterators = _create_iterators_per_worker(self._cloned_datasets,
self._input_workers, True,
self._options)
iterator = DistributedIteratorV1(self._input_workers, worker_iterators,
self._strategy,
self._enable_get_next_as_optional)
iterator._element_spec = self.element_spec # pylint: disable=protected-access
# When async eager is enabled, sometimes the iterator may not finish
# initialization before passing to a multi device function, add a sync point
# here to make sure all underlying iterators are initialized.
if context.executing_eagerly():
context.async_wait()
return iterator
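  # Illustrative TF1-style sketch (hypothetical user code; graph mode, with
  # `strategy`, `dataset` and a tf.compat.v1.Session `sess` assumed):
  #
  #   dist_dataset = strategy.experimental_distribute_dataset(dataset)
  #   iterator = dist_dataset.make_initializable_iterator()
  #   per_replica_batch = iterator.get_next()   # per-replica values
  #   sess.run(iterator.initializer)            # run before the first batch
  #   # per_replica_batch is then fed to the strategy's per-replica step fn.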
def __iter__(self):
if (ops.executing_eagerly_outside_functions() or
ops.get_default_graph().building_function):
return self._get_iterator()
raise RuntimeError("__iter__() is only supported inside of tf.function "
"or when eager execution is enabled.")
class DistributedDatasetsFromFunctionSpec(DistributedDatasetAndIteratorSpec):
  """Type specification for `DistributedDatasetsFromFunction`."""
def __init__(self, input_workers, element_spec, strategy, options):
super(DistributedDatasetsFromFunctionSpec,
self).__init__(input_workers, element_spec, strategy, options)
@property
def value_type(self):
return DistributedDatasetsFromFunction
@property
def _component_specs(self):
specs = []
worker_device_pairs = self._input_workers._worker_device_pairs # pylint: disable=protected-access
for i, _ in enumerate(worker_device_pairs):
element_spec = nest.map_structure(
functools.partial(_replace_per_replica_spec, i=i), self._element_spec)
specs.append(dataset_ops.DatasetSpec(element_spec))
return specs
# Overriding this method so that we can merge and reconstruct the spec object
def most_specific_compatible_type(self, other):
"""Returns the most specific TypeSpec compatible with `self` and `other`.
Args:
other: A `TypeSpec`.
Raises:
ValueError: If there is no TypeSpec that is compatible with both `self`
and `other`.
"""
# pylint: disable=protected-access
self.sanity_check_type(other)
element_spec = nest.map_structure(
lambda a, b: a.most_specific_compatible_type(b), self._element_spec,
other._element_spec) # pylint: disable=protected-access
return DistributedDatasetsFromFunctionSpec(self._input_workers,
element_spec, self._strategy,
self._options)
def _to_components(self, value):
return value._datasets # pylint: disable=protected-access
def _from_components(self, components):
return DistributedDatasetsFromFunction(
input_workers=self._input_workers,
strategy=self._strategy,
components=components,
element_spec=self._element_spec,
options=self._options)
@staticmethod
def from_value(value):
# pylint: disable=protected-access
return DistributedDatasetsFromFunctionSpec(
input_workers=value._input_workers,
element_spec=value._element_spec,
strategy=value._strategy,
options=value._options)
# TODO(priyag): Add other replication modes.
class DistributedDatasetsFromFunction(_IterableInput,
composite_tensor.CompositeTensor):
"""Inputs created from dataset function."""
def __init__(self,
input_workers,
strategy,
input_contexts=None,
dataset_fn=None,
options=None,
components=None,
element_spec=None):
"""Makes an iterable from datasets created by the given function.
Args:
input_workers: an `InputWorkers` object.
strategy: a `tf.distribute.Strategy` object, used to run all-reduce to
handle last partial batch.
input_contexts: A list of `InputContext` instances to be passed to call(s)
to `dataset_fn`. Length and order should match worker order in
`worker_device_pairs`.
dataset_fn: A function that returns a `Dataset` given an `InputContext`.
Either dataset_fn or components should be passed to construct
        DistributedDatasetsFromFunction. Use this when constructing it from a
        function. Use components when constructing
using DistributedDatasetsFromFunctionSpec.
options: `tf.distribute.InputOptions` used to control options on how this
dataset is distributed.
components: datasets when DistributedDatasetsFromFunction is constructed
        from DistributedDatasetsFromFunctionSpec. Only one of dataset_fn or
components should be passed.
element_spec: element spec for DistributedDataset when constructing from
DistributedDatasetSpec. This will be used to set the element_spec for
DistributedDatasetsFromFunctionSpec and verified against element_spec
from components.
"""
super(DistributedDatasetsFromFunction, self).__init__(
input_workers=input_workers)
self._input_workers = input_workers
self._strategy = strategy
self._options = options
if dataset_fn is not None and components is not None:
raise ValueError("Only one of dataset_fn or components should be set")
if dataset_fn is None and components is None:
raise ValueError("At least one of dataset_fn or components should be set")
if dataset_fn is not None:
if input_workers.num_workers != len(input_contexts):
        raise ValueError(
            "Number of input workers (%d) is not the same as number of "
"input_contexts (%d)" %
(input_workers.num_workers, len(input_contexts)))
self._datasets, element_spec = (
_create_datasets_from_function_with_input_context(
input_contexts, self._input_workers, dataset_fn))
self._element_spec = _create_distributed_tensor_spec(
self._strategy, element_spec)
else:
if element_spec is None:
raise ValueError(
"element_spec should also be passed when passing components")
self._element_spec = element_spec
self._datasets = components
self._enable_get_next_as_optional = _enable_get_next_as_optional(
self._strategy, self._datasets[0])
def __iter__(self):
if (ops.executing_eagerly_outside_functions() or
ops.get_default_graph().building_function):
# This is an optional flag that can be used to turn off using
# OwnedMultiDeviceIterators and instead use the legacy
# MultiDeviceIterators as a stop gap solution that will allow us to roll
# out this change.
enable_legacy_iterators = getattr(self._strategy,
"_enable_legacy_iterators", False)
iterators = _create_iterators_per_worker(self._datasets,
self._input_workers,
enable_legacy_iterators,
self._options)
if enable_legacy_iterators:
iterator = DistributedIteratorV1(
self._input_workers,
iterators,
self._strategy,
enable_get_next_as_optional=self._enable_get_next_as_optional)
else:
iterator = DistributedIterator(
input_workers=self._input_workers,
iterators=iterators,
strategy=self._strategy,
enable_get_next_as_optional=self._enable_get_next_as_optional,
options=self._options)
iterator._element_spec = self._element_spec # pylint: disable=protected-access
# When async eager is enabled, sometimes the iterator may not finish
# initialization before passing to a multi device function, add a sync
# point here to make sure all underlying iterators are initialized.
if context.executing_eagerly():
context.async_wait()
return iterator
raise RuntimeError("__iter__() is only supported inside of tf.function "
"or when eager execution is enabled.")
@property
def element_spec(self):
"""The type specification of an element of this dataset."""
# When partial batch handling is enabled, always set the batch dimension to
# None, otherwise we just follow element_spec of the underlying dataset
# (whose batch dimension may also be None). This is because with partial
    # batch handling we could always produce empty batches.
if (self._enable_get_next_as_optional and
self._strategy.extended._in_multi_worker_mode()): # pylint: disable=protected-access
return nest.map_structure(
_rebatch_as_dynamic, self._element_spec, expand_composites=False)
return self._element_spec
@property
def _type_spec(self):
return DistributedDatasetsFromFunctionSpec(self._input_workers,
self._element_spec,
self._strategy, self._options)
class DistributedDatasetsFromFunctionV1(DistributedDatasetsFromFunction):
"""Inputs created from dataset function."""
def _make_initializable_iterator(self, shared_name=None):
"""Get an initializable iterator for DistributedDatasetsFromFunctionV1."""
del shared_name # Unused
# Eager mode generates already initialized iterators. Hence we cannot create
# an initializable iterator.
if context.executing_eagerly():
raise ValueError("Cannot create initializable iterator in Eager mode. "
"Please use `iter()` instead.")
return self._get_iterator()
def _make_one_shot_iterator(self):
"""Get an iterator for iterating over DistributedDatasetsFromFunctionV1."""
# Graph mode with one shot iterator is disabled because we have to call
# `initialize` on the iterator which is only required if we are using a
# tf.distribute strategy.
if not context.executing_eagerly():
raise ValueError("Cannot create a one shot iterator. Please use "
"`make_initializable_iterator()` instead.")
return self._get_iterator()
def _get_iterator(self):
iterators = _create_iterators_per_worker(self._datasets,
self._input_workers, True,
self._options)
iterator = DistributedIteratorV1(self._input_workers, iterators,
self._strategy,
self._enable_get_next_as_optional)
iterator._element_spec = self._element_spec # pylint: disable=protected-access
# When async eager is enabled, sometimes the iterator may not finish
# initialization before passing to a multi device function, add a sync point
# here to make sure all underlying iterators are initialized.
if context.executing_eagerly():
context.async_wait()
return iterator
def __iter__(self):
if (ops.executing_eagerly_outside_functions() or
ops.get_default_graph().building_function):
return self._get_iterator()
raise RuntimeError("__iter__() is only supported inside of tf.function "
"or when eager execution is enabled.")
# TODO(anjalisridhar): This class will be soon removed in favor of newer
# APIs.
class InputFunctionIterator(DistributedIteratorV1):
"""Iterator created from input function."""
def __init__(self, input_fn, input_workers, input_contexts, strategy):
"""Make an iterator for input provided via an input function.
Currently implements PER_WORKER mode, in which the `input_fn` is called
once on each worker.
TODO(priyag): Add other replication modes.
Args:
input_fn: Input function that returns a `tf.data.Dataset` object.
input_workers: an `InputWorkers` object.
input_contexts: A list of `InputContext` instances to be passed to call(s)
to `input_fn`. Length and order should match worker order in
`worker_device_pairs`.
strategy: a `tf.distribute.Strategy` object, used to run all-reduce to
handle last partial batch.
"""
assert isinstance(input_workers, InputWorkers)
if input_workers.num_workers != len(input_contexts):
raise ValueError(
"Number of input workers (%d) is not same as number of "
"input_contexts (%d)" %
(input_workers.num_workers, len(input_contexts)))
iterators = []
for i, ctx in enumerate(input_contexts):
worker = input_workers.worker_devices[i]
with ops.device(worker):
result = input_fn(ctx)
devices = input_workers.compute_devices_for_worker(i)
if isinstance(result, dataset_ops.DatasetV2):
iterator = _SingleWorkerDatasetIterator(result, worker, devices)
elif callable(result):
iterator = _SingleWorkerCallableIterator(result, worker, devices)
else:
raise ValueError(
"input_fn must return a tf.data.Dataset or a callable.")
iterators.append(iterator)
super(InputFunctionIterator, self).__init__(
input_workers, iterators, strategy, enable_get_next_as_optional=False)
self._enable_get_next_as_optional = False
# TODO(anjalisridhar): This class will soon be removed and users should move
# to using DistributedIterator.
class DatasetIterator(DistributedIteratorV1):
"""Iterator created from input dataset."""
def __init__(self,
dataset,
input_workers,
strategy,
num_replicas_in_sync=None,
input_context=None):
"""Make an iterator for the dataset on given devices.
If `num_replicas_in_sync` is not None, we split each batch of the dataset
into `num_replicas_in_sync` smaller batches, to be distributed among that
worker's replicas, so that the batch size for a global step (across all
workers and replicas) is as expected.
Args:
dataset: `tf.data.Dataset` that will be used as the input source.
input_workers: an `InputWorkers` object.
strategy: a `tf.distribute.Strategy` object, used to run all-reduce to
handle last partial batch.
num_replicas_in_sync: Optional integer. If this is not None, the value is
used to decide how to rebatch datasets into smaller batches so that the
total batch size for each step (across all workers and replicas) adds up
to `dataset`'s batch size.
input_context: `InputContext` for sharding. Only pass this in for between
graph multi-worker cases where there is only one `input_worker`. In
these cases, we will shard based on the `input_pipeline_id` and
`num_input_pipelines` in the `InputContext`.
"""
dist_dataset = DistributedDatasetV1(
dataset,
input_workers,
strategy,
num_replicas_in_sync=num_replicas_in_sync,
input_context=input_context)
worker_iterators = _create_iterators_per_worker(
dist_dataset._cloned_datasets, input_workers, True) # pylint: disable=protected-access
super(DatasetIterator,
self).__init__(input_workers, worker_iterators, strategy,
dist_dataset._enable_get_next_as_optional) # pylint: disable=protected-access
self._element_spec = dist_dataset.element_spec
def _dummy_tensor_fn(value_structure):
"""A function to create dummy tensors from `value_structure`."""
def create_dummy_tensor(spec):
"""Create a dummy tensor with possible batch dimensions set to 0."""
if isinstance(spec, ragged_tensor.RaggedTensorSpec):
# Splice out the ragged dimensions.
# pylint: disable=protected-access
feature_shape = spec._shape[:1].concatenate(
spec._shape[(1 + spec._ragged_rank):])
feature_type = spec._dtype
# pylint: enable=protected-access
else:
feature_shape = spec.shape
feature_type = spec.dtype
    # Ideally we should set the batch dimension to 0; however, in
    # DistributionStrategy we don't know the batch dimension, so we guess it as
    # best we can. If the feature has unknown dimensions, we set them to 0. If
    # the feature shape is already static, we treat the first dimension as the
    # batch dimension and set it to 0.
dims = ([dim if dim is not None else 0 for dim in feature_shape.as_list()]
if feature_shape else [])
if dims and (isinstance(spec, ragged_tensor.RaggedTensorSpec) or
feature_shape.is_fully_defined()):
dims[0] = tensor_shape.Dimension(0)
if isinstance(spec, sparse_tensor.SparseTensorSpec):
return sparse_tensor.SparseTensor(
values=array_ops.zeros(0, feature_type),
indices=array_ops.zeros((0, len(dims)), dtypes.int64),
dense_shape=dims)
# Create the dummy tensor.
dummy_tensor = array_ops.zeros(tensor_shape.TensorShape(dims), feature_type)
if isinstance(spec, ragged_tensor.RaggedTensorSpec):
# Reinsert the ragged dimensions with size 0.
# pylint: disable=protected-access
row_splits = array_ops.zeros(1, spec._row_splits_dtype)
dummy_tensor = ragged_tensor.RaggedTensor.from_nested_row_splits(
dummy_tensor, (row_splits,) * spec._ragged_rank, validate=False)
# pylint: enable=protected-access
return dummy_tensor
return nest.map_structure(create_dummy_tensor, value_structure)
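# Illustrative sketch, not part of the original module: building a zero-sized
# dummy element for a structure of specs, as done when a replica has run out
# of data. The spec below is an assumption chosen for demonstration only.
def _example_dummy_batch():
  spec = {"x": sparse_tensor.SparseTensorSpec([4, 3], dtypes.float32)}
  # Returns {"x": SparseTensor} whose first (batch) dimension is 0.
  return _dummy_tensor_fn(spec)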
def _recover_shape_fn(data, value_structure):
"""Recover the shape of `data` the same as shape of `value_structure`."""
flattened_data = nest.flatten(data)
for i, spec in enumerate(nest.flatten(value_structure)):
for target, source in zip(
nest.flatten(flattened_data[i], expand_composites=True),
nest.flatten(spec, expand_composites=True)):
target.set_shape(source.shape)
# `SparseTensor` shape is not determined by the shape of its component
# tensors. Rather, its shape depends on a tensor's values.
if isinstance(spec, sparse_tensor.SparseTensorSpec) and spec.shape:
dense_shape = spec.shape
with ops.device(flattened_data[i].op.device):
# For partially defined shapes, fill in missing values from tensor.
if not dense_shape.is_fully_defined():
dense_shape = array_ops.stack([
flattened_data[i].dense_shape[j] if dim is None else dim
for j, dim in enumerate(dense_shape.as_list())
])
flattened_data[i] = sparse_tensor.SparseTensor(
indices=flattened_data[i].indices,
values=flattened_data[i].values,
dense_shape=dense_shape)
data = nest.pack_sequence_as(data, flattened_data)
return data
class _SingleWorkerDatasetIteratorBase(object):
"""Iterator for a single `tf.data.Dataset`."""
def __init__(self, dataset, worker, devices, options=None):
"""Create iterator for the `dataset` to fetch data to worker's `devices` .
A `MultiDeviceIterator` or `OwnedMultiDeviceIterator` is used to prefetch
input to the devices on the given worker.
Args:
dataset: A `tf.data.Dataset` instance.
worker: Worker on which ops should be created.
devices: Distribute data from `dataset` to these devices.
      options: optional `tf.distribute.InputOptions` controlling how the data
        is fetched to the worker's devices.
"""
self._dataset = dataset
self._worker = worker
self._devices = devices
self._element_spec = dataset.element_spec
self._options = options
self._make_iterator()
def _make_iterator(self):
raise NotImplementedError("must be implemented in descendants")
def _format_data_list_with_options(self, data_list):
"""Change the data in to a list type if required.
    The OwnedMultiDeviceIterator returns the data as a list,
    while the PER_REPLICA iterator (when used with prefetch disabled)
    returns it without the enclosing list. This fixes the inconsistency.
Args:
data_list: data_list
Returns:
list
"""
if (self._options and self._options.experimental_replication_mode ==
InputReplicationMode.PER_REPLICA and
not self._options.experimental_fetch_to_device):
return [data_list]
else:
return data_list
def get_next(self, device, name=None):
"""Get next element for the given device."""
del name
with ops.device(self._worker):
if _should_use_multi_device_iterator(self._options):
return self._iterator.get_next(device)
else:
return self._iterator.get_next()
def get_next_as_list_static_shapes(self, name=None):
"""Get next element from the underlying iterator.
Runs the iterator get_next() within a device scope. Since this doesn't use
get_next_as_optional(), it is considerably faster than get_next_as_list()
(but can only be used when the shapes are static).
Args:
name: not used.
Returns:
A list consisting of the next data from each device.
"""
del name
with ops.device(self._worker):
return self._format_data_list_with_options(self._iterator.get_next())
def get_next_as_list(self, name=None):
"""Get next element from underlying iterator.
If there is no data left, a list of dummy tensors with possible batch
dimensions set to 0 will be returned. Use of get_next_as_optional() and
extra logic adds overhead compared to get_next_as_list_static_shapes(), but
allows us to handle non-static shapes.
Args:
name: not used.
Returns:
A boolean tensor indicates whether there is any data in next element and
the real data as the next element or a list of dummy tensors if no data
left.
"""
del name
with ops.device(self._worker):
data_list = self._format_data_list_with_options(
self._iterator.get_next_as_optional())
result = []
for i, data in enumerate(data_list):
# Place the condition op in the same device as the data so the data
# doesn't need to be sent back to the worker.
with ops.device(self._devices[i]):
# Data will be fetched in order, so we only need to check if the first
# replica has value to see whether there is data left for this single
# worker.
if i == 0:
worker_has_value = data.has_value()
# pylint: disable=unnecessary-lambda
# pylint: disable=cell-var-from-loop
real_data = control_flow_ops.cond(
data.has_value(),
lambda: data.get_value(),
lambda: _dummy_tensor_fn(data.element_spec),
strict=True,
)
# Some dimensions in `replicas` will become unknown after we
# conditionally return the real tensors or the dummy tensors. Recover
# the shapes from `data.element_spec`. We only need to do this in
# non eager mode because we always know the runtime shape of the
# tensors in eager mode.
if not context.executing_eagerly():
real_data = _recover_shape_fn(real_data, data.element_spec)
result.append(real_data)
# pylint: enable=cell-var-from-loop
# pylint: enable=unnecessary-lambda
return worker_has_value, result
class _SingleWorkerDatasetIteratorSpec(type_spec.TypeSpec):
"""Type specification for `_SingleWorkerOwnedDatasetIterator`."""
__slots__ = ["_worker", "_devices", "_element_spec", "_options"]
def __init__(self, worker, devices, element_spec, options):
self._worker = worker
self._devices = tuple(device_util.canonicalize(d) for d in devices)
self._element_spec = element_spec
self._options = options
@property
def value_type(self):
return _SingleWorkerOwnedDatasetIterator
def _serialize(self):
return (self._worker, self._devices, self._element_spec, self._options)
@property
def _component_specs(self):
specs = []
if _should_use_multi_device_iterator(self._options):
specs.append(
multi_device_iterator_ops.MultiDeviceIteratorSpec(
self._devices, self._worker, element_spec=self._element_spec))
else:
specs.append(iterator_ops.IteratorSpec(element_spec=self._element_spec))
return specs
def _to_components(self, value):
return [value._iterator] # pylint: disable=protected-access
def _from_components(self, components):
return _SingleWorkerOwnedDatasetIterator(
dataset=None,
worker=self._worker,
devices=self._devices,
components=components,
element_spec=self._element_spec,
options=self._options)
@staticmethod
def from_value(value):
# pylint: disable=protected-access
return _SingleWorkerDatasetIteratorSpec(value._worker, value._devices,
value._element_spec, value._options)
class _SingleWorkerOwnedDatasetIterator(_SingleWorkerDatasetIteratorBase,
composite_tensor.CompositeTensor):
"""Iterator for a DistributedDataset instance."""
def __init__(self,
dataset=None,
worker=None,
devices=None,
components=None,
element_spec=None,
options=None):
"""Create iterator for the `dataset` to fetch data to worker's `devices` .
`OwnedMultiDeviceIterator` is used to prefetch input to the devices on the
given worker. The lifetime of this iterator is tied to the encompassing
python object. Once we go out of scope of the python object or return from
a tf.function the underlying iterator resource is deleted.
Args:
dataset: A `tf.data.Dataset` instance.
worker: Worker on which ops should be created.
devices: Distribute data from `dataset` to these devices.
components: Tensor components to construct the
_SingleWorkerOwnedDatasetIterator from.
element_spec: A nested structure of `TypeSpec` objects that represents the
type specification of elements of the iterator.
options: `tf.distribute.InputOptions` used to control options on how this
dataset is distributed.
"""
if worker is None or devices is None:
raise ValueError("Both `worker` and `devices` should be provided")
error_message = ("Either `dataset` or both `components` and `element_spec` "
"need to be provided.")
self._options = options
if dataset is None:
if (components is None or element_spec is None):
raise ValueError(error_message)
self._element_spec = element_spec
self._worker = worker
self._devices = devices
self._iterator = components[0]
else:
if (components is not None or element_spec is not None):
raise ValueError(error_message)
super(_SingleWorkerOwnedDatasetIterator,
self).__init__(dataset, worker, devices, self._options)
def _make_iterator(self):
"""Make appropriate iterator on the dataset."""
if not self._worker:
      raise ValueError("Worker device must be specified when creating an "
"owned iterator.")
if _should_use_multi_device_iterator(self._options):
host_device = device_util.get_host_for_device(self._worker)
with ops.device(self._worker):
if self._options is not None:
self._iterator = multi_device_iterator_ops.OwnedMultiDeviceIterator(
self._dataset,
self._devices,
source_device=host_device,
max_buffer_size=self._options
.experimental_per_replica_buffer_size,
prefetch_buffer_size=self._options
.experimental_per_replica_buffer_size)
else:
self._iterator = multi_device_iterator_ops.OwnedMultiDeviceIterator(
self._dataset, self._devices, source_device=host_device)
else:
with ops.device(self._worker):
self._iterator = iter(self._dataset)
@property
def element_spec(self):
return self._element_spec
@property
def _type_spec(self):
return _SingleWorkerDatasetIteratorSpec(self._worker, self._devices,
self._element_spec, self._options)
@property
def output_classes(self):
"""Returns the class of each component of an element of this iterator.
The expected values are `tf.Tensor` and `tf.SparseTensor`.
Returns:
A nested structure of Python `type` objects corresponding to each
component of an element of this dataset.
"""
return nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_classes(), # pylint: disable=protected-access
self._element_spec)
@property
def output_shapes(self):
"""Returns the shape of each component of an element of this iterator.
Returns:
A nested structure of `tf.TensorShape` objects corresponding to each
component of an element of this dataset.
"""
return nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_shapes(), # pylint: disable=protected-access
self._element_spec)
@property
def output_types(self):
"""Returns the type of each component of an element of this iterator.
Returns:
A nested structure of `tf.DType` objects corresponding to each component
of an element of this dataset.
"""
return nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_types(), # pylint: disable=protected-access
self._element_spec)
class _SingleWorkerDatasetIterator(_SingleWorkerDatasetIteratorBase):
"""Iterator for a single DistributedDatasetV1 instance."""
def _make_iterator(self):
"""Make appropriate iterator on the dataset."""
with ops.device(self._worker):
if self._options is not None:
self._iterator = multi_device_iterator_ops.MultiDeviceIterator(
self._dataset,
self._devices,
max_buffer_size=self._options.experimental_per_replica_buffer_size,
prefetch_buffer_size=self._options
.experimental_per_replica_buffer_size)
else:
self._iterator = multi_device_iterator_ops.MultiDeviceIterator(
self._dataset,
self._devices,
)
def initialize(self):
"""Initialize underlying iterator.
In eager execution, this simply recreates the underlying iterator.
In graph execution, it returns the initializer ops for the underlying
iterator.
Returns:
A list of any initializer ops that should be run.
"""
if ops.executing_eagerly_outside_functions():
self._iterator._eager_reset() # pylint: disable=protected-access
return []
else:
return [self._iterator.initializer]
@property
def output_classes(self):
return dataset_ops.get_legacy_output_classes(self._iterator)
@property
def output_shapes(self):
return dataset_ops.get_legacy_output_shapes(self._iterator)
@property
def output_types(self):
return dataset_ops.get_legacy_output_types(self._iterator)
class _SingleWorkerCallableIterator(object):
"""Iterator for a single tensor-returning callable."""
def __init__(self, fn, worker, devices):
self._fn = fn
self._worker = worker
self._devices = devices
def get_next(self, device, name=None):
"""Get next element for the given device from the callable."""
del device, name
with ops.device(self._worker):
return self._fn()
def get_next_as_list_static_shapes(self, name=None):
"""Get next element from the callable."""
del name
with ops.device(self._worker):
data_list = [self._fn() for _ in self._devices]
return data_list
def get_next_as_list(self, name=None):
"""Get next element from the callable."""
del name
with ops.device(self._worker):
data_list = [self._fn() for _ in self._devices]
return constant_op.constant(True), data_list
def initialize(self):
# TODO(petebu) Should this throw an exception instead?
return []
def _create_iterators_per_worker(worker_datasets,
input_workers,
enable_legacy_iterators,
options=None):
"""Create a multidevice iterator on each of the workers."""
assert isinstance(input_workers, InputWorkers)
assert len(worker_datasets) == len(input_workers.worker_devices)
iterators = []
for i, worker in enumerate(input_workers.worker_devices):
with ops.device(worker):
worker_devices = input_workers.compute_devices_for_worker(i)
if tf2.enabled() and not enable_legacy_iterators:
iterator = _SingleWorkerOwnedDatasetIterator(
dataset=worker_datasets[i],
worker=worker,
devices=worker_devices,
options=options)
else:
iterator = _SingleWorkerDatasetIterator(worker_datasets[i], worker,
worker_devices, options)
iterators.append(iterator)
return iterators
def _create_datasets_from_function_with_input_context(input_contexts,
input_workers,
dataset_fn):
"""Create device datasets per worker given a dataset function."""
datasets = []
for i, ctx in enumerate(input_contexts):
worker = input_workers.worker_devices[i]
with ops.device(worker):
dataset = dataset_fn(ctx)
datasets.append(dataset)
return datasets, dataset.element_spec
# TODO(sourabhbajaj): Remove this in lieu of distributed datasets
def _get_batched_dataset(d):
"""Get the batched dataset from `d`."""
# pylint: disable=protected-access
if isinstance(d, dataset_ops.DatasetV1Adapter):
d = d._dataset
if isinstance(d, (dataset_ops.BatchDataset, batching._MapAndBatchDataset)):
return d
elif isinstance(d, (dataset_ops.PrefetchDataset,
dataset_ops._OptionsDataset)):
return _get_batched_dataset(d._input_dataset)
raise ValueError(
"Unable to get batched dataset from the input dataset. `batch` "
"`map_and_batch` need to be the last operations on the dataset. "
"The batch operations can be followed by a prefetch.")
def _get_batched_dataset_attributes(d):
"""Get `batch_size`, `drop_remainder` of dataset."""
# pylint: disable=protected-access
assert isinstance(d,
(dataset_ops.BatchDataset, batching._MapAndBatchDataset))
if isinstance(d, dataset_ops.BatchDataset):
batch_size = d._batch_size
drop_remainder = d._drop_remainder
elif isinstance(d, batching._MapAndBatchDataset):
batch_size = d._batch_size_t
drop_remainder = d._drop_remainder_t
# pylint: enable=protected-access
if tensor_util.is_tf_type(batch_size):
batch_size = tensor_util.constant_value(batch_size)
if tensor_util.is_tf_type(drop_remainder):
drop_remainder = tensor_util.constant_value(drop_remainder)
return batch_size, drop_remainder
# TODO(sourabhbajaj): Remove this in lieu of distributed datasets
def _get_dataset_attributes(dataset):
"""Get the underlying attributes from the dataset object."""
# pylint: disable=protected-access
# First, get batch_size and drop_remainder from the dataset. We need
# to walk back the dataset creation process and find the batched version in
# order to get the attributes.
batched_dataset = _get_batched_dataset(dataset)
batch_size, drop_remainder = _get_batched_dataset_attributes(batched_dataset)
  # Second, the prefetch buffer should be obtained from the original dataset.
prefetch_buffer = None
if isinstance(dataset, dataset_ops.PrefetchDataset):
prefetch_buffer = dataset._buffer_size
elif (isinstance(dataset, dataset_ops.DatasetV1Adapter)
and isinstance(dataset._dataset, dataset_ops.PrefetchDataset)):
prefetch_buffer = dataset._dataset._buffer_size
return batch_size, drop_remainder, prefetch_buffer
def _should_use_multi_device_iterator(options):
"""Determine whether to use multi_device_iterator_ops."""
if (options is None or
options.experimental_replication_mode == InputReplicationMode.PER_WORKER
or
(options.experimental_replication_mode == InputReplicationMode.PER_REPLICA
and options.experimental_fetch_to_device)):
return True
return False
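# Illustrative sketch, not part of the original module: how the options above
# select the iterator implementation. `per_replica_options` is assumed to be a
# `tf.distribute.InputOptions` with
# experimental_replication_mode=InputReplicationMode.PER_REPLICA and
# experimental_fetch_to_device=False.
def _example_iterator_choice(per_replica_options):
  return (_should_use_multi_device_iterator(None),                 # True
          _should_use_multi_device_iterator(per_replica_options))  # False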
class MultiStepContext(object):
"""A context object that can be used to capture things when running steps.
This context object is useful when running multiple steps at a time using the
  `experimental_run_steps_on_iterator` API. For example, it allows the user's
  step function to specify which outputs to emit at what frequency. Currently
  it supports capturing output from the last step, as well as capturing
  non-tensor outputs. In the future it will be augmented to support other use
  cases such as outputting every N steps.
"""
def __init__(self):
"""Initialize an output context.
Returns:
A context object.
"""
self._last_step_outputs = {}
self._last_step_outputs_reduce_ops = {}
self._non_tensor_outputs = {}
@property
def last_step_outputs(self):
"""A dictionary consisting of outputs to be captured on last step.
Keys in the dictionary are names of tensors to be captured, as specified
when `set_last_step_output` is called.
Values in the dictionary are the tensors themselves. If
`set_last_step_output` was called with a `reduce_op` for this output,
then the value is the reduced value.
Returns:
A dictionary with last step outputs.
"""
return self._last_step_outputs
def _set_last_step_outputs(self, outputs):
"""Replace the entire dictionary of last step outputs."""
if not isinstance(outputs, dict):
raise ValueError("Need a dictionary to set last_step_outputs.")
self._last_step_outputs = outputs
def set_last_step_output(self, name, output, reduce_op=None):
"""Set `output` with `name` to be outputted from the last step.
Args:
name: String, name to identify the output. Doesn't need to match tensor
name.
output: The tensors that should be outputted with `name`. See below for
actual types supported.
reduce_op: Reduction method to use to reduce outputs from multiple
replicas. Required if `set_last_step_output` is called in a replica
context. Optional in cross_replica_context.
When present, the outputs from all the replicas are reduced using the
current distribution strategy's `reduce` method. Hence, the type of
`output` must be what's supported by the corresponding `reduce` method.
        For example, if using MirroredStrategy and reduction is set, output
must be a `PerReplica` value.
The reduce method is also recorded in a dictionary
`_last_step_outputs_reduce_ops` for later interpreting of the
outputs as already reduced or not.
"""
if distribution_strategy_context.in_cross_replica_context():
self._last_step_outputs_reduce_ops[name] = reduce_op
if reduce_op is None:
self._last_step_outputs[name] = output
else:
distribution = distribution_strategy_context.get_strategy()
self._last_step_outputs[name] = distribution.reduce(reduce_op, output,
axis=None)
else:
assert reduce_op is not None
def merge_fn(distribution, value):
self._last_step_outputs[name] = distribution.reduce(reduce_op, value,
axis=None)
# Setting this inside the `merge_fn` because all replicas share the same
# context object, so it's more robust to set it only once (even if all
# the replicas are trying to set the same value).
self._last_step_outputs_reduce_ops[name] = reduce_op
distribution_strategy_context.get_replica_context().merge_call(
merge_fn, args=(output,))
@property
def non_tensor_outputs(self):
"""A dictionary consisting of any non tensor outputs to be captured."""
return self._non_tensor_outputs
def set_non_tensor_output(self, name, output):
"""Set `output` with `name` to be captured as a non tensor output."""
if distribution_strategy_context.in_cross_replica_context():
self._non_tensor_outputs[name] = output
else:
def merge_fn(distribution, value):
# NOTE(priyag): For non tensor outputs, we simply return all the values
# in a list as reduction doesn't make sense on non tensors.
self._non_tensor_outputs[name] = (
distribution.experimental_local_results(value))
distribution_strategy_context.get_replica_context().merge_call(
merge_fn, args=(output,))
def _create_distributed_tensor_spec(strategy, tensor_spec):
"""Create a `tf.TypeSpec` for a given strategy and input `tensor_spec`.
Args:
strategy: The given `tf.distribute` strategy.
tensor_spec: `tf.TensorSpec` of a given value. The batch dimension of the
shape should be None if you have partial batches.
Returns:
A `tf.TypeSpec` that matches the values produced by a given strategy. This
    can be a `tf.TensorSpec` or a `PerReplicaSpec`.
"""
num_replicas = len(strategy.extended.worker_devices)
# For one device strategy that is not MultiWorkerMirroredStrategy, return the
# tensor_spec as is, since we don't wrap the output with PerReplica in this
# case.
# TODO(b/166464552): remove after we always wrap for all strategies.
if not _always_wrap(strategy):
return tensor_spec
# For other cases we assume the input to tf.function is a per replica type.
def _get_value_per_replica(tensor_spec_per_input):
value_specs = [tensor_spec_per_input for _ in range(num_replicas)]
return values.PerReplicaSpec(*value_specs)
return nest.map_structure(_get_value_per_replica, tensor_spec)
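# Illustrative sketch, not part of the original module: for a multi-replica or
# multi-worker strategy each element spec is wrapped into a PerReplicaSpec with
# one value spec per replica; for a default single-device strategy the spec is
# returned unchanged (see _always_wrap below).
def _example_distributed_spec(strategy, element_spec):
  return _create_distributed_tensor_spec(strategy, element_spec)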
def _replace_per_replica_spec(spec, i):
"""If `spec` is a `PerReplicaSpec`, then return its `i`th value_spec."""
if isinstance(spec, values.PerReplicaSpec):
return spec._value_specs[i] # pylint: disable=protected-access
else:
return spec
def _enable_get_next_as_optional(strategy, dataset):
"""Returns whether to enable using partial batch handling."""
# TODO(b/133073708): we currently need a flag to control the usage because
# there is a performance difference between get_next() and
# get_next_as_optional(). And we only enable get_next_as_optional when the
# output shapes are not static.
#
# TODO(rxsang): We want to always enable the get_next_as_optional behavior
# when user passed input_fn instead of dataset.
if not getattr(strategy.extended, "experimental_enable_get_next_as_optional",
False):
return False
if context.executing_eagerly():
# If the dataset is infinite, we don't need to enable last partial batch
# support. Currently the logic only applies to the case that distributed
# dataset is created in eager mode, as we need to evaluate the dataset
# cardinality.
with ops.device(dataset._variant_tensor.device): # pylint: disable=protected-access
if dataset.cardinality().numpy() == cardinality.INFINITE:
return False
return not _is_statically_shaped(
dataset.element_spec) or strategy.extended._in_multi_worker_mode() # pylint: disable=protected-access
def _create_per_replica(value_list, strategy):
"""Creates a PerReplica.
For strategies other than OneDeviceStrategy, it creates a PerReplica whose
type spec is set to the element spec of the dataset. This helps avoid
  retracing for partial batches. Retracing is problematic for multi-client
  setups because different clients may retrace at different times; retracing
  changes the collective keys in the tf.function and causes mismatches among
  clients.
For single client strategies, this simply calls distribute_utils.regroup().
Args:
value_list: a list of values, one for each replica.
strategy: the `tf.distribute.Strategy`.
Returns:
a structure of PerReplica.
"""
# TODO(b/166464552): always wrap for all one device strategies as well.
always_wrap = _always_wrap(strategy)
per_replicas = distribute_utils.regroup(value_list, always_wrap=always_wrap)
return per_replicas
def _always_wrap(strategy):
"""Returns whether to always wrap the values in a DistributedValues."""
return strategy.extended._in_multi_worker_mode() or len( # pylint: disable=protected-access
strategy.extended.worker_devices) > 1
def _rebatch_as_dynamic(per_replica_spec):
"""Rebatch the spec to have a dynamic batch dimension."""
assert isinstance(per_replica_spec, values.PerReplicaSpec), per_replica_spec
# pylint: disable=protected-access
def _rebatch(spec):
# Rebatch if possible.
try:
return spec._unbatch()._batch(None)
except ValueError:
pass
return spec
return values.PerReplicaSpec(
*nest.map_structure(_rebatch, per_replica_spec._value_specs))
# pylint: enable=protected-access
| apache-2.0 | 2,909,316,437,415,284,700 | 39.799839 | 110 | 0.663353 | false | 4.231346 | false | false | false |
dellytools/maze | readfq.py | 1 | 1674 | # source: https://github.com/lh3/readfq
def readfq(fp): # this is a generator function
last = None # this is a buffer keeping the last unprocessed line
while True: # mimic closure; is it a bad idea?
if not last: # the first record or a record following a fastq
for l in fp: # search for the start of the next record
if l[0] in '>@': # fasta/q header line
last = l[:-1] # save this line
break
if not last: break
name, seqs, last = last[1:].partition(" ")[0], [], None
for l in fp: # read the sequence
if l[0] in '@+>':
last = l[:-1]
break
seqs.append(l[:-1])
if not last or last[0] != '+': # this is a fasta record
yield name, ''.join(seqs), None # yield a fasta record
if not last: break
else: # this is a fastq record
seq, leng, seqs = ''.join(seqs), 0, []
for l in fp: # read the quality
seqs.append(l[:-1])
leng += len(l) - 1
if leng >= len(seq): # have read enough quality
last = None
yield name, seq, ''.join(seqs); # yield a fastq record
break
if last: # reach EOF before reading enough quality
yield name, seq, None # yield a fasta record instead
break
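# Illustrative usage sketch (not part of the original script): parsing an
# in-memory FASTA record; FASTQ works the same, with the third field holding
# the quality string instead of None.
def _example_readfq():
    from io import StringIO
    fasta = StringIO(u">seq1 description\nACGT\nACGT\n")
    return list(readfq(fasta))  # [(u'seq1', u'ACGTACGT', None)]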
if __name__ == "__main__":
import sys
n, slen, qlen = 0, 0, 0
for name, seq, qual in readfq(sys.stdin):
n += 1
slen += len(seq)
qlen += qual and len(qual) or 0
print n, '\t', slen, '\t', qlen
| mit | -3,479,504,749,326,615,600 | 39.829268 | 74 | 0.492832 | false | 3.948113 | false | false | false |
hello-base/web | apps/merchandise/music/managers.py | 1 | 1125 | # -*- coding: utf-8 -*-
from django.db import models
from django.db.models.query import QuerySet
class EditionManager(models.Manager):
def find_edition(self, release, edition, **kwargs):
if release:
kwargs[release.identifier] = release
if edition:
kwargs[edition.parent.identifier] = edition.parent
qs = super(EditionManager, self).get_queryset().order_by('released', 'romanized_name')
try:
return qs.filter(**kwargs)[0]
except IndexError:
return qs.none()
def primary_edition(self, release=None, edition=None):
editions = [self.model.EDITIONS.regular, self.model.EDITIONS.limited, self.model.EDITIONS.digital]
for kind in editions:
edition = self.find_edition(release, edition, kind=kind)
if edition:
return edition
return None
class TrackQuerySet(QuerySet):
def originals(self):
return self.filter(original_track__isnull=True)
class TrackOrderQuerySet(QuerySet):
def original_only(self):
return self.filter(is_instrumental=False)
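# Illustrative sketch (assumption, not part of the original app): the QuerySets
# above are typically attached to models as managers so the chainable filters
# become available on the default manager, e.g.
#     class Track(models.Model):
#         ...
#         objects = TrackQuerySet.as_manager()
# after which Track.objects.originals() returns tracks without an
# original_track.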
| apache-2.0 | 4,398,973,060,987,976,000 | 30.25 | 106 | 0.648 | false | 3.933566 | false | false | false |
ndronen/pylearnutils | pylearnutils/datasets/sparse_expander.py | 1 | 7801 | # From https://gist.github.com/ccsevers/10295174
import os.path
import numpy as np
from .utils import take_subset
from pylearn2.datasets.dataset import Dataset
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
from pylearn2.utils.iteration import (SequentialSubsetIterator,
FiniteDatasetIterator,
resolve_iterator_class)
import functools
import logging
import numpy
import warnings
from pylearn2.space import CompositeSpace, Conv2DSpace, VectorSpace, IndexSpace
from pylearn2.utils import safe_zip
try:
import scipy.sparse
except ImportError:
warnings.warn("Couldn't import scipy.sparse")
import theano
import gzip
floatX = theano.config.floatX
logger = logging.getLogger(__name__)
class SparseExpanderDataset(Dataset):
"""
SparseExpanderDataset takes a numpy/scipy sparse matrix and calls .todense()
as the batches are passed out of the iterator.
"""
def __init__(self, X_path=None, y_path=None, from_scipy_sparse_dataset=None, zipped_npy=False, means_path=None, stds_path=None, start_fraction=None, end_fraction=None, start=None, stop=None):
self.X_path = X_path
self.y_path = y_path
if self.X_path != None:
if zipped_npy == True:
logger.info('... loading sparse data set from a zip npy file')
self.X = scipy.sparse.csr_matrix(
numpy.load(gzip.open(X_path)), dtype=floatX)
else:
logger.info('... loading sparse data set from a npy file')
self.X = scipy.sparse.csr_matrix(
numpy.load(X_path).item(), dtype=floatX)
else:
logger.info('... building from given sparse dataset')
self.X = from_scipy_sparse_dataset.astype(floatX)
if self.y_path != None:
if zipped_npy == True:
logger.info('... loading sparse data set from a zip npy file')
#self.y = scipy.sparse.csr_matrix(
# numpy.load(gzip.open(y_path)), dtype=floatX).todense()
self.y = numpy.load(gzip.open(y_path))
if not isinstance(self.y, np.ndarray):
print("calling y.item")
                    self.y = self.y.item()
else:
logger.info('... loading sparse data set from a npy file')
self.y = numpy.load(y_path)
if not isinstance(self.y, np.ndarray):
print("calling y.item")
self.y = self.y.item()
# We load y as a sparse matrix, but convert it to a dense array,
# because otherwise MLP.mean_of_targets breaks.
orig_shape = self.y.shape
if scipy.sparse.issparse(self.y):
self.y = np.asarray(self.y.todense())
# Only make this a column vector if it's not one-hot.
if 1 in orig_shape or len(orig_shape) == 1:
nrow = np.max(orig_shape)
self.y = self.y.reshape((nrow, 1))
else:
self.y = None
        if self.y is not None:
            self.y = self.y.astype(floatX)
self.X, self.y = take_subset(self.X, self.y,
start_fraction, end_fraction, start, stop)
self.data_n_rows = self.X.shape[0]
self.num_examples = self.data_n_rows
self.fancy = False
self.stochastic = False
X_space = VectorSpace(dim=self.X.shape[1])
X_source = 'features'
if y_path is None:
space = X_space
source = X_source
else:
if self.y.ndim == 1:
dim = 1
else:
dim = self.y.shape[-1]
y_space = VectorSpace(dim=dim)
y_source = 'targets'
space = CompositeSpace((X_space, y_space))
source = (X_source, y_source)
if means_path is not None:
self.means = np.load(means_path)
if stds_path is not None:
self.stds = np.load(stds_path)
self.data_specs = (space, source)
self.X_space = X_space
self._iter_data_specs = (self.X_space, 'features')
def get_design_matrix(self):
return self.X
def get_batch_design(self, batch_size, include_labels=False):
"""
method inherited from Dataset
"""
self.iterator(mode='sequential', batch_size=batch_size)
return self.next()
def get_batch_topo(self, batch_size):
"""
method inherited from Dataset
"""
raise NotImplementedError('Not implemented for sparse dataset')
def get_data_specs(self):
"""
Returns the data_specs specifying how the data is internally stored.
This is the format the data returned by `self.get_data()` will be.
"""
return self.data_specs
def get_data(self):
"""
Returns
-------
data : numpy matrix or 2-tuple of matrices
Returns all the data, as it is internally stored.
The definition and format of these data are described in
`self.get_data_specs()`.
"""
if self.y is None:
return self.X
else:
return (self.X, self.y)
def get_num_examples(self):
return self.X.shape[0]
@functools.wraps(Dataset.iterator)
def iterator(self, mode=None, batch_size=None, num_batches=None,
topo=None, targets=None, rng=None, data_specs=None,
return_tuple=False):
"""
method inherited from Dataset
"""
self.mode = mode
self.batch_size = batch_size
self._targets = targets
self._return_tuple = return_tuple
if data_specs is None:
data_specs = self._iter_data_specs
# If there is a view_converter, we have to use it to convert
# the stored data for "features" into one that the iterator
# can return.
# if
space, source = data_specs
if isinstance(space, CompositeSpace):
sub_spaces = space.components
sub_sources = source
else:
sub_spaces = (space,)
sub_sources = (source,)
convert = []
for sp, src in safe_zip(sub_spaces, sub_sources):
if src == 'features':
conv_fn = lambda x: x.todense()
elif src == 'targets':
conv_fn = lambda x: x
else:
conv_fn = None
convert.append(conv_fn)
if mode is None:
if hasattr(self, '_iter_subset_class'):
mode = self._iter_subset_class
else:
raise ValueError('iteration mode not provided and no default '
'mode set for %s' % str(self))
else:
mode = resolve_iterator_class(mode)
return FiniteDatasetIterator(self,
mode(self.X.shape[0],
batch_size,
num_batches,
rng),
data_specs=data_specs,
return_tuple=return_tuple,
convert=convert)
def __iter__(self):
return self
def next(self):
indx = self.subset_iterator.next()
try:
rval = self.X[indx].todense()
            # apply optional normalization when means/stds were provided
            if getattr(self, 'means', None) is not None:
                rval = rval - self.means
            if getattr(self, 'stds', None) is not None:
                rval = rval / self.stds
        except IndexError:
            # the index of the minibatch went beyond the boundary
            raise StopIteration()
rval = tuple(rval)
if not self._return_tuple and len(rval) == 1:
rval, = rval
return rval
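# Illustrative usage sketch (assumption, not part of the original module):
# building the dataset directly from an in-memory scipy sparse matrix instead
# of .npy files, then iterating over dense minibatches.
def _example_sparse_expander():
    import scipy.sparse as sp
    X = sp.rand(100, 50, density=0.1, format='csr')
    dataset = SparseExpanderDataset(from_scipy_sparse_dataset=X)
    iterator = dataset.iterator(mode='sequential', batch_size=10)
    return [batch for batch in iterator]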
| bsd-3-clause | -6,055,127,632,347,842,000 | 32.337607 | 195 | 0.542879 | false | 4.138462 | false | false | false |
watson-developer-cloud/python-primer-companion-code | episode-2/flask/src/translation.py | 1 | 2054 | # -*- coding: utf-8 -*-
# Copyright 2016 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from watson_developer_cloud import LanguageTranslationV2 as LanguageTranslationService
def getTranslationService():
return LanguageTranslationService(username='<your username key for the Watson language translation service>',
password='<your password key for the service>')
def identifyLanguage(app, data):
txt = data.encode("utf-8", "replace")
language_translation = getTranslationService()
langsdetected = language_translation.identify(txt)
app.logger.info(json.dumps(langsdetected, indent=2))
primarylang = langsdetected["languages"][0]
retData = {key: primarylang[key] for key in ('language', 'confidence')}
app.logger.info(json.dumps(retData, indent=2))
return retData
def checkForTranslation(app, fromlang, tolang):
supportedModels = []
lt = getTranslationService()
models = lt.list_models()
modelList = models.get("models")
supportedModels = [model['model_id'] for model in modelList
if fromlang == model['source']
and tolang == model['target']]
return supportedModels
def performTranslation(app, txt, primarylang, targetlang):
lt = getTranslationService()
translation = lt.translate(txt, source=primarylang, target=targetlang)
theTranslation = None
if translation and ("translations" in translation):
theTranslation = translation['translations'][0]['translation']
return theTranslation
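# Illustrative usage sketch (assumption, not part of the original module):
# chaining the helpers above to translate text into English when a model for
# the detected source language exists. `app` is any object exposing a Flask
# style `logger`.
def _example_translate_to_english(app, txt):
    detected = identifyLanguage(app, txt)
    source = detected['language']
    if source == 'en':
        return txt
    if not checkForTranslation(app, source, 'en'):
        return None
    return performTranslation(app, txt, source, 'en')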
| apache-2.0 | -5,859,510,575,257,219,000 | 36.345455 | 111 | 0.729309 | false | 4.243802 | false | false | false |
justanr/pyxl | pyxl.py | 1 | 13205 | '''
This simple module consists of the Pyxl class and a few helper functions.
'''
from os.path import basename, join
from glob import glob
from PIL import Image, ImageDraw, ImageFont
#import flickrapi
#Helper functions.
def buildHex(hexStr):
'''
Accepts a supposed hex color string and ensures it's 6 characters long.
'''
hexStr = hexStr.lower().replace(' ','').replace('#','')
#TODO: Make this prettier.
if len(hexStr) == 1:
return hexStr * 6
elif len(hexStr) == 2:
return hexStr * 3
elif len(hexStr) == 3:
return (hexStr[0] * 2) + (hexStr[1] * 2) + (hexStr[2] * 2)
elif len(hexStr) > 3 and len(hexStr) < 6:
return '{0:0<6}'.format(hexStr)
elif len(hexStr) > 6:
return hexStr[0:6]
else:
return hexStr
def hexToRGB(hexStr):
'''Converts a hexStr color to a RGB tuple'''
    # Pretty self explanatory, but as a note this converts
# each hex pair (base16) to a base10 value
# hexToRGB('ff0000') would return (255, 0, 0) or pure red
hexStr = buildHex(hexStr)
return tuple([int(hexStr[i:i+2], 16) for i in range(0, 6, 2)])
def RGBToHex(RGB):
'''Converts a RGB tuple into a hex color'''
#TODO: Convert to new style formatting
return '%02x%02x%02x' % RGB
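# Illustrative examples (not part of the original module) of the helpers above:
#   buildHex('#abc')      -> 'aabbcc'
#   hexToRGB('f00')       -> (255, 0, 0)
#   RGBToHex((255, 0, 0)) -> 'ff0000'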
def calcGradDiff(startFill, stopFill, distance):
'''
Calculates the difference between the start and
stop fills over the specified distance.
'''
# account for the last pixel
distance = distance - 1.0
return tuple((stopFill[x] - startFill[x])/distance for x in range(3))
def buildPyxlName(pyxl):
'''
Builds an MD5 hash from Pyxl.getInfo, Pyxl.getSize and Pyxl.getOptions
'''
from hashlib import md5
name = '{}-{}-{}'.format(pyxl.getInfo(), pyxl.getSize(), pyxl.getOptions())
return md5(name).hexdigest() + ".jpg"
def savePyxlImage(pyxl, path='imgs'):
'''
A simple save function for pyxl. Consider replacing with your own.
'''
    from PIL import ImageFile
ImageFile.MAXBLOCK = pyxl.image.size[0] * pyxl.image.size[1]
fullpath = join(path, buildPyxlName(pyxl))
pyxl.image.save(fullpath, 'JPEG', optimize=True,
progressive=True
)
def shiftRGB(old, new, shift):
'''
Shifts an RGB towards a new value.
Shift can be anything that returns an integer or float.
'''
change = lambda x: (x[1]*shift)+(x[0]*(1-shift))
return tuple(map(change, zip(old, new)))
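# Illustrative example (not part of the original module): shifting halfway from
# red towards white returns the midpoint of each channel.
#   shiftRGB((255, 0, 0), (255, 255, 255), 0.5) -> (255.0, 127.5, 127.5)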
class Pyxl(object):
'''
This class builds an image based on a series of inputs.
Either constructing it solely in PIL or pulling one from flickr.
'''
#TODO: Better documentation.
def __init__(self, info, size, options=None, fonts='fonts'):
# Initializing some very key variables.
self.info = {}
self.size = ()
self.options = {}
self.fonts = {}
self.draw = None
self.image = None
self.defaults = {
'font':'liberationsans',
'colors':[hexToRGB('ffffff'), hexToRGB('ff0000')]
}
# Build the fonts dictionary.
self.loadFonts(fonts)
# Load all the arguments passed to Pyxl
self.setInfo(info)
self.setSize(size)
self.setOptions(options)
def setInfo(self, info):
'''
This function sets the information Pyxl needs to start an image.
It accepts one of three string patterns:
tag or a series of tags delimited by a comma
-- In this case, it is a flickr image
OR
color:hex
-- A solid color image
OR
gradient:hex,hex
-- A gradient image, there is an optional h argument at the end
The info variable contains the following bits:
type: This tells Pyxl what sort of image to produce
tags: This key is only set for a flickr image,
it determines what tags to pull an image from.
color: A list of RGB tuples.
'''
# Determine which kind of image we want
# No colon found, we want to contact flickr
if info.find(':') == -1:
self.info['type'] = 'flickr'
self.info['tags'] = info.split(',')
self.draw = self.drawFlickr
# We are building an image with PIL
else:
info = info.split(':')
# We're drawing a gradient.
if info[1].find(',') != -1:
self.draw = self.drawGradient
self.info['type'] = 'gradient'
info[1] = info[1].split(',')
self.info['colors'] = [ hexToRGB(info[1][0]),
hexToRGB(info[1][1])
]
# Specifically, a horizontal gradient
if len(info[1]) == 3:
self.info['type'] = 'hgradient'
# Just a solid image please
else:
self.draw = self.drawColor
self.info['type'] = 'color'
self.info['colors'] = [hexToRGB(info[1])]
def getInfo(self):
'''Returns a string representation of info dictionary.'''
if self.info['type'] == 'flickr':
return ','.join(self.info['tags'])
elif self.info['type'] == 'color':
return 'color:{}'.format(RGBToHex(self.info['colors'][0]))
else:
colors = ','.join([RGBToHex(x) for x in self.info['colors']])
if self.info['type'] == 'hgradient':
colors = colors + ',h'
return 'gradient:{}'.format(colors)
def setSize(self, size):
'''
Sets the total size of the image.
This function accepts a string in the form of widthxheight.
This function will also ensure that the dimensions are between 1
        and the maximum (currently 2000).
'''
default = 200
maximum = 2000
# seriously, who needs an image this big
sizes = []
for x in size.split('x'):
try:
# Probably a better way to do this, but no point in letting this
                # ruin the script. Even though I highly doubt someone will
# pass something like axd as the size argument from the API,
# better safe than sorry.
x = int(x)
except ValueError:
x = default
if x > maximum:
x = maximum
elif x < 1:
x = default
sizes.append(x)
if len(sizes) != 2:
sizes = [sizes[0], sizes[0]]
self.size = tuple(sizes)
def getSize(self):
'''
        Returns string representation of the image size in
form of widthxheight
'''
return 'x'.join([str(x) for x in self.size])
def setOptions(self, options):
'''
This function accepts a string for the options of Pyxl.
It should be formatted as: option:value,option2:value.
There are just a few current valid options:
seed: This option is to create a new image from the same options.
text: A hex color that is converted to a RGB tuple.
            dimensions: This should be set to hide; if this option is present,
            the dimensions are not displayed on the image.
            font: This sets the font for the image text,
            it uses the default if the font isn't listed in Pyxl.fonts
'''
if options is None:
#defaults ahoy!
self.options = {
'text':self.defaults['colors'][0],
'font':self.setFont(self.defaults['font'])
}
else:
valid = ['seed', 'dimensions', 'text', 'font']
for option in options.lower().split(','):
option = option.split(':')
#prevent a bunch of spamming non-recognized options
if option[0] not in valid:
continue
elif option[0] == 'font':
option[1] = self.setFont(option[1])
elif option[0] == 'text':
try:
# again, probably a better way
# but better safe than sorry
option[1] = hexToRGB(option[1])
except ValueError:
option[1] = self.defaults['colors'][0]
elif option[0] == 'dimensions':
option[1] = 'hide'
elif option[0] == 'seed' and self.info['type'] != 'flickr':
# There's no point in a seed for a none flickr image
continue
self.options[option[0]] = option[1]
#double check to make sure at least font and text got set.
if 'font' not in self.options:
self.options['font'] = self.setFont(self.defaults['font'])
if 'text' not in self.options:
self.options['text'] = self.defaults['colors'][0]
def getOptions(self):
'''Returns a string representation of all the options set.'''
options = ''
for key in sorted(self.options.keys()):
if key == 'text':
option = RGBToHex(self.options['text'])
elif key == 'font':
option = basename(self.options['font']).lower().split('.')[0]
else:
option = self.options[key]
options = options + '{}:{},'.format(key, option)
return options.rstrip(',')
def loadFonts(self, location='fonts'):
'''
This function scans the location folder for fonts and stores them in a
dictionary. The keys are the lowercased version of the file name,
split at the first dot.
LiberationSans.ttf becomes
{'liberationsans':'fonts/LiberationSans.ttf'}
Currently, it is only implemented to find TrueType fonts.
'''
fonts = glob(join(location, '*.ttf'))
self.fonts = {
basename(font).lower().split('.')[0]:font for font in fonts
}
def setFont(self, font):
'''
This function sets the font for the text on the image.
If it receives a font that isn't in Pyxl's font library,
it sets it to the default.
'''
if font not in self.fonts.keys():
return self.fonts[self.defaults['font']]
return self.fonts[font]
def drawColor(self):
'''Creates a solid colored image.'''
self.image = Image.new('RGB', self.size, self.info['colors'][0])
if 'dimensions' not in self.options:
self.drawDimensions()
def drawGradient(self):
'''Creates a gradient image.'''
# this'll be much easier to work with
height = self.size[1]
width = self.size[0]
# set the correct distance
if self.info['type'] == 'hgradient':
distance = width
else:
distance = height
# again, easier to work with
start = self.info['colors'][0]
stop = self.info['colors'][1]
# make a new blank image
self.image = Image.new('RGB', self.size, hexToRGB('ffffff'))
draw = ImageDraw.Draw(self.image)
for i in range(distance):
# set the correct draw positions
if self.info['type'] == 'hgradient':
pos = (i, 0, i, height)
else:
pos = (0, i, width, i)
# move the start color closer to the end color
rgb = shiftRGB(start, stop, float(i)/distance)
fill = tuple(map(int, map(round, rgb)))
draw.line(pos, fill=fill)
if 'dimensions' not in self.options:
self.drawDimensions()
def drawFlickr(self):
'''Creates an image based on a flickr image.'''
pass
def getFlickrImage(self):
'''
Retrieves a single flickr image based on Pyxl.info['tags']
'''
pass
def drawDimensions(self):
'''Creates the dimensions image.'''
text = self.getSize()
size = 1
font = ImageFont.truetype(self.options['font'], size)
img_fraction = 0.5
while (font.getsize(text)[0] < int(self.size[0] * img_fraction)) and \
(font.getsize(text)[1] < int(self.size[1]*img_fraction)):
size += 1
font = ImageFont.truetype(self.options['font'], size)
font = ImageFont.truetype(self.options['font'], size)
pos = ( (self.size[0] - font.getsize(text)[0])/2,
(self.size[1] - font.getsize(text)[1])/2
)
draw = ImageDraw.Draw(self.image)
draw.text(pos, text, font=font, fill=self.options['text'])
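# Illustrative usage sketch (not part of the original module): rendering a
# 300x200 horizontal gradient with the dimensions text hidden and saving it
# with the helper above. Assumes a fonts/ directory containing the default
# LiberationSans.ttf, as expected by Pyxl's defaults.
def _example_pyxl():
    pyxl = Pyxl('gradient:ff0000,0000ff,h', '300x200', 'dimensions:hide')
    pyxl.draw()
    savePyxlImage(pyxl, path='imgs')
    return buildPyxlName(pyxl)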
| mit | 8,869,948,142,380,855,000 | 29.780886 | 79 | 0.526089 | false | 4.202737 | false | false | false |
elemel/drillion | drillion/cannon_entity_creator.py | 1 | 1825 | from drillion.animation_component import AnimationComponent
from drillion.collision import CollisionBody
from drillion.collision_component import CollisionComponent
from drillion.entity import Entity
from drillion.maths import Polygon2, Transform2
from drillion.sprite import PolygonSprite
from drillion.sprite_component import SpriteComponent
from drillion.transform_component import TransformComponent
import random
class CannonEntityCreator(object):
def __init__(self, animation_update_phase, draw_phase, batch):
self._animation_update_phase = animation_update_phase
self._draw_phase = draw_phase
self._batch = batch
def create(self, ship_entity, position=(0.0, 0.0), angle=0.0, length=1.0,
width=0.1, color=(255, 255, 255, 255)):
vertices = [(0.0, -0.5), (1.0, -0.5), (1.0, 0.5), (0.0, 0.5)]
polygon = Polygon2(vertices)
parent_transform_component = \
ship_entity.find_component(TransformComponent)
transform = Transform2()
transform.rotate(angle)
transform.scale(length, width)
transform.translate(*position)
transform_component = \
TransformComponent(transform, parent=parent_transform_component)
sprite = PolygonSprite(vertices, color=color, transform=transform)
sprite_component = SpriteComponent(sprite, self._batch)
animation_component = AnimationComponent(transform_component,
sprite_component,
self._animation_update_phase,
self._draw_phase)
components = [transform_component, sprite_component,
animation_component]
return Entity(components, parent=ship_entity)
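# Illustrative usage sketch (assumption, not part of the original module):
# mounting a cannon halfway along a ship's hull, pointing forward.
#     creator = CannonEntityCreator(animation_update_phase, draw_phase, batch)
#     cannon = creator.create(ship_entity, position=(0.5, 0.0), angle=0.0,
#                             length=0.8, width=0.1)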
| mit | -3,641,659,004,998,115,000 | 42.452381 | 78 | 0.647123 | false | 4.324645 | false | false | false |
desmo999r/cmssysadmin | cmssysadmin/__init__.py | 1 | 1437 | import os
import socket
import fcntl
import struct
import subprocess
import logging
logger = logging.getLogger(__name__)
class CmdLine(object):
options = {}
class __metaclass__(type):
def __new__(cls, *kargs, **kwargs):
t = type.__new__(cls, *kargs, **kwargs)
with open("/proc/cmdline") as f:
for option in f.readline().strip().split():
fields = option.split("=")
if len(fields) == 1:
t.options[fields[0]] = True
else:
t.options[fields[0]] = fields[1]
logger.info("/proc/cmdline options: " + str(t.options))
return t
def get_bootif():
try:
mac = CmdLine.options['BOOTIF'][3:].replace('-', ':').strip().lower()
except KeyError:
return None
for n in os.listdir("/sys/class/net"):
with open("/sys/class/net/" + n + "/address") as f:
if mac == f.read().strip().lower():
return n, mac
raise Exception("There is a BOOTIF param but no matching interface")
def get_ip_address(ifname):
"""Returns the NIC current IPv4 address"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ip = socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
logger.info("Current IP is %s", ip)
return ip
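# Minimal usage sketch (assumes the machine was PXE-booted so BOOTIF is present
# on the kernel command line; the interface name and address will vary):
#
#     nic, mac = get_bootif()
#     print get_ip_address(nic)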
# vim: set ts=4 sw=4 tw=0 et :
| gpl-2.0 | -8,696,028,773,016,488,000 | 28.9375 | 77 | 0.551844 | false | 3.565757 | false | false | false |
spadev/chatlogsync | chatlogsync.py | 1 | 10218 | #!/usr/bin/env python
# Copyright 2013 Evan Vitero
# This file is part of chatlogsync.
# chatlogsync is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# chatlogsync is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with chatlogsync. If not, see <http://www.gnu.org/licenses/>.
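# Example invocation (the source/destination paths and the "adium" format name
# are illustrative; the available formats come from chatlogsync.formats):
#
#     ./chatlogsync.py --format adium ~/.purple/logs ~/chatlogs-converted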
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import sys
import signal
import traceback
from os.path import join, dirname, exists, isfile, isdir, realpath
from argparse import ArgumentParser, ArgumentTypeError
from multiprocessing import Process, cpu_count, Value, Manager, Lock
import chatlogsync
from chatlogsync import const, formats, util, timezones
WORKERS = []
class Progress(object):
"""Thread-safe progress updater"""
def __init__(self):
self._nread = Value('i', 0, lock=False)
self._nwrote = Value('i', 0, lock=False)
self._nexisting = Value('i', 0, lock=False)
self._nerror = Value('i', 0, lock=False)
self._lock = Lock()
def print_status(self, msg=None):
dryrun = ' (DRY RUN)' if const.DRYRUN else ''
if msg:
print_v(msg)
print_('\r[read:%i wrote:%i existing:%i error:%i]%s ' %
(self.nread, self.nwrote, self.nexisting, self.nerror, dryrun),
end='', flush=True, file=sys.stderr)
if msg:
print_v('\n')
def _incr(self, var, n=1):
with self._lock:
var.value += n
def read(self, path):
self._incr(self._nread)
def wrote(self, path):
self._incr(self._nwrote)
self.print_status('wrote %s' % path)
def error(self, path):
tb = traceback.format_exc()
self._incr(self._nerror)
print_e('%s\n%s' % (path, tb))
def existing(self, path):
self._incr(self._nexisting)
print_v('existing %s' % path)
@property
def nerror(self):
return self._nerror.value
@property
def nwrote(self):
return self._nwrote.value
@property
def nread(self):
return self._nread.value
@property
def nexisting(self):
return self._nexisting.value
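# Each Parser is a worker process: it pulls source paths from a shared queue,
# asks every known format module whether it can parse the path, converts the
# conversations it finds, and writes them into the destination tree, recording
# progress through the shared Progress object.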
class Parser(Process):
def __init__(self, outformat, force, destination, queue, files,
progress, fslock):
super(Parser, self).__init__()
self.queue = queue
self.progress = progress
self.tempfiles = []
self.destination = destination
self.outformat = outformat
self.force = force
self._files = files
self._fslock = fslock
self._modules = [x() for x in formats.all_formats.values()]
self._modules_map = {x.type: x for x in self._modules}
self._stopped = Value('i', 0)
self._curpath = ''
def stop(self):
self._stopped.value = 1
@property
def stopped(self):
return self._stopped.value == 1
def cleanup(self):
for tempfile in self.tempfiles:
if exists(tempfile):
os.unlink(tempfile)
def _process_path(self, path):
self._curpath = path
for i, rmodule in enumerate(self._modules):
parsed = rmodule.parse_path(path)
if parsed:
# try this module first next time
if i != 0:
self._modules[i] = self._modules[0]
self._modules[0] = rmodule
break
# file is not a chatlog
if not parsed:
return None
self.progress.read(path)
wmodule = self._modules_map[self.outformat] \
if self.outformat else rmodule
for c in parsed:
self._curpath = path
dstpath = wmodule.get_path(c)
real_dstpath = realpath(join(self.destination, dstpath))
with self._fslock:
if real_dstpath in self._files:
f = 1
elif exists(real_dstpath):
f = 2
else:
f = 0
self._files[real_dstpath] = f
if f:
self.progress.existing(dstpath)
self.progress.print_status()
if not self.force:
continue
if const.DRYRUN:
conversation = c
else:
conversation = rmodule.parse_conversation(c)
tmppath = real_dstpath+'.tmp'
self.tempfiles.append(tmppath)
self._curpath = real_dstpath
self._write_outfile(wmodule, real_dstpath, tmppath,
[conversation])
del self.tempfiles[-1]
self.progress.wrote(dstpath)
def _write_outfile(self, module, path, tmppath, conversations):
dstdir = dirname(path)
with self._fslock:
if not exists(dstdir):
os.makedirs(dstdir)
module.write(tmppath, conversations)
os.rename(tmppath, path)
return len(conversations)
def run(self):
signal.signal(signal.SIGINT, signal.SIG_IGN)
path = ''
while True:
try:
path = self.queue.get()
if path is None:
break
self._process_path(path)
except IOError as e:
break
except Exception as e:
self.progress.error(self._curpath)
self.cleanup()
def isfileordir(value):
if not isfile(value) and not isdir(value):
raise ArgumentTypeError("'%s' is not a file or directory" % value)
return value
def isnotfile(value):
if isfile(value):
raise ArgumentTypeError("'%s' is not a file" % value)
return value
def parse_args():
parser = \
ArgumentParser(description=const.PROGRAM_DESCRIPTION,
prog=const.PROGRAM_NAME)
parser.add_argument('source', nargs='+', type=isfileordir,
help=_('source log file or directory'))
parser.add_argument('destination', type=isnotfile,
help=_('destination log directory'))
parser.add_argument("-d", "--debug",
help=_("enable debug output"),
action='store_true',
default=False,
)
parser.add_argument("-f", "--format",
choices=[str(x) for x in formats.output_formats],
help=_("format to use for output files"),
default=None,
)
parser.add_argument("-F", "--force",
help=_("force regeneration of existing logs at "
"destination"),
action='store_true',
default=False,
)
parser.add_argument("-n", "--dry-run",
help=_("perform a trial run with no changes made"),
action='store_true',
default=False,
)
parser.add_argument("--no-comments",
help=_("do not write comments to converted logs"),
action='store_true',
default=False,
)
parser.add_argument("-q", "--quiet",
help=_("suppress warnings"),
action='store_true',
default=False,
)
parser.add_argument("-t", "--threads", metavar="NUM_THREADS",
help=_("use NUM_THREADS worker processes for parsing"),
type=int,
default=cpu_count(),
)
parser.add_argument("-v", "--verbose",
help=_("enable verbose output"),
action='store_true',
default=False,
)
options = parser.parse_args()
if options.debug:
const.DEBUG = True
if options.verbose:
const.VERBOSE = True
if options.quiet:
const.QUIET = True
if options.dry_run:
const.DRYRUN = True
if options.no_comments:
const.NO_COMMENTS = True
return options
def convert(paths, options):
global WORKERS
manager = Manager()
fslock = Lock()
progress = Progress()
queue = manager.Queue()
files = manager.dict()
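    # `files` is shared between workers so that two processes never race to
    # write the same destination path; `fslock` guards the check-then-create
    # of destination directories as well as updates to `files`.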
WORKERS = [Parser(options.format, options.force, options.destination,
queue, files, progress, fslock)
for i in range(options.threads)]
for w in WORKERS:
w.start()
for path in paths:
queue.put(path)
for w in WORKERS:
queue.put(None)
for w in WORKERS:
w.join()
return 0
def main(options):
print_('gathering paths...', end='', flush=True, file=sys.stderr)
src_paths = util.get_paths(options.source)
print_('done', file=sys.stderr)
convert(src_paths, options)
return 0
def cleanup(exitcode):
progress = None
for w in WORKERS:
progress = w.progress
w.stop()
for w in WORKERS:
w.join()
if progress:
progress.print_status('done')
exitcode += progress.nerror
if not const.VERBOSE:
print_('')
return exitcode
if __name__ == "__main__":
options = parse_args()
exitcode = 0
try:
timezones.init()
exitcode = main(options)
except KeyboardInterrupt:
exitcode = 1
print_e("***aborted***")
except Exception as e:
exitcode = 1
traceback.print_exc()
finally:
sys.exit(cleanup(exitcode))
| gpl-3.0 | -4,886,149,893,731,973,000 | 29.777108 | 79 | 0.534743 | false | 4.275314 | false | false | false |
evilsephiroth/plugin.video.vvvvid | vvvvid.py | 1 | 1369 | import urllib2
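# Helpers used by the add-on to decode VVVVID's obfuscated stream locations:
# dec_ei() undoes the XOR scrambling applied to the token and f() regroups the
# resulting 6-bit values into bytes, base64-style, before percent-decoding.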
def f(m):
l = list()
o = 0
b = None
while not b and o < len(m):
n = m[o] <<2
o +=1
k = -1
j = -1
if o < len(m):
n += m[o] >> 4
o += 1
if o < len(m):
k = (m[o - 1] << 4) & 255;
k += m[o] >> 2;
o += 1
if o < len(m):
j = (m[o - 1] << 6) & 255;
j += m[o]
o += 1
else:
b = True
else:
b = True
else:
b = True
l.append(n)
if k != -1:
l.append(k)
if j != -1:
l.append(j)
return l
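# dec_ei() maps each character of the token through the permuted alphabet g,
# makes two reverse passes XOR-ing each value with its right-hand neighbour
# (wrapping around), packs the 6-bit values via f(), and percent-decodes the
# result. A minimal usage sketch (the token string is purely hypothetical):
#
#     url = dec_ei('scrambled-token-from-the-api')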
def dec_ei(h):
g = 'MNOPIJKL89+/4567UVWXQRSTEFGHABCDcdefYZabstuvopqr0123wxyzklmnghij'
c = list()
for e in range(0,len(h)):
c.append(g.find(h[e]))
for e in range(len(c)*2-1,-1,-1):
#print 'e=' + str(e)
a = c[e % len(c)] ^ c[(e+1)%len(c)]
#print 'a='+str(a)
c[e%len(c)] = a
#print 'c['+str(e % len(c))+']='+ str(c[e % len(c)])
c = f(c)
d = ''
for e in range(0,len(c)):
d += '%'+ (('0'+ (str(format(c[e],'x'))))[-2:])
return urllib2.unquote(d) | gpl-2.0 | -2,904,059,955,838,083,000 | 22.482143 | 74 | 0.308985 | false | 3.132723 | false | false | false |