text (stringlengths 12-1.05M) | repo_name (stringlengths 5-86) | path (stringlengths 4-191) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int32 12-1.05M) | keyword (sequencelengths 1-23) | text_hash (stringlengths 64-64) |
---|---|---|---|---|---|---|---|
#!/usr/bin/env python
from datetime import date, timedelta
import click
from paper_cal import *
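# NOTE: the star import from paper_cal is assumed to provide the month/weekday
# constants (JANUARY, AUGUST, SUNDAY, WEEK1..WEEK4, ...) and the helpers
# closest_date(), repeat_date(), easter(), is_leap() and ordinal() used below.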
@click.command()
@click.option(
'--year',
'-y',
default=date.today().year,
help='Year to show',
)
def main(year):
    '''Print assorted holidays, observances and anniversaries for the given year.'''
# https://en.wikipedia.org/wiki/(137108)_1999_AN10
# https://fr.wikipedia.org/wiki/(137108)_1999_AN10
# https://en.wikipedia.org/wiki/(35396)_1997_XF11
# https://fr.wikipedia.org/wiki/(35396)_1997_XF11
if 2027 == year:
print(f'{date(year, AUGUST, 7)} 06:48 1999 AN10 Asteroid Pass')
if 2028 == year:
print(f'{date(year, OCTOBER, 26)} 06:44 1997 XF11 Asteroid Pass')
# https://en.wikipedia.org/wiki/Daylight_saving_time_by_country
# https://en.wikipedia.org/wiki/Daylight_saving_time_in_Canada
# https://www.timeanddate.com/time/zones/y
# https://www.timeanddate.com/time/zones/r
# https://www.timeanddate.com/time/zones/q
# https://www.timeanddate.com/time/zones/z
# https://www.timeanddate.com/time/zones/a
# https://www.timeanddate.com/time/zones/b
# https://www.timeanddate.com/time/zones/m
# DST Begins = "Spring forward"
# DST Ends = "Fall back"
# UTC-12:00 -> Yankee
# UTC-05:00 -> Romeo
# UTC-04:00 -> Quebec
# UTC+00:00 -> Zulu
# UTC+01:00 -> Alpha
# UTC+02:00 -> Bravo
# UTC+12:00 -> Mike
print(
f'{closest_date(SUNDAY, date(year, MARCH, WEEK2))} 02:00 Daylight Savings Time Begins (CA, US)'
    ) # Heure d'été commence (CA, US)
print(
f'{closest_date(SUNDAY, date(year, MARCH, WEEK4), last=True)} 01:00Z Daylight Savings Time Begins (EU, UK)'
    ) # Heure d'été commence (EU, UK)
print(
f'{closest_date(SUNDAY, date(year, NOVEMBER, WEEK1))} 02:00 Daylight Savings Time Ends (CA, US)'
    ) # Heure d'été termine (CA, US)
print(
f'{closest_date(SUNDAY, date(year, OCTOBER, WEEK4), last=True)} 01:00Z Daylight Savings Time Ends (EU, UK)'
    ) # Heure d'été termine (EU, UK)
# https://en.wikipedia.org/wiki/Friday_The_13th
# https://fr.wikipedia.org/wiki/Vendredi_treize
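    # Scan the whole year week by week, starting from the Friday closest to
    # January 4th (repeat_date() is assumed to yield successive weekly dates),
    # and report every Friday that lands on the 13th.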
friday = repeat_date(closest_date(FRIDAY, date(year, JANUARY, 4)))
for week in range(1, 55):
found = next(friday)
if year == found.year and 13 == found.day:
print(f'{found} Friday the 13th') # Vendredi treize
# https://www.canada.ca/en/canadian-heritage/services/important-commemorative-days.html
# https://www.canada.ca/fr/patrimoine-canadien/services/journees-importantes-commemoratives.html
# https://www.canada.ca/en/canadian-heritage/news/2022/01/statement-by-minister-hussen-on-raoul-wallenberg-day.html
# https://www.canada.ca/fr/patrimoine-canadien/nouvelles/2022/01/declaration-du-ministrehussen-a-loccasion-de-la-journee-raoulwallenberg.html
# https://en.wikipedia.org/wiki/Raoul_Wallenberg
# https://fr.wikipedia.org/wiki/Raoul_Wallenberg
    print(f'{date(year, JANUARY, 17)} Raoul Wallenberg Day')
# Journée Raoul Wallenberg
# https://en.wikipedia.org/wiki/Martin_Luther_King_Jr._Day
# https://fr.wikipedia.org/wiki/Martin_Luther_King_Day
print(
f'{closest_date(MONDAY, date(year, JANUARY, WEEK3))} Martin Luther King Jr. Day (US)'
) # Journée de Martin Luther King Jr. (US)
# Inauguration Day (US) is January 20th or the 21st if the 20th is a Sunday
# every 4th year where "year mod 4 == 1" (2001, ..., 2013, 2017, 2021,
# 2025, 2029, etc.)
# https://en.wikipedia.org/wiki/United_States_presidential_inauguration
if 1 == year % 4:
if SUNDAY == date.weekday(date(year, JANUARY, 20)):
print(f'{date(year, JANUARY, 21)} Inauguration Day (US)')
else:
print(f'{date(year, JANUARY, 20)} Inauguration Day (US)')
# Jour d'inauguration (US)
# https://en.wikipedia.org/wiki/Groundhog_Day
# https://fr.wikipedia.org/wiki/Jour_de_la_marmotte
print(f'{date(year, FEBRUARY, 2)} Groundhog Day') # Jour de la marmotte
# https://en.wikipedia.org/wiki/Washington's_Birthday
# https://en.wikipedia.org/wiki/Presidents%27_Day
# https://fr.wikipedia.org/wiki/Presidents_Day
print(f'{closest_date(MONDAY, date(year, FEBRUARY, WEEK3))} President\'s Day (US)')
# Journée de la Présidence (US)
# https://en.wikipedia.org/wiki/April_Fools'_Day
# https://fr.wikipedia.org/wiki/Poisson_d%27avril
print(f'{date(year, APRIL, 1)} April Fool\'s Day') # Poisson d'avril
# https://en.wikipedia.org/wiki/Tartan_Day
# https://fr.wikipedia.org/wiki/Tartan_Day
print(f'{date(year, APRIL, 6)} Tartan Day') # Journée du Tartan
# https://en.wikipedia.org/wiki/Earth_Day
# https://fr.wikipedia.org/wiki/Jour_de_la_Terre
print(f'{date(year, APRIL, 22)} Earth Day') # Jour de la Terre
# https://en.wikipedia.org/wiki/Anzac_Day
print(f'{date(year, APRIL, 25)} ANZAC Day (AU, NZ)')
# Jour d'ANZAC (AU, NZ)
# https://en.wikipedia.org/wiki/Mother's_Day
# https://fr.wikipedia.org/wiki/F%C3%AAte_des_M%C3%A8res
# Mothering Sunday (UK) is 4th Sunday of Lent / exactly 3 weeks before Easter Sunday
print(f'{closest_date(SUNDAY, date(year, MAY, WEEK2))} Mother\'s Day')
print(f'{easter(year) - timedelta(days=21)} Mothering Sunday (UK)')
# Fête des mères
# https://en.wikipedia.org/wiki/Memorial_Day
# https://fr.wikipedia.org/wiki/Memorial_Day
print(
f'{closest_date(MONDAY, date(year, MAY, WEEK4), last=True)} Memorial Day (US)'
)
# https://en.wikipedia.org/wiki/Flag_Day_(United_States)
print(f'{date(year, JUNE, 14)} Flag Day (US)') # Jour du drapeau (US)
# https://en.wikipedia.org/wiki/Father's_Day
# https://fr.wikipedia.org/wiki/F%C3%AAte_des_P%C3%A8res
print(f'{closest_date(SUNDAY, date(year, JUNE, WEEK3))} Father\'s Day')
# Fête des pères
# https://en.wikipedia.org/wiki/Independence_Day_%28United_States%29
print(f'{date(year, JULY, 4)} Independence Day (US)')
# Jour de l'indépendance (US)
# https://en.wikipedia.org/wiki/Columbus_Day
print(f'{closest_date(MONDAY, date(year, OCTOBER, WEEK2))} Columbus Day (US)')
# Jour de Columbus (US)
# https://en.wikipedia.org/wiki/Halloween
# https://fr.wikipedia.org/wiki/Halloween
print(f'{date(year, OCTOBER, 31)} Hallowe\'en') # Halloween
# https://en.wikipedia.org/wiki/Thanksgiving
# https://en.wikipedia.org/wiki/Black_Friday_(shopping)
# https://en.wikipedia.org/wiki/Cyber_Monday
print(
f'{closest_date(THURSDAY, date(year, NOVEMBER, WEEK4))} Thanksgiving Day (US)'
) # Action de Grâce (US)
print(
f'{closest_date(THURSDAY, date(year, NOVEMBER, WEEK4)) + timedelta(days=1)} Black Friday (US)'
) # Vendredi Noir (US)
print(
f'{closest_date(THURSDAY, date(year, NOVEMBER, WEEK4)) + timedelta(days=3)} Cyber Monday (US)'
) # Cyber Lundi (US)
# https://uk-public-holidays.com/early-may-bank-holiday/
# https://uk-public-holidays.com/spring-bank-holiday/
# https://uk-public-holidays.com/summer-bank-holiday/
print(
f'{closest_date(MONDAY, date(year, MAY, WEEK1))} Early May Bank Holiday (UK)'
) # May Day
if 2022 == year:
print(f'{date(year, JUNE, 2)} Spring Bank Holiday (UK)')
print(f'{date(year, JUNE, 3)} Platinum Jubilee Bank Holiday (UK)')
else:
print(
f'{closest_date(MONDAY, date(year, MAY, WEEK4), last=True)} Spring Bank Holiday (UK)'
)
print(
f'{closest_date(MONDAY, date(year, AUGUST, WEEK4), last=True)} Summer Bank Holiday (UK)'
)
# https://en.wikipedia.org/wiki/Guy_Fawkes_Night
print(f'{date(year, NOVEMBER, 5)} Guy Fawkes Day (UK)')
# Journée de Guy Fawkes (UK)
# https://en.wikipedia.org/wiki/Hogmanay
# https://fr.wikipedia.org/wiki/Hogmanay
print(f'{date(year, DECEMBER, 31)} Hogmanay (UK)')
# Second Easter (ES)
# Feast of San Juan (ES)
# Assumption of Mary (ES)
# Virgin of Mecy (ES) is there a typo here???
# Diada (ES)
# National Day (ES)
# Constitution Day (ES)
# Immaculate Conception (ES) = Immaculate Conception of Mary???
# Day of Madrid (ES)
# Feast Day of St. Isodore (ES)
# Feast of St. James the Apostle (ES)
# La Almudena (ES)
# https://en.wikipedia.org/wiki/International_Cat_Day
# https://fr.wikipedia.org/wiki/Journ%C3%A9e_internationale_du_chat
# https://en.wikipedia.org/wiki/National_Cat_Day
# https://fr.wikipedia.org/wiki/Journ%C3%A9e_nationale_du_chat
# Caturday!!!
print(f'{date(year, FEBRUARY, 17)} National Cat Day (BR, IT)')
print(f'{date(year, FEBRUARY, 22)} National Cat Day (JP)')
print(f'{date(year, MARCH, 1)} National Cat Day (RU)')
print(f'{date(year, AUGUST, 8)} National Cat Day (CA)')
print(f'{date(year, AUGUST, 8)} International Cat Day')
print(f'{date(year, OCTOBER, 29)} National Cat Day (US)')
# https://en.wikipedia.org/wiki/Caps_lock#International_Caps_Lock_Day
print(f'{date(year, JUNE, 28)} INTERNATIONAL CAPS LOCK DAY')
print(f'{date(year, OCTOBER, 22)} INTERNATIONAL CAPS LOCK DAY')
# JOURNÉE INTERNATIONALE DU VERROUILLAGE DES MAJUSCULES
# https://en.wikipedia.org/wiki/Day_of_the_Programmer
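    # The 256th (0x100th) day of the year falls on 12 September in leap years
    # and on 13 September otherwise.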
if is_leap(year):
print(f'{date(year, SEPTEMBER, 12)} Day of the Programmer 256th day')
# Jour du programmeur 256e jour
else:
print(f'{date(year, SEPTEMBER, 13)} Day of the Programmer 256th day')
# Jour du programmeur 256e jour
# https://en.wikipedia.org/wiki/Software_Freedom_Day
print(
f'{closest_date(SATURDAY, date(year, SEPTEMBER, WEEK3))} Software Freedom Day'
) # Journée de la liberté des logiciels
# http://worldradioday.org
print(f'{date(year,FEBRUARY, 13)} World Radio Day')
# Journée mondiale de la radio
# http://iaru.org/world-amateur-radio-day.html
print(f'{date(year, APRIL, 18)} World Amateur Radio Day')
# Journée de la radio amateur
# https://en.wikipedia.org/wiki/Pi_Day
# http://www.piday.org/
# https://en.wikipedia.org/wiki/Tau_Day
# https://tauday.com/
# https://en.wikipedia.org/wiki/Pi_Day
# http://piapproximationday.com/
print(f'{date(year, MARCH, 14)} Pi Day 3.14') # Jour de pi 3.14
print(f'{date(year, JUNE, 28)} Tau Day 6.28') # Jour de tau 6.28
print(f'{date(year, JULY, 22)} Pi Approximation Day 22/7')
# Jour d'approximation pi 22/7
print(f'{date(year, JULY, 10)} Nikola Tesla Day')
# https://en.wikipedia.org/wiki/Ada_Lovelace_Day
# http://findingada.com/about/when-is-ald/
print(f'{closest_date(TUESDAY, date(year, OCTOBER, WEEK2))} Ada Lovelace Day')
# Jour de Ada Lovelace
# https://en.wikipedia.org/wiki/International_Lefthanders_Day
# https://fr.wikipedia.org/wiki/Journ%C3%A9e_internationale_des_gauchers
print(f'{date(year, AUGUST, 13)} Left-Handers\' Day')
# Journée internationale des gauchers
print(f'{date(year, FEBRUARY, 20)} {ordinal(year - 1991)} Birthday of Python')
print(f'{date(year, MARCH, 11)} {ordinal(year - 2002)} Birthday of Arch')
print(f'{date(year, MARCH, 15)} {ordinal(year - 2013)} Birthday of Docker')
print(f'{date(year, MARCH, 18)} {ordinal(year - 1985)} Birthday of GNU Manifesto')
print(f'{date(year, MARCH, 21)} {ordinal(year - 1993)} Birthday of NetBSD')
print(f'{date(year, APRIL, 16)} {ordinal(year - 1971)} Birthday of FTP')
print(f'{date(year, JUNE, 1)} {ordinal(year - 1969)} Birthday of Unix')
print(f'{date(year, JUNE, 19)} {ordinal(year - 1984)} Birthday of X-Windows')
print(f'{date(year, JUNE, 19)} {ordinal(year - 1993)} Birthday of FreeBSD')
print(f'{date(year, JUNE, 21)} 11:00 {ordinal(year - 1948)} Birthday of Software')
print(f'{date(year, JUNE, 7)} {ordinal(year - 2014)} Birthday of Kubernetes')
print(f'{date(year, JULY, 16)} {ordinal(year - 1993)} Birthday of Slackware')
    print(f'{date(year, AUGUST, 1)} {ordinal(year - 1988)} Birthday of IRC')
print(f'{date(year, AUGUST, 16)} {ordinal(year - 1993)} Birthday of Debian')
print(f'{date(year, AUGUST, 25)} {ordinal(year - 1991)} Birthday of Linux')
print(f'{date(year, SEPTEMBER, 27)} {ordinal(year - 1983)} Birthday of GNU')
print(f'{date(year, SEPTEMBER, 28)} {ordinal(year - 2010)} Birthday of LibreOffice')
print(f'{date(year, OCTOBER, 18)} {ordinal(year - 1995)} Birthday of OpenBSD')
print(f'{date(year, OCTOBER, 19)} {ordinal(year - 2009)} Birthday of Alpine')
print(f'{date(year, OCTOBER, 20)} {ordinal(year - 2004)} Birthday of Ubuntu')
print(f'{date(year, NOVEMBER, 21)} {ordinal(year - 1995)} Birthday of GIMP')
print(f'{date(year, MARCH, 21)} Aries Rises') # Ascension du bélier
print(f'{date(year, APRIL, 19)} Aries Sets') # Descension du bélier
print(f'{date(year, APRIL, 20)} Taurus Rises') # Ascension du taureau
print(f'{date(year, MAY, 20)} Taurus Sets') # Descension du taureau
print(f'{date(year, MAY, 21)} Gemini Rises') # Ascension des gémeaux
print(f'{date(year, JUNE, 20)} Gemini Sets') # Descension des gémeaux
print(f'{date(year, JUNE, 21)} Cancer Rises') # Ascension du cancer
print(f'{date(year, JULY, 22)} Cancer Sets') # Descension du cancer
print(f'{date(year, JULY, 23)} Leo Rises') # Ascension du lion
print(f'{date(year, AUGUST, 22)} Leo Sets') # Descension du lion
print(f'{date(year, AUGUST, 23)} Virgo Rises') # Ascension de la vierge
print(f'{date(year, SEPTEMBER, 22)} Virgo Sets') # Descension de la vierge
print(f'{date(year, SEPTEMBER, 23)} Libra Rises') # Ascension de la balance
print(f'{date(year, OCTOBER, 22)} Libra Sets') # Descension de la balance
print(f'{date(year, OCTOBER, 23)} Scorpio Rises') # Ascension du scorpion
print(f'{date(year, NOVEMBER, 21)} Scorpio Sets') # Descension du scorpion
print(f'{date(year, NOVEMBER, 22)} Sagittarius Rises') # Ascension du sagittaire
print(f'{date(year, DECEMBER, 21)} Sagittarius Sets') # Descension du sagittaire
print(f'{date(year, DECEMBER, 22)} Capricorn Rises') # Ascension du capricorne
print(f'{date(year, JANUARY, 19)} Capricorn Sets') # Descension du capricorne
print(f'{date(year, JANUARY, 20)} Aquarius Rises') # Ascension du verseau
print(f'{date(year, FEBRUARY, 18)} Aquarius Sets') # Descension du verseau
print(f'{date(year, FEBRUARY, 19)} Pisces Rises') # Ascension des poissons
print(f'{date(year, MARCH, 20)} Pisces Sets') # Descension des poissons
if __name__ == '__main__':
main()
| tylert/paper-calendar | holiday_other.py | Python | gpl-3.0 | 14,773 | ["COLUMBUS"] | b8c9d8f6fee364ef402350e7b9836f758a054fcd13f77debdac8775200eaed5f |
from .app import Firefly
from .client import Client
from .version import __version__
| rorodata/firefly | firefly/__init__.py | Python | apache-2.0 | 85 | ["Firefly"] | 34674ed34d6c4564071e1e0d4f9a27afa9d2dc04c84b7590e08d6cd6de20642c |
# Copyright (C) 2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
# flake8: noqa
# because of long lines
import copy
import datetime
import json
from unittest.mock import Mock, call
from unittest.mock import patch as _patch
import attr
import pytest
from swh.model.model import (
MetadataAuthority,
MetadataAuthorityType,
MetadataFetcher,
Origin,
OriginVisit,
OriginVisitStatus,
Person,
RawExtrinsicMetadata,
Revision,
RevisionType,
Snapshot,
SnapshotBranch,
TargetType,
Timestamp,
TimestampWithTimezone,
)
from swh.model.swhids import CoreSWHID, ExtendedObjectType, ExtendedSWHID
from swh.storage import get_storage
from swh.storage.interface import ListOrder, PagedResult
from swh.storage.migrate_extrinsic_metadata import debian_origins_from_row, handle_row
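# Fixtures shared by the tests below: the fetcher/authority pair recorded with
# the migrated metadata and the SWHID of the target directory.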
FETCHER = MetadataFetcher(
name="migrate-extrinsic-metadata-from-revisions", version="0.0.1",
)
SWH_AUTHORITY = MetadataAuthority(
type=MetadataAuthorityType.REGISTRY,
url="https://softwareheritage.org/",
metadata={},
)
DIRECTORY_ID = b"a" * 20
DIRECTORY_SWHID = ExtendedSWHID(
object_type=ExtendedObjectType.DIRECTORY, object_id=DIRECTORY_ID
)
def now():
return datetime.datetime.now(tz=datetime.timezone.utc)
def patch(function_name, *args, **kwargs):
# It's a long name, this function spares some line breaks in 'with' statements
return _patch(
"swh.storage.migrate_extrinsic_metadata." + function_name, *args, **kwargs
)
def test_debian_origins_from_row():
"""Tests debian_origins_from_row on a real example (with some parts
omitted, for conciseness)."""
origin_url = "deb://Debian/packages/kalgebra"
visit = OriginVisit(
origin=origin_url,
date=datetime.datetime(
2020, 1, 27, 19, 32, 3, 925498, tzinfo=datetime.timezone.utc,
),
type="deb",
visit=280,
)
storage = get_storage("memory")
storage.origin_add(
[
Origin(url=origin_url),
Origin(url="http://snapshot.debian.org/package/kalgebra/"),
]
)
storage.origin_visit_add([visit])
storage.origin_visit_status_add(
[
OriginVisitStatus(
origin=origin_url,
visit=280,
date=datetime.datetime(
2020, 1, 27, 19, 32, 3, 925498, tzinfo=datetime.timezone.utc
),
status="full",
snapshot=b"\xafD\x15\x98){\xd4$\xdeI\x1f\xbe\x95lh`x\x14\xce\xc4",
metadata=None,
)
],
)
snapshot = Snapshot(
id=b"\xafD\x15\x98){\xd4$\xdeI\x1f\xbe\x95lh`x\x14\xce\xc4",
branches={
# ...
b"releases/unstable/main/4:19.12.1-1": SnapshotBranch(
target=b"\x00\x00\x03l1\x1e\xf3:(\x1b\x05h\x8fn\xad\xcf\xc0\x94:\xee",
target_type=TargetType.REVISION,
),
},
)
revision_row = {
"id": b"\x00\x00\x03l1\x1e\xf3:(\x1b\x05h\x8fn\xad\xcf\xc0\x94:\xee",
"directory": DIRECTORY_ID,
"metadata": {
# ...
"original_artifact": [
{
"filename": "kalgebra_19.12.1-1.dsc",
# ...
},
]
},
}
storage.snapshot_add([snapshot])
assert debian_origins_from_row(revision_row, storage) == [origin_url]
def test_debian_origins_from_row__no_result():
"""Tests debian_origins_from_row when there's no origin, visit, status,
snapshot, branch, or matching branch.
"""
storage = get_storage("memory")
origin_url = "deb://Debian/packages/kalgebra"
snapshot_id = b"42424242424242424242"
revision_id = b"21212121212121212121"
storage.origin_add([Origin(url=origin_url)])
revision_row = {
"id": b"\x00\x00\x03l1\x1e\xf3:(\x1b\x05h\x8fn\xad\xcf\xc0\x94:\xee",
"directory": DIRECTORY_ID,
"metadata": {"original_artifact": [{"filename": "kalgebra_19.12.1-1.dsc",},]},
}
# no visit
assert debian_origins_from_row(revision_row, storage) == []
storage.origin_visit_add(
[OriginVisit(origin=origin_url, date=now(), type="deb", visit=280,)]
)
# no status
assert debian_origins_from_row(revision_row, storage) == []
status = OriginVisitStatus(
origin=origin_url,
visit=280,
date=now(),
status="full",
snapshot=None,
metadata=None,
)
storage.origin_visit_status_add([status])
# no snapshot
assert debian_origins_from_row(revision_row, storage) == []
status = attr.evolve(status, snapshot=snapshot_id, date=now())
storage.origin_visit_status_add([status])
storage_before_snapshot = copy.deepcopy(storage)
snapshot = Snapshot(id=snapshot_id, branches={})
storage.snapshot_add([snapshot])
# no branch
assert debian_origins_from_row(revision_row, storage) == []
# "remove" the snapshot, so we can add a new one with the same id
storage = copy.deepcopy(storage_before_snapshot)
snapshot = attr.evolve(snapshot, branches={b"foo": None,},)
storage.snapshot_add([snapshot])
# dangling branch
assert debian_origins_from_row(revision_row, storage) == []
# "remove" the snapshot again
storage = copy.deepcopy(storage_before_snapshot)
snapshot = attr.evolve(
snapshot,
branches={
b"foo": SnapshotBranch(target_type=TargetType.REVISION, target=revision_id,)
},
)
storage.snapshot_add([snapshot])
# branch points to unknown revision
assert debian_origins_from_row(revision_row, storage) == []
revision = Revision(
id=revision_id,
message=b"foo",
author=Person.from_fullname(b"foo"),
committer=Person.from_fullname(b"foo"),
date=TimestampWithTimezone(
timestamp=Timestamp(seconds=1580076204, microseconds=0),
offset_bytes=b"+0100",
),
committer_date=TimestampWithTimezone(
timestamp=Timestamp(seconds=1580076204, microseconds=0),
offset_bytes=b"+0100",
),
type=RevisionType.DSC,
directory=b"\xd5\x9a\x1f\x9c\x80\x9d\x8c}19P\xf6\xc8\xa2\x0f^%H\xcd\xdb",
synthetic=True,
metadata=None,
parents=(),
extra_headers=(),
)
storage.revision_add([revision])
# no matching branch
assert debian_origins_from_row(revision_row, storage) == []
def test_debian_origins_from_row__check_revisions():
"""Tests debian_origins_from_row errors when the revision at the head
of a branch is a DSC and has no parents
"""
storage = get_storage("memory")
origin_url = "deb://Debian/packages/kalgebra"
revision_id = b"21" * 10
storage.origin_add([Origin(url=origin_url)])
revision_row = {
"id": b"\x00\x00\x03l1\x1e\xf3:(\x1b\x05h\x8fn\xad\xcf\xc0\x94:\xee",
"directory": DIRECTORY_ID,
"metadata": {"original_artifact": [{"filename": "kalgebra_19.12.1-1.dsc",},]},
}
storage.origin_visit_add(
[
OriginVisit(
origin=origin_url,
date=datetime.datetime.now(tz=datetime.timezone.utc),
type="deb",
visit=280,
)
]
)
storage.origin_visit_status_add(
[
OriginVisitStatus(
origin=origin_url,
visit=280,
date=datetime.datetime.now(tz=datetime.timezone.utc),
status="full",
snapshot=b"42" * 10,
metadata=None,
)
]
)
storage.snapshot_add(
[
Snapshot(
id=b"42" * 10,
branches={
b"foo": SnapshotBranch(
target_type=TargetType.REVISION, target=revision_id
)
},
)
]
)
storage_before_revision = copy.deepcopy(storage)
revision = Revision(
id=revision_id,
message=b"foo",
author=Person.from_fullname(b"foo"),
committer=Person.from_fullname(b"foo"),
date=TimestampWithTimezone(
timestamp=Timestamp(seconds=1580076204, microseconds=0),
offset_bytes=b"+0100",
),
committer_date=TimestampWithTimezone(
timestamp=Timestamp(seconds=1580076204, microseconds=0),
offset_bytes=b"+0100",
),
type=RevisionType.DSC,
directory=b"\xd5\x9a\x1f\x9c\x80\x9d\x8c}19P\xf6\xc8\xa2\x0f^%H\xcd\xdb",
synthetic=True,
metadata=None,
parents=(b"parent " * 2,),
extra_headers=(),
)
storage.revision_add([revision])
with pytest.raises(AssertionError, match="revision with parents"):
debian_origins_from_row(revision_row, storage)
def test_debian_with_extrinsic():
dest_original_artifacts = [
{
"length": 2936,
"filename": "kalgebra_19.12.1-1.dsc",
"checksums": {
"sha1": "f869e9f1155b1ee6d28ae3b40060570152a358cd",
"sha256": "75f77150aefdaa4bcf8bc5b1e9b8b90b5cb1651b76a068c5e58e5b83658d5d11",
},
"url": "http://deb.debian.org/debian//pool/main/k/kalgebra/kalgebra_19.12.1-1.dsc",
},
{
"length": 1156408,
"filename": "kalgebra_19.12.1.orig.tar.xz",
"checksums": {
"sha1": "e496032962212983a5359aebadfe13c4026fd45c",
"sha256": "49d623186800eb8f6fbb91eb43fb14dff78e112624c9cda6b331d494d610b16a",
},
"url": "http://deb.debian.org/debian//pool/main/k/kalgebra/kalgebra_19.12.1.orig.tar.xz",
},
{
"length": 10044,
"filename": "kalgebra_19.12.1-1.debian.tar.xz",
"checksums": {
"sha1": "b518bfc2ac708b40577c595bd539faa8b84572db",
"sha256": "1a30acd2699c3769da302f7a0c63a7d7b060f80925b38c8c43ce3bec92744d67",
},
"url": "http://deb.debian.org/debian//pool/main/k/kalgebra/kalgebra_19.12.1-1.debian.tar.xz",
},
{
"length": 488,
"filename": "kalgebra_19.12.1.orig.tar.xz.asc",
"checksums": {
"sha1": "ff53a5c21c1aef2b9caa38a02fa3488f43df4c20",
"sha256": "a37e0b95bb1f16b19b0587bc5d3b99ba63a195d7f6335c4a359122ad96d682dd",
},
"url": "http://deb.debian.org/debian//pool/main/k/kalgebra/kalgebra_19.12.1.orig.tar.xz.asc",
},
]
source_original_artifacts = [
{k: v for (k, v) in d.items() if k != "url"} for d in dest_original_artifacts
]
row = {
"id": b"\x00\x00\x03l1\x1e\xf3:(\x1b\x05h\x8fn\xad\xcf\xc0\x94:\xee",
"directory": DIRECTORY_ID,
"date": datetime.datetime(
2020, 1, 26, 22, 3, 24, tzinfo=datetime.timezone.utc,
),
"date_offset": 60,
"type": "dsc",
"message": b"Synthetic revision for Debian source package kalgebra version 4:19.12.1-1",
"metadata": {
"extrinsic": {
"raw": {
"id": 2718802,
"name": "kalgebra",
"files": {
"kalgebra_19.12.1-1.dsc": {
"uri": "http://deb.debian.org/debian//pool/main/k/kalgebra/kalgebra_19.12.1-1.dsc",
"name": "kalgebra_19.12.1-1.dsc",
"size": 2936,
"md5sum": "fd28f604d4cc31a0a305543230f1622a",
"sha256": "75f77150aefdaa4bcf8bc5b1e9b8b90b5cb1651b76a068c5e58e5b83658d5d11",
},
"kalgebra_19.12.1.orig.tar.xz": {
"uri": "http://deb.debian.org/debian//pool/main/k/kalgebra/kalgebra_19.12.1.orig.tar.xz",
"name": "kalgebra_19.12.1.orig.tar.xz",
"size": 1156408,
"md5sum": "34e09ed152da762d53101ea33634712b",
"sha256": "49d623186800eb8f6fbb91eb43fb14dff78e112624c9cda6b331d494d610b16a",
},
"kalgebra_19.12.1-1.debian.tar.xz": {
"uri": "http://deb.debian.org/debian//pool/main/k/kalgebra/kalgebra_19.12.1-1.debian.tar.xz",
"name": "kalgebra_19.12.1-1.debian.tar.xz",
"size": 10044,
"md5sum": "4f639f36143898d97d044f273f038e58",
"sha256": "1a30acd2699c3769da302f7a0c63a7d7b060f80925b38c8c43ce3bec92744d67",
},
"kalgebra_19.12.1.orig.tar.xz.asc": {
"uri": "http://deb.debian.org/debian//pool/main/k/kalgebra/kalgebra_19.12.1.orig.tar.xz.asc",
"name": "kalgebra_19.12.1.orig.tar.xz.asc",
"size": 488,
"md5sum": "3c29291e4e6f0c294de80feb8e9fce4c",
"sha256": "a37e0b95bb1f16b19b0587bc5d3b99ba63a195d7f6335c4a359122ad96d682dd",
},
},
"version": "4:19.12.1-1",
"revision_id": None,
},
"when": "2020-01-27T19:32:03.925498+00:00",
"provider": "http://deb.debian.org/debian//pool/main/k/kalgebra/kalgebra_19.12.1-1.dsc",
},
"intrinsic": {
"raw": {
"name": "kalgebra",
"version": "4:19.12.1-1",
# ...
},
"tool": "dsc",
},
"original_artifact": source_original_artifacts,
},
}
origin_url = "deb://Debian/packages/kalgebra"
storage = Mock()
deposit_cur = None
with patch("debian_origins_from_row", return_value=[origin_url]):
handle_row(copy.deepcopy(row), storage, deposit_cur, dry_run=False)
assert storage.method_calls == [
call.raw_extrinsic_metadata_add(
[
RawExtrinsicMetadata(
target=DIRECTORY_SWHID,
discovery_date=datetime.datetime(
2020, 1, 26, 22, 3, 24, tzinfo=datetime.timezone.utc,
),
authority=SWH_AUTHORITY,
fetcher=FETCHER,
format="original-artifacts-json",
metadata=json.dumps(dest_original_artifacts).encode(),
origin=origin_url,
revision=CoreSWHID.from_string(
"swh:1:rev:0000036c311ef33a281b05688f6eadcfc0943aee"
),
),
]
),
]
def test_debian_without_extrinsic():
source_original_artifacts = [
{
"name": "pymongo_1.10-1.dsc",
"sha1": "81877c1ae4406c2519b9cc9c4557cf6b0775a241",
"length": 99,
"sha256": "40269a73f38ee4c2f9cc021f1d5d091cc59ca6e778c339684b7be030e29e282f",
"sha1_git": "0ac7bdb8e4d10926c5d3e51baa2be7bb29a3966b",
},
{
"name": "pymongo_1.10.orig.tar.gz",
"sha1": "4f4c97641b86ac8f21396281bd1a7369236693c3",
"length": 99,
"sha256": "0b6bffb310782ffaeb3916c75790742ec5830c63a758fc711cd1f557eb5a4b5f",
"sha1_git": "19ef0adda8868520d1ef9d4164b3ace4df1d62ad",
},
{
"name": "pymongo_1.10-1.debian.tar.gz",
"sha1": "fbf378296613c8d55e043aec98896b3e50a94971",
"length": 99,
"sha256": "3970cc70fe3ba6499a9c56ba4b4c6c3782f56433d0d17d72b7a0e2ceae31b513",
"sha1_git": "2eea9904806050a8fda95edd5d4fa60d29c1fdec",
},
]
dest_original_artifacts = [
{
"length": 99,
"filename": "pymongo_1.10-1.dsc",
"checksums": {
"sha1": "81877c1ae4406c2519b9cc9c4557cf6b0775a241",
"sha256": "40269a73f38ee4c2f9cc021f1d5d091cc59ca6e778c339684b7be030e29e282f",
"sha1_git": "0ac7bdb8e4d10926c5d3e51baa2be7bb29a3966b",
},
},
{
"length": 99,
"filename": "pymongo_1.10.orig.tar.gz",
"checksums": {
"sha1": "4f4c97641b86ac8f21396281bd1a7369236693c3",
"sha256": "0b6bffb310782ffaeb3916c75790742ec5830c63a758fc711cd1f557eb5a4b5f",
"sha1_git": "19ef0adda8868520d1ef9d4164b3ace4df1d62ad",
},
},
{
"length": 99,
"filename": "pymongo_1.10-1.debian.tar.gz",
"checksums": {
"sha1": "fbf378296613c8d55e043aec98896b3e50a94971",
"sha256": "3970cc70fe3ba6499a9c56ba4b4c6c3782f56433d0d17d72b7a0e2ceae31b513",
"sha1_git": "2eea9904806050a8fda95edd5d4fa60d29c1fdec",
},
},
]
row = {
"id": b"\x00\x00\x01\xc2\x8c\x8f\xca\x01\xb9\x04\xde\x92\xa2d\n\x86l\xe0<\xb7",
"directory": DIRECTORY_ID,
"date": datetime.datetime(
2011, 3, 31, 20, 17, 41, tzinfo=datetime.timezone.utc
),
"date_offset": 0,
"type": "dsc",
"message": b"Synthetic revision for Debian source package pymongo version 1.10-1",
"metadata": {
"package_info": {
"name": "pymongo",
"version": "1.10-1",
"changelog": {
# ...
},
"maintainers": [
{"name": "Federico Ceratto", "email": "[email protected]"},
{"name": "Janos Guljas", "email": "[email protected]"},
],
"pgp_signature": {
"date": "2011-03-31T21:02:44+00:00",
"keyid": "2BABC6254E66E7B8450AC3E1E6AA90171392B174",
"person": {"name": "David Paleino", "email": "[email protected]"},
},
"lister_metadata": {"id": 244296, "lister": "snapshot.debian.org"},
},
"original_artifact": source_original_artifacts,
},
}
storage = Mock()
origin_url = "http://snapshot.debian.org/package/pymongo"
deposit_cur = None
with patch("debian_origins_from_row", return_value=[origin_url]):
handle_row(copy.deepcopy(row), storage, deposit_cur, dry_run=False)
assert storage.method_calls == [
call.raw_extrinsic_metadata_add(
[
RawExtrinsicMetadata(
target=DIRECTORY_SWHID,
discovery_date=datetime.datetime(
2011, 3, 31, 20, 17, 41, tzinfo=datetime.timezone.utc
),
authority=SWH_AUTHORITY,
fetcher=FETCHER,
format="original-artifacts-json",
metadata=json.dumps(dest_original_artifacts).encode(),
origin=origin_url,
revision=CoreSWHID.from_string(
"swh:1:rev:000001c28c8fca01b904de92a2640a866ce03cb7"
),
),
]
)
]
| SoftwareHeritage/swh-storage | swh/storage/tests/migrate_extrinsic_metadata/test_debian.py | Python | gpl-3.0 | 19,622 | ["VisIt"] | c104bf4802c35f13b513826b79ac1c17648e6fb216f6419a349a5a542563bee0 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import os
import re
import logging
import multiprocessing
from tabulate import tabulate
from pymatgen.io.vasp import Outcar
from pymatgen.apps.borg.hive import SimpleVaspToComputedEntryDrone, \
VaspToComputedEntryDrone
from pymatgen.apps.borg.queen import BorgQueen
"""
A master convenience script with many tools for vasp and structure analysis.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "4.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "Aug 13 2016"
SAVE_FILE = "vasp_data.gz"
def get_energies(rootdir, reanalyze, verbose, detailed, sort, fmt):
"""
Doc string.
"""
if verbose:
logformat = "%(relativeCreated)d msecs : %(message)s"
logging.basicConfig(level=logging.INFO, format=logformat)
if not detailed:
drone = SimpleVaspToComputedEntryDrone(inc_structure=True)
else:
drone = VaspToComputedEntryDrone(inc_structure=True,
data=["filename",
"initial_structure"])
ncpus = multiprocessing.cpu_count()
logging.info("Detected {} cpus".format(ncpus))
queen = BorgQueen(drone, number_of_drones=ncpus)
if os.path.exists(SAVE_FILE) and not reanalyze:
msg = "Using previously assimilated data from {}.".format(SAVE_FILE) \
+ " Use -r to force re-analysis."
queen.load_data(SAVE_FILE)
else:
if ncpus > 1:
queen.parallel_assimilate(rootdir)
else:
queen.serial_assimilate(rootdir)
msg = "Analysis results saved to {} for faster ".format(SAVE_FILE) + \
"subsequent loading."
queen.save_data(SAVE_FILE)
entries = queen.get_data()
if sort == "energy_per_atom":
entries = sorted(entries, key=lambda x: x.energy_per_atom)
elif sort == "filename":
entries = sorted(entries, key=lambda x: x.data["filename"])
all_data = []
for e in entries:
if not detailed:
delta_vol = "{:.2f}".format(e.data["delta_volume"] * 100)
else:
delta_vol = e.structure.volume / \
e.data["initial_structure"].volume - 1
delta_vol = "{:.2f}".format(delta_vol * 100)
all_data.append((e.data["filename"].replace("./", ""),
re.sub(r"\s+", "", e.composition.formula),
"{:.5f}".format(e.energy),
"{:.5f}".format(e.energy_per_atom),
delta_vol))
if len(all_data) > 0:
headers = ("Directory", "Formula", "Energy", "E/Atom", "% vol chg")
print(tabulate(all_data, headers=headers, tablefmt=fmt))
print("")
print(msg)
else:
print("No valid vasp run found.")
os.unlink(SAVE_FILE)
def get_magnetizations(mydir, ion_list):
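    """Walk mydir for OUTCAR files and tabulate the total magnetization of
    each ion (all ions when ion_list is None)."""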
data = []
max_row = 0
for (parent, subdirs, files) in os.walk(mydir):
for f in files:
if re.match(r"OUTCAR*", f):
try:
row = []
fullpath = os.path.join(parent, f)
outcar = Outcar(fullpath)
mags = outcar.magnetization
mags = [m["tot"] for m in mags]
all_ions = list(range(len(mags)))
row.append(fullpath.lstrip("./"))
if ion_list:
all_ions = ion_list
for ion in all_ions:
row.append(str(mags[ion]))
data.append(row)
if len(all_ions) > max_row:
max_row = len(all_ions)
except:
pass
for d in data:
if len(d) < max_row + 1:
d.extend([""] * (max_row + 1 - len(d)))
headers = ["Filename"]
for i in range(max_row):
headers.append(str(i))
print(tabulate(data, headers))
def analyze(args):
default_energies = not (args.get_energies or args.ion_list)
if args.get_energies or default_energies:
for d in args.directories:
get_energies(d, args.reanalyze, args.verbose,
args.detailed, args.sort, args.format)
if args.ion_list:
if args.ion_list[0] == "All":
ion_list = None
else:
(start, end) = [int(i) for i in re.split(r"-", args.ion_list[0])]
ion_list = list(range(start, end + 1))
for d in args.directories:
get_magnetizations(d, ion_list)
| johnson1228/pymatgen | pymatgen/cli/pmg_analyze.py | Python | mit | 4,753 | ["VASP", "pymatgen"] | a2f8cac287d5454097238e79d74f3f2d2d7be5009262febe8a0b85501f8f2f5e |
print "ex42.py"
from sys import exit
from random import randint
class Game(object):
def __init__(self, start):
self.quips = [
"You died. You kinda suck at this.",
"Nice job, you died ... jackass.",
"Such a luser.",
"I have a small puppy that's better at this."
]
self.start = start
def play(self):
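        # Minimal state machine: look up the method named by 'next' and call
        # it; each room method returns the name of the next room (or exits).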
next = self.start
while True:
print "\n---------------------"
room = getattr(self, next)
next = room()
def death(self):
print self.quips[randint(0, len(self.quips)-1)]
exit(1)
def central_corridor(self):
print "The Gothons of Planet Percal #25 have invaded your ship and destroyed"
print "your entire crew. You are the last surviving member and your last"
print "mission is to get the neutron destruct bomb from the Weapons Armory,"
print "put it in the bridge, and blow the ship up after getting into an "
print "escape pod."
print "\n"
print "You're running down the central corridor to the Weapons Armory when"
print "a Gothon jumps out, red scaly skin, dark grimy teeth, and evil clown costume"
print "flowing around his hate filled body. He's blocking the door to the"
print "Armory and about to pull a weapon to blast you."
action = raw_input("> ")
if action == "shoot!":
print "Quick on the draw you yank out your blaster and fire it at the Gothon."
print "His clown costume is flowing and moving around his body, which throws"
print "off your aim. Your laser hits his costume but misses him entirely. This"
print "completely ruins his brand new costume his mother bought him, which"
print "makes him fly into an insane rage and blast you repeatedly in the face until"
print "you are dead. Then he eats you."
return 'death'
elif action == "dodge!":
print "Like a world class boxer you dodge, weave, slip and slide right"
print "as the Gothon's blaster cranks a laser past your head."
print "In the middle of your artful dodge your foot slips and you"
print "bang your head on the metal wall and pass out."
print "You wake up shortly after only to die as the Gothon stomps on"
print "your head and eats you."
return 'death'
elif action == "tell a joke" :
print "Lucky for you they made you learn Gothon insults in the academy."
print "You tell the one Gothon joke you know:"
print "Lbhe zbgure vf fb sng, jura fur fvgf nebhaq gur ubhfr, fur fvgf nebhaq gur ubhfr."
print "The Gothon stops, tries not to laugh, then busts out laughing and can't move."
print "While he's laughing you run up and shoot him square in the head"
print "putting him down, then jump through the Weapon Armory door."
return 'laser_weapon_armory'
else:
print "Does not compute!"
return 'central_corridor'
def laser_weapon_armory(self):
print "You do a dive roll into the Weapon Armory, crouch and scan the room"
print "for more Gothons that might be hiding. It's dead quiet, too quiet."
print "You stand up and run to the far side of the room and find the"
print "neutron bomb in its container. There's a keypad lock on the box"
print "and you need the code to get the bomb out. If you get the code"
print "wrong 10 times then the lock closes forever and you can't"
print "get the bomb. The code is 3 digits."
code ="%d%d%d" % (randint(1,9), randint(1,9) ,randint(1,9))
print "%s" % code
guess = raw_input("[keypad]> ")
guesses = 0
while guess!=code and guesses < 10:
print "BZZZEDD"
guesses += 1
guess = raw_input("[keypad]> ")
if guess == code:
print "The container clicks open and the seal breaks, letting gas out."
print "You grab the neutron bomb and run as fast as you can to the"
print "bridge where you must place it in the right spot."
return 'the_bridge'
else:
print "The lock buzzes one last time and then you hear a sickening"
print "melting sound as the mechanism is fused together."
print "You decide to sit there, and finally the Gothons blow up the"
print "ship from their ship and you die."
return 'death'
def the_bridge(self):
print "You burst onto the Bridge with the neutron destruct bomb"
print "under your arm and surprise 5 Gothons who are trying to"
print "take control of the ship. Each of them has an even uglier"
print "clown costume than the last. They haven't pulled their"
print "weapons out yet, as they see the active bomb under your"
print "arm and don't want to set it off."
action = raw_input("> ")
if action == "throw the bomb":
print "In a panic you throw the bomb at the group of Gothons"
print "and make a leap for the door. Right as you drop it a"
print "Gothon shoots you right in the back killing you."
print "As you die you see another Gothon frantically try to disarm"
print "the bomb. You die knowing they will probably blow up when"
print "it goes off."
return 'death'
elif action == "Slowly place the bomb":
print "You point your blaster at the bomb under your arm"
print "and the Gothons put their hands up and start to sweat."
print "You inch backward to the door, open it, and then carefully"
print "place the bomb on the floor, pointing your blaster at it."
print "You then jump back through the door, punch the close button"
print "and blast the lock so the Gothons can't get out."
print "Now that the bomb is placed you run to the escape pod to"
print "get off this tin can."
return 'escape_pod'
else:
print "DOES NOT COMPUTE!"
return 'the_bridge'
def escape_pod(self):
print "You rush through the ship desperately trying to make it to"
print "the escape pod before the whole ship explodes. It seems like"
print "hardly any Gothons are on the ship, so your run is clear of"
print "interference. You get to the chamber with the escape pods, and"
print "now need to pick one to take. Some of them could be damaged"
print "but you don't have time to look. There's 5 pods, which one"
print "do you take?"
good_pod = randint(1,5)
print good_pod
guess = raw_input("[pod #]> ")
if int(guess)!= good_pod:
print "You jump into pod %s and " % guess
print "The pod escapes out into the void of space, then"
print "implodes as the hull ruptures, crushing your body"
print "into jam jelly."
return 'death'
else:
print "You jump into pond %s and " % guess
print "The pod easily slides out into space heading to"
print "the planet below. As it flies to the planet, you look"
print "back and see your ship implode then explode like a"
print "bright star, taking out the Gothon ship at the same"
print "time. You won!"
exit(0)
#end of class Game
a_game = Game("central_corridor")
a_game.play()
| zhaoace/codecraft | python/projects/learnpythonthehardway.org/ex42.py | Python | unlicense | 8,013 | ["BLAST"] | c70d7d4be1fe9d57f82116749730da4a037daa5a6e182b6fd6d8681a21277e59 |
"""
Inventory Steps
Steps file for inventories.feature
"""
from os import getenv
import json
import requests
from behave import *
from compare import expect, ensure
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.ui import Select
from app import server
WAIT_SECONDS = 30
BASE_URL = getenv('BASE_URL', 'http://localhost:5000/')
@given(u'the following inventories')
def step_impl(context):
""" Delete all Inventories and load new ones """
headers = {'Content-Type': 'application/json'}
context.resp = requests.delete(context.base_url + '/inventories/reset', headers=headers)
expect(context.resp.status_code).to_equal(204)
create_url = context.base_url + '/inventories'
for row in context.table:
data = {
"name": row['name'],
"quantity": int(row['quantity']),
"status": row['status']
}
payload = json.dumps(data)
context.resp = requests.post(create_url, data=payload, headers=headers)
expect(context.resp.status_code).to_equal(201)
@when(u'I visit the "home page"')
def step_impl(context):
""" Make a call to the base URL """
context.driver.get(context.base_url)
@then(u'I should see "{message}" in the title')
def step_impl(context, message):
""" Check the document title for a message """
expect(context.driver.title).to_contain(message)
@then(u'I should not see "{message}"')
def step_impl(context, message):
error_msg = "I should not see '%s' in '%s'" % (message, context.resp.text)
ensure(message in context.resp.text, False, error_msg)
@when(u'I set the "{element_name}" to "{text_string}"')
def step_impl(context, element_name, text_string):
element_id = 'inventory_' + element_name.lower()
element = context.driver.find_element_by_id(element_id)
element.clear()
element.send_keys(text_string)
@when(u'I select the "{element_name}" to "{text_string}"')
def step_impl(context, element_name, text_string):
element_id = 'inventory_' + element_name.lower()
element = Select(context.driver.find_element_by_id(element_id))
element.select_by_value(text_string)
##################################################################
# This code works because of the following naming convention:
# The buttons have an id in the html hat is the button text
# in lowercase followed by '-btn' so the Clean button has an id of
# id='clear-btn'. That allows us to lowercase the name and add '-btn'
# to get the element id of any button
##################################################################
@when(u'I press the "{button}" button')
def step_impl(context, button):
button_id = button.lower() + '-btn'
context.driver.find_element_by_id(button_id).click()
@then(u'I should see "{name}" in the results')
def step_impl(context, name):
#element = context.driver.find_element_by_id('search_results')
#expect(element.text).to_contain(name)
found = WebDriverWait(context.driver, WAIT_SECONDS).until(
expected_conditions.text_to_be_present_in_element(
(By.ID, 'search_results'),
name
)
)
expect(found).to_be(True)
@then(u'I should not see "{name}" in the results')
def step_impl(context, name):
element = context.driver.find_element_by_id('search_results')
error_msg = "I should not see '%s' in '%s'" % (name, element.text)
ensure(name in element.text, False, error_msg)
@then(u'I should see the message "{message}"')
def step_impl(context, message):
#element = context.driver.find_element_by_id('flash_message')
#expect(element.text).to_contain(message)
found = WebDriverWait(context.driver, WAIT_SECONDS).until(
expected_conditions.text_to_be_present_in_element(
(By.ID, 'flash_message'),
message
)
)
expect(found).to_be(True)
##################################################################
# This code works because of the following naming convention:
# The id field for text input in the html is the element name
# prefixed by 'inventory_' so the Name field has an id='inventory_name'
# We can then lowercase the name and prefix with inventory_ to get the id
##################################################################
@then(u'I should see "{text_string}" in the "{element_name}" field')
def step_impl(context, text_string, element_name):
element_id = 'inventory_' + element_name.lower()
#element = context.driver.find_element_by_id(element_id)
found = WebDriverWait(context.driver, WAIT_SECONDS).until(
expected_conditions.text_to_be_present_in_element_value(
(By.ID, element_id),
text_string
)
)
#expect(element.get_attribute('value')).to_equal(text_string)
expect(found).to_be(True)
# @when(u'I change "{element_name}" to "{text_string}"')
# def step_impl(context, element_name, text_string):
# element_id = 'pet_' + element_name.lower()
# #element = context.driver.find_element_by_id(element_id)
# element = WebDriverWait(context.driver, WAIT_SECONDS).until(
# expected_conditions.presence_of_element_located((By.ID, element_id))
# )
# element.clear()
# element.send_keys(text_string)
# # @then(u'I should see "{message}" in "{field}"')
# # def step_impl(context, message, field):
# # """ Check a field for text """
# # element = context.driver.find_element_by_id(field)
# # assert message in element.text
# # @when(u'I change "{key}" to "{value}"')
# # def step_impl(context, key, value):
# # context.data[key] = value
| NYU-Foxtrot/inventory | features/steps/inventory_steps.py | Python | apache-2.0 | 5,677 | ["VisIt"] | efb880921fb4ee32cb6b3e828481b7e7c448f849928854e3a761b6d7d6cdc4ae |
import unittest
from asp.codegen.ast_tools import *
from asp.codegen.cpp_ast import *
import asp.codegen.python_ast as python_ast
class NodeVisitorTests(unittest.TestCase):
def test_for_python_nodes(self):
class Dummy(NodeVisitor):
def visit_Name(self, node):
return False
p = python_ast.Name("hello", False)
self.assertFalse(Dummy().visit(p))
def test_for_cpp_nodes(self):
class Dummy(NodeVisitor):
def visit_CName(self, node):
return False
c = CName("hello")
self.assertFalse(Dummy().visit(c))
def test_for_cpp_children(self):
class Dummy(NodeVisitor):
def __init__(self):
self.worked = False
def visit_CName(self, _):
self.worked = True
c = BinOp(CNumber(1), "+", CName("hello"))
d = Dummy()
d.visit(c)
self.assertTrue(d.worked)
class NodeTransformerTests(unittest.TestCase):
class Dummy(NodeTransformer):
def visit_Name(self, _):
return python_ast.Name("hi", False)
def visit_CName(self, _):
return CName("hi")
def test_for_python_nodes(self):
p = python_ast.Name("hello", False)
result = self.Dummy().visit(p)
self.assertEqual(result.id, "hi")
def test_for_cpp_nodes(self):
c = CName("hello")
result = self.Dummy().visit(c)
self.assertEqual(result.name, "hi")
def test_for_cpp_children(self):
c = BinOp(CNumber(1), "+", CName("hello"))
result = self.Dummy().visit(c)
self.assertEqual(result.right.name, "hi")
class LoopUnrollerTests(unittest.TestCase):
def setUp(self):
# this is "for(int i=0, i<8; i+=1) { a[i] = i; }"
self.test_ast = For(
"i",
CNumber(0),
CNumber(7),
CNumber(1),
Block(contents=[Assign(Subscript(CName("a"), CName("i")),
CName("i"))]))
def test_unrolling_by_2(self):
result = LoopUnroller().unroll(self.test_ast, 2)
# print result
wanted_result ='for(int i=0;(i<=(7-1));i=(i+(1*2)))\n {\n a[i]=i;\n a[(i+1)]=(i+1);\n}'
self.assertEqual(str(result).replace(' ',''), str(wanted_result).replace(' ', ''))
def test_unrolling_by_4(self):
result = LoopUnroller().unroll(self.test_ast, 4)
# print result
wanted_result = 'for(inti=0;(i<=(7-3));i=(i+(1*4)))\n{\na[i]=i;\na[(i+1)]=(i+1);\na[(i+2)]=(i+2);\na[(i+3)]=(i+3);\n}'
self.assertEqual(str(result).replace(' ',''), str(wanted_result).replace(' ', ''))
def test_imperfect_unrolling (self):
result = LoopUnroller().unroll(self.test_ast, 3)
wanted_result = 'for(inti=0;(i<=(7-2));i=(i+(1*3)))\n{\na[i]=i;\na[(i+1)]=(i+1);\na[(i+2)]=(i+2);\n}\nfor(inti=(((((7-0)+1)/3)*3)+0);(i<=7);i=(i+1))\n{\na[i]=i;\n}'
# print str(result)
self.assertEqual(str(result).replace(' ',''), str(wanted_result).replace(' ', ''))
def test_with_1_index(self):
test_ast = For("i",
CNumber(1),
CNumber(9),
CNumber(1),
Block(contents=[Assign(Subscript(CName("a"), CName("i")), CName("i"))]))
result = LoopUnroller().unroll(test_ast, 2)
# print result
class LoopBlockerTests(unittest.TestCase):
def test_basic_blocking(self):
# this is "for(int i=0, i<=7; i+=1) { a[i] = i; }"
test_ast = For(
"i",
CNumber(0),
CNumber(7),
CNumber(1),
Block(contents=[Assign(Subscript(CName("a"), CName("i")),
CName("i"))]))
wanted_output = "for(intii=0;(ii<=7);ii=(ii+(1*2)))\n{\nfor(inti=ii;(i<=min((ii+1),7));i=(i+1))\n{\na[i]=i;\n}\n}"
output = str(LoopBlocker().loop_block(test_ast, 2)).replace(' ', '')
self.assertEqual(output, wanted_output)
class LoopSwitcherTests(unittest.TestCase):
def test_basic_switching(self):
test_ast = For("i",
CNumber(0),
CNumber(7),
CNumber(1),
Block(contents=[For("j",
CNumber(0),
CNumber(3),
CNumber(1),
Block(contents=[Assign(CName("v"), CName("i"))]))]))
wanted_output = "for(intj=0;(j<=3);j=(j+1))\n{\nfor(inti=0;(i<=7);i=(i+1))\n{\nv=i;\n}\n}"
output = str(LoopSwitcher().switch(test_ast, 0, 1)).replace(' ','')
self.assertEqual(output, wanted_output)
def test_more_switching(self):
test_ast = For("i",
CNumber(0),
CNumber(7),
CNumber(1),
Block(contents=[For("j",
CNumber(0),
CNumber(3),
CNumber(1),
Block(contents=[For("k",
CNumber(0),
CNumber(4),
CNumber(1),
Block(contents=[Assign(CName("v"), CName("i"))]))]))]))
wanted_output = "for(intj=0;(j<=3);j=(j+1))\n{\nfor(inti=0;(i<=7);i=(i+1))\n{\nfor(intk=0;(k<=4);k=(k+1))\n{\nv=i;\n}\n}\n}"
output = str(LoopSwitcher().switch(test_ast, 0, 1)).replace(' ','')
self.assertEqual(output, wanted_output)
test_ast = For("i",
CNumber(0),
CNumber(7),
CNumber(1),
Block(contents=[For("j",
CNumber(0),
CNumber(3),
CNumber(1),
Block(contents=[For("k",
CNumber(0),
CNumber(4),
CNumber(1),
Block(contents=[Assign(CName("v"), CName("i"))]))]))]))
wanted_output = "for(intk=0;(k<=4);k=(k+1))\n{\nfor(intj=0;(j<=3);j=(j+1))\n{\nfor(inti=0;(i<=7);i=(i+1))\n{\nv=i;\n}\n}\n}"
output = str(LoopSwitcher().switch(test_ast, 0, 2)).replace(' ','')
self.assertEqual(output, wanted_output)
if __name__ == '__main__':
unittest.main()
| pbirsinger/aspNew | tests/ast_tools_test.py | Python | bsd-3-clause | 6,797 | ["VisIt"] | e9816200a38f2ccff10d5c5dbaf5950a961d52b6051756990e9ac6d7e5035116 |
#!/usr/bin/env python
import radical.entk as re
import os
import tarfile
import writeInputs
# unused
# import git
os.environ['RADICAL_VERBOSE'] = 'INFO'
os.environ['RP_ENABLE_OLD_DEFINES'] = 'True'
os.environ['SAGA_PTY_SSH_TIMEOUT'] = '2000'
os.environ['RADICAL_PILOT_DBURL'] = "mongodb://smush:[email protected]:47361/db_repex_4"
replicas = 4
replica_cores = 1
min_temp = 100
max_temp = 200
timesteps = 500
basename = 'ace-ala'
cycle = 0
md_executable = '/home/scm177/mantel/AMBER/amber14/bin/sander'
# unused as of yet
# ASYNCHRONICITY = 0.5
# wait_ratio = 0
# wait_count = 0
# global variables
max_waiting_list = 2
waiting_replicas = list()
min_completed_cycles = 3
replica_cycles = [0] * replicas
# ------------------------------------------------------------------------------
#
def setup_replicas(replicas, min_temp, max_temp, timesteps, basename):
writeInputs.writeInputs(max_temp=max_temp, min_temp=min_temp,
replicas=replicas, timesteps=timesteps,
basename=basename)
tar = tarfile.open("input_files.tar", "w")
for name in [basename + ".prmtop",
basename + ".inpcrd",
basename + ".mdin"]:
tar.add(name)
for r in range(replicas):
tar.add ('mdin-{replica}-{cycle}'.format(replica=r, cycle=0))
os.remove('mdin-{replica}-{cycle}'.format(replica=r, cycle=0))
tar.close()
setup_p = re.Pipeline()
setup_p.name = 'untarPipe'
# # unused
# repo = git.Repo('.', search_parent_directories=True)
# aux_function_path = repo.working_tree_dir
untar_stg = re.Stage()
untar_stg.name = 'untarStg'
# Untar Task
untar_tsk = re.Task()
untar_tsk.name = 'untarTsk'
untar_tsk.executable = ['python']
untar_tsk.upload_input_data = ['untar_input_files.py', 'input_files.tar']
untar_tsk.arguments = ['untar_input_files.py', 'input_files.tar']
untar_tsk.cpu_reqs = 1
untar_tsk.post_exec = []
untar_stg.add_tasks(untar_tsk)
setup_p.add_stages(untar_stg)
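    # EnTK is expected to resolve this '$Pipeline_..._Stage_..._Task_...'
    # placeholder to the untar task's sandbox path, so the replica tasks can
    # link the staged input files from there.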
replica_sandbox = '$Pipeline_%s_Stage_%s_Task_%s' \
% (setup_p.name, untar_stg.name, untar_tsk.name)
return setup_p, replica_sandbox
# ------------------------------------------------------------------------------
# init replicas
class Replica(object):
# --------------------------------------------------------------------------
#
def __init__(self):
self.cycle = 0 # initial cycle
# --------------------------------------------------------------------------
#
def replica_pipeline(self, rid, cycle, replica_cores, md_executable,
timesteps, replica_sandbox):
# ----------------------------------------------------------------------
def add_md_stg(rid,cycle):
# md stg here
print 'cycle: ', self.cycle
md_tsk = re.Task()
md_stg = re.Stage()
md_tsk.name = 'mdtsk-{replica}-{cycle}'.format(replica=rid, cycle=self.cycle)
md_tsk.link_input_data = ['%s/inpcrd > inpcrd-{replica}-{cycle}'.format(replica=rid, cycle=self.cycle) % replica_sandbox,
'%s/prmtop' % replica_sandbox,
'%s/mdin-{replica}-{cycle} > mdin'.format(replica=rid, cycle=self.cycle) % replica_sandbox]
md_tsk.arguments = ['-O',
'-i', 'mdin',
'-p', 'prmtop',
'-c', 'inpcrd-{replica}-{cycle}'.format(replica=rid, cycle=self.cycle),
'-o', 'out',
'-x', 'mdcrd',
'-r', '%s/inpcrd-{replica}-{cycle}'.format(replica=rid, cycle=self.cycle + 1) % replica_sandbox,
'-inf', '%s/mdinfo-{replica}-{cycle}'.format(replica=rid, cycle=self.cycle) % replica_sandbox]
md_tsk.executable = [md_executable]
md_tsk.cpu_reqs = {'processes' : replica_cores,
'process_type' : '',
'threads_per_process': 1,
'thread_type' : None
}
md_tsk.pre_exec = ['echo $SHARED']
md_stg.add_tasks(md_tsk)
md_stg.post_exec = {
'condition': post_md,
'on_true' : start_ex,
'on_false' : suspend_replica
}
return md_stg
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
def add_ex_stg(rid, cycle):
# ex stg here
ex_tsk = re.Task()
ex_stg = re.Stage()
ex_tsk.name = 'extsk-{replica}-{cycle}'.format(replica=rid, cycle=cycle)
for rid in range(len(waiting_replicas)):
ex_tsk.link_input_data += ['%s/mdinfo-{replica}-{cycle}'.format(replica=rid, cycle=self.cycle) % replica_sandbox]
ex_tsk.arguments = ['t_ex_gibbs.py', len(waiting_replicas)] # This needs to be fixed
ex_tsk.executable = ['python']
ex_tsk.cpu_reqs = {'processes' : 1,
'process_type' : '',
'threads_per_process': 1,
'thread_type' : None
}
ex_stg.add_tasks(ex_tsk)
ex_stg.post_exec = {'condition': post_ex,
'on_true' : terminate_replicas,
'on_false' : continue_md
}
return ex_stg
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
def post_md():
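            # Each replica joins the waiting list after its MD stage; an
            # exchange stage is added only once max_waiting_list replicas are
            # waiting, otherwise the pipeline suspends itself.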
global replica_cycles
            print 'replica cycles: %s [%s]' % (replica_cycles, rid)
self.cycle += 1
replica_cycles[rid] += 1
            print 'replica cycles: %s' % replica_cycles
waiting_replicas.append(rid)
if len(waiting_replicas) < max_waiting_list:
return False
return True
# ----------------------------------------------------------------------
def suspend_replica():
p_replica.suspend()
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
def start_ex():
ex_stg = add_ex_stg(rid, cycle=self.cycle)
p_replica.add_stages(ex_stg)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
def post_ex():
if cycle > min_completed_cycles:
return True
return False
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
def terminate_replicas():
# Resume all replicas in list without adding stages
for rid in waiting_replicas:
replica_pipelines[rid].resume()
print "DONE"
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
def continue_md():
# This needs to resume replica_pipelines[rid]
# for all rid's in wait list
print "continuing replicas"
global waiting_replicas
for rid in waiting_replicas:
try:
md_stg = add_md_stg(rid, cycle)
replica_pipelines[rid].add_stages(md_stg)
if replica_pipelines[rid] is rid:
pass
else:
replica_pipelines[rid].resume()
# This is throwing an error: cannot resume itself since
# it is not suspended. Since the pipeline that is
# triggering this choice is NOT suspended,
# pipeline.resume() fails. This seems to be happening on
# ALL pipelines somehow.
except:
print "replica is not suspended, cannot resume"
waiting_replicas = []
# ----------------------------------------------------------------------
p_replica = re.Pipeline()
p_replica.name = 'p_{rid}'.format(rid=rid)
md_stg = add_md_stg(rid, cycle)
p_replica.add_stages(md_stg)
return p_replica
# ------------------------------------------------------------------------------
#
if __name__ == '__main__':
system, replica_sandbox = setup_replicas(replicas, min_temp, max_temp,
timesteps, basename)
print 'replica sandbox:', replica_sandbox
replica = list()
replica_pipelines = list()
for rid in range(replicas):
print rid
replica = Replica()
r_pipeline = replica.replica_pipeline(rid, cycle, replica_cores,
md_executable, timesteps,
replica_sandbox)
replica_pipelines.append(r_pipeline)
appman = re.AppManager(autoterminate=False, port=32769)
appman.resource_desc = {"resource" : 'local.localhost',
"walltime" : 30,
"cpus" : 4}
appman.workflow = set([system])
appman.run()
appman.workflow = set(replica_pipelines)
appman.run()
appman.resource_terminate()
# ------------------------------------------------------------------------------
| radical-cybertools/radical.repex | old/misc/experimental_async/experimental_async.py | Python | mit | 10,335 | ["Amber"] | 18de222033b70bacbe65b0956c9e4750ca444b64a7974f71f8aebabbd048d799 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
=========================================================================
Program: Visualization Toolkit
Module: TestNamedColorsIntegration.py
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================
'''
import vtk
import vtk.test.Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
class TestSphereWidget(vtk.test.Testing.vtkTest):
def testSphereWidget(self):
# This example demonstrates how to use the vtkSphereWidget to control the
# position of a light.
# These are the pre-recorded events
Recording = \
"# StreamVersion 1\n\
CharEvent 23 266 0 0 105 1 i\n\
KeyReleaseEvent 23 266 0 0 105 1 i\n\
EnterEvent 69 294 0 0 0 0 i\n\
MouseMoveEvent 69 294 0 0 0 0 i\n\
MouseMoveEvent 68 293 0 0 0 0 i\n\
MouseMoveEvent 67 292 0 0 0 0 i\n\
MouseMoveEvent 66 289 0 0 0 0 i\n\
MouseMoveEvent 66 282 0 0 0 0 i\n\
MouseMoveEvent 66 271 0 0 0 0 i\n\
MouseMoveEvent 69 253 0 0 0 0 i\n\
MouseMoveEvent 71 236 0 0 0 0 i\n\
MouseMoveEvent 74 219 0 0 0 0 i\n\
MouseMoveEvent 76 208 0 0 0 0 i\n\
MouseMoveEvent 78 190 0 0 0 0 i\n\
MouseMoveEvent 78 173 0 0 0 0 i\n\
MouseMoveEvent 77 162 0 0 0 0 i\n\
MouseMoveEvent 77 151 0 0 0 0 i\n\
MouseMoveEvent 77 139 0 0 0 0 i\n\
MouseMoveEvent 76 125 0 0 0 0 i\n\
MouseMoveEvent 73 114 0 0 0 0 i\n\
MouseMoveEvent 73 106 0 0 0 0 i\n\
MouseMoveEvent 73 101 0 0 0 0 i\n\
MouseMoveEvent 72 95 0 0 0 0 i\n\
MouseMoveEvent 72 92 0 0 0 0 i\n\
MouseMoveEvent 70 89 0 0 0 0 i\n\
MouseMoveEvent 69 86 0 0 0 0 i\n\
MouseMoveEvent 67 84 0 0 0 0 i\n\
MouseMoveEvent 65 81 0 0 0 0 i\n\
MouseMoveEvent 60 79 0 0 0 0 i\n\
MouseMoveEvent 59 79 0 0 0 0 i\n\
MouseMoveEvent 58 79 0 0 0 0 i\n\
MouseMoveEvent 57 78 0 0 0 0 i\n\
MouseMoveEvent 55 78 0 0 0 0 i\n\
MouseMoveEvent 54 77 0 0 0 0 i\n\
LeftButtonPressEvent 54 77 0 0 0 0 i\n\
MouseMoveEvent 61 79 0 0 0 0 i\n\
MouseMoveEvent 67 83 0 0 0 0 i\n\
MouseMoveEvent 72 88 0 0 0 0 i\n\
MouseMoveEvent 77 90 0 0 0 0 i\n\
MouseMoveEvent 78 91 0 0 0 0 i\n\
MouseMoveEvent 80 92 0 0 0 0 i\n\
MouseMoveEvent 84 93 0 0 0 0 i\n\
MouseMoveEvent 85 94 0 0 0 0 i\n\
MouseMoveEvent 88 97 0 0 0 0 i\n\
MouseMoveEvent 90 100 0 0 0 0 i\n\
MouseMoveEvent 92 102 0 0 0 0 i\n\
MouseMoveEvent 94 103 0 0 0 0 i\n\
MouseMoveEvent 97 105 0 0 0 0 i\n\
MouseMoveEvent 101 107 0 0 0 0 i\n\
MouseMoveEvent 102 109 0 0 0 0 i\n\
MouseMoveEvent 104 111 0 0 0 0 i\n\
MouseMoveEvent 108 113 0 0 0 0 i\n\
MouseMoveEvent 112 115 0 0 0 0 i\n\
MouseMoveEvent 118 119 0 0 0 0 i\n\
MouseMoveEvent 118 120 0 0 0 0 i\n\
MouseMoveEvent 118 123 0 0 0 0 i\n\
MouseMoveEvent 120 125 0 0 0 0 i\n\
MouseMoveEvent 122 128 0 0 0 0 i\n\
MouseMoveEvent 123 129 0 0 0 0 i\n\
MouseMoveEvent 125 132 0 0 0 0 i\n\
MouseMoveEvent 125 134 0 0 0 0 i\n\
MouseMoveEvent 127 138 0 0 0 0 i\n\
MouseMoveEvent 127 142 0 0 0 0 i\n\
MouseMoveEvent 127 147 0 0 0 0 i\n\
MouseMoveEvent 126 152 0 0 0 0 i\n\
MouseMoveEvent 126 155 0 0 0 0 i\n\
MouseMoveEvent 125 160 0 0 0 0 i\n\
MouseMoveEvent 125 167 0 0 0 0 i\n\
MouseMoveEvent 125 169 0 0 0 0 i\n\
MouseMoveEvent 125 174 0 0 0 0 i\n\
MouseMoveEvent 122 179 0 0 0 0 i\n\
MouseMoveEvent 120 183 0 0 0 0 i\n\
MouseMoveEvent 116 187 0 0 0 0 i\n\
MouseMoveEvent 113 192 0 0 0 0 i\n\
MouseMoveEvent 113 193 0 0 0 0 i\n\
MouseMoveEvent 111 195 0 0 0 0 i\n\
MouseMoveEvent 108 198 0 0 0 0 i\n\
MouseMoveEvent 106 200 0 0 0 0 i\n\
MouseMoveEvent 104 202 0 0 0 0 i\n\
MouseMoveEvent 103 203 0 0 0 0 i\n\
MouseMoveEvent 99 205 0 0 0 0 i\n\
MouseMoveEvent 97 207 0 0 0 0 i\n\
MouseMoveEvent 94 208 0 0 0 0 i\n\
MouseMoveEvent 91 210 0 0 0 0 i\n\
MouseMoveEvent 89 211 0 0 0 0 i\n\
MouseMoveEvent 86 211 0 0 0 0 i\n\
MouseMoveEvent 84 211 0 0 0 0 i\n\
MouseMoveEvent 80 211 0 0 0 0 i\n\
MouseMoveEvent 77 211 0 0 0 0 i\n\
MouseMoveEvent 75 211 0 0 0 0 i\n\
MouseMoveEvent 71 211 0 0 0 0 i\n\
MouseMoveEvent 68 211 0 0 0 0 i\n\
MouseMoveEvent 66 210 0 0 0 0 i\n\
MouseMoveEvent 62 210 0 0 0 0 i\n\
MouseMoveEvent 58 209 0 0 0 0 i\n\
MouseMoveEvent 54 207 0 0 0 0 i\n\
MouseMoveEvent 52 204 0 0 0 0 i\n\
MouseMoveEvent 51 203 0 0 0 0 i\n\
MouseMoveEvent 51 200 0 0 0 0 i\n\
MouseMoveEvent 48 196 0 0 0 0 i\n\
MouseMoveEvent 45 187 0 0 0 0 i\n\
MouseMoveEvent 45 181 0 0 0 0 i\n\
MouseMoveEvent 44 168 0 0 0 0 i\n\
MouseMoveEvent 40 161 0 0 0 0 i\n\
MouseMoveEvent 39 154 0 0 0 0 i\n\
MouseMoveEvent 38 146 0 0 0 0 i\n\
MouseMoveEvent 35 131 0 0 0 0 i\n\
MouseMoveEvent 34 121 0 0 0 0 i\n\
MouseMoveEvent 34 110 0 0 0 0 i\n\
MouseMoveEvent 34 103 0 0 0 0 i\n\
MouseMoveEvent 34 91 0 0 0 0 i\n\
MouseMoveEvent 34 86 0 0 0 0 i\n\
MouseMoveEvent 34 73 0 0 0 0 i\n\
MouseMoveEvent 35 66 0 0 0 0 i\n\
MouseMoveEvent 37 60 0 0 0 0 i\n\
MouseMoveEvent 37 53 0 0 0 0 i\n\
MouseMoveEvent 38 50 0 0 0 0 i\n\
MouseMoveEvent 38 48 0 0 0 0 i\n\
MouseMoveEvent 41 45 0 0 0 0 i\n\
MouseMoveEvent 43 45 0 0 0 0 i\n\
MouseMoveEvent 44 45 0 0 0 0 i\n\
MouseMoveEvent 47 43 0 0 0 0 i\n\
MouseMoveEvent 51 44 0 0 0 0 i\n\
MouseMoveEvent 54 44 0 0 0 0 i\n\
MouseMoveEvent 55 44 0 0 0 0 i\n\
MouseMoveEvent 59 44 0 0 0 0 i\n\
MouseMoveEvent 64 44 0 0 0 0 i\n\
MouseMoveEvent 67 44 0 0 0 0 i\n\
MouseMoveEvent 68 44 0 0 0 0 i\n\
MouseMoveEvent 71 44 0 0 0 0 i\n\
MouseMoveEvent 74 44 0 0 0 0 i\n\
MouseMoveEvent 77 44 0 0 0 0 i\n\
MouseMoveEvent 80 45 0 0 0 0 i\n\
MouseMoveEvent 81 45 0 0 0 0 i\n\
MouseMoveEvent 85 49 0 0 0 0 i\n\
MouseMoveEvent 89 50 0 0 0 0 i\n\
MouseMoveEvent 94 52 0 0 0 0 i\n\
MouseMoveEvent 99 56 0 0 0 0 i\n\
MouseMoveEvent 104 58 0 0 0 0 i\n\
MouseMoveEvent 107 61 0 0 0 0 i\n\
MouseMoveEvent 109 63 0 0 0 0 i\n\
MouseMoveEvent 109 67 0 0 0 0 i\n\
MouseMoveEvent 111 83 0 0 0 0 i\n\
MouseMoveEvent 113 86 0 0 0 0 i\n\
MouseMoveEvent 113 87 0 0 0 0 i\n\
MouseMoveEvent 113 89 0 0 0 0 i\n\
MouseMoveEvent 112 93 0 0 0 0 i\n\
MouseMoveEvent 112 97 0 0 0 0 i\n\
MouseMoveEvent 111 104 0 0 0 0 i\n\
MouseMoveEvent 112 108 0 0 0 0 i\n\
MouseMoveEvent 116 115 0 0 0 0 i\n\
MouseMoveEvent 116 123 0 0 0 0 i\n\
MouseMoveEvent 116 129 0 0 0 0 i\n\
MouseMoveEvent 119 138 0 0 0 0 i\n\
MouseMoveEvent 122 141 0 0 0 0 i\n\
MouseMoveEvent 127 148 0 0 0 0 i\n\
MouseMoveEvent 128 161 0 0 0 0 i\n\
MouseMoveEvent 131 166 0 0 0 0 i\n\
MouseMoveEvent 134 168 0 0 0 0 i\n\
MouseMoveEvent 135 171 0 0 0 0 i\n\
MouseMoveEvent 134 174 0 0 0 0 i\n\
MouseMoveEvent 132 176 0 0 0 0 i\n\
MouseMoveEvent 132 178 0 0 0 0 i\n\
MouseMoveEvent 129 180 0 0 0 0 i\n\
MouseMoveEvent 127 182 0 0 0 0 i\n\
MouseMoveEvent 124 185 0 0 0 0 i\n\
MouseMoveEvent 122 186 0 0 0 0 i\n\
MouseMoveEvent 118 189 0 0 0 0 i\n\
MouseMoveEvent 114 191 0 0 0 0 i\n\
MouseMoveEvent 114 193 0 0 0 0 i\n\
MouseMoveEvent 112 193 0 0 0 0 i\n\
MouseMoveEvent 111 194 0 0 0 0 i\n\
MouseMoveEvent 110 197 0 0 0 0 i\n\
MouseMoveEvent 110 198 0 0 0 0 i\n\
MouseMoveEvent 109 199 0 0 0 0 i\n\
MouseMoveEvent 108 200 0 0 0 0 i\n\
MouseMoveEvent 108 201 0 0 0 0 i\n\
MouseMoveEvent 108 202 0 0 0 0 i\n\
MouseMoveEvent 108 203 0 0 0 0 i\n\
MouseMoveEvent 104 206 0 0 0 0 i\n\
LeftButtonReleaseEvent 104 206 0 0 0 0 i\n\
MouseMoveEvent 104 205 0 0 0 0 i\n\
MouseMoveEvent 104 204 0 0 0 0 i\n\
MouseMoveEvent 105 205 0 0 0 0 i\n\
MouseMoveEvent 105 206 0 0 0 0 i\n\
"
# Start by loading some data.
#
dem = vtk.vtkDEMReader()
dem.SetFileName(VTK_DATA_ROOT + "/Data/SainteHelens.dem")
dem.Update()
Scale = 2
lut = vtk.vtkLookupTable()
lut.SetHueRange(0.6, 0)
lut.SetSaturationRange(1.0, 0)
lut.SetValueRange(0.5, 1.0)
lo = Scale * dem.GetElevationBounds()[0]
hi = Scale * dem.GetElevationBounds()[1]
shrink = vtk.vtkImageShrink3D()
shrink.SetShrinkFactors(4, 4, 1)
shrink.SetInputConnection(dem.GetOutputPort())
shrink.AveragingOn()
geom = vtk.vtkImageDataGeometryFilter()
geom.SetInputConnection(shrink.GetOutputPort())
geom.ReleaseDataFlagOn()
warp = vtk.vtkWarpScalar()
warp.SetInputConnection(geom.GetOutputPort())
warp.SetNormal(0, 0, 1)
warp.UseNormalOn()
warp.SetScaleFactor(Scale)
warp.ReleaseDataFlagOn()
elevation = vtk.vtkElevationFilter()
elevation.SetInputConnection(warp.GetOutputPort())
elevation.SetLowPoint(0, 0, lo)
elevation.SetHighPoint(0, 0, hi)
elevation.SetScalarRange(lo, hi)
elevation.ReleaseDataFlagOn()
normals = vtk.vtkPolyDataNormals()
normals.SetInputConnection(elevation.GetOutputPort())
normals.SetFeatureAngle(60)
normals.ConsistencyOff()
normals.SplittingOff()
normals.ReleaseDataFlagOn()
normals.Update()
demMapper = vtk.vtkPolyDataMapper()
demMapper.SetInputConnection(normals.GetOutputPort())
demMapper.SetScalarRange(lo, hi)
demMapper.SetLookupTable(lut)
demMapper.ImmediateModeRenderingOn()
demActor = vtk.vtkActor()
demActor.SetMapper(demMapper)
# Create the RenderWindow, Renderer and both Actors
#
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.SetMultiSamples(0)
renWin.AddRenderer(ren)
iRen = vtk.vtkRenderWindowInteractor()
iRen.SetRenderWindow(renWin)
iRen.LightFollowCameraOff()
# iRen.SetInteractorStyle("")
# The callback takes two arguments.
# The first being the object that generates the event and
# the second argument the event name (which is a string).
def MoveLight(widget, event_string):
light.SetPosition(rep.GetHandlePosition())
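        # Hedged illustration (not part of the original test): any callable with
        # the (caller, event_name) signature can be attached through AddObserver
        # in the same way, for example:
        #
        #     def LogEvent(caller, event_string):
        #         print("%s fired %s" % (caller.GetClassName(), event_string))
        #     sphereWidget.AddObserver("InteractionEvent", LogEvent)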
# Associate the line widget with the interactor
rep = vtk.vtkSphereRepresentation()
rep.SetPlaceFactor(4)
rep.PlaceWidget(normals.GetOutput().GetBounds())
rep.HandleVisibilityOn()
rep.SetRepresentationToWireframe()
# rep HandleVisibilityOff
# rep HandleTextOff
sphereWidget = vtk.vtkSphereWidget2()
sphereWidget.SetInteractor(iRen)
sphereWidget.SetRepresentation(rep)
# sphereWidget.TranslationEnabledOff()
# sphereWidget.ScalingEnabledOff()
sphereWidget.AddObserver("InteractionEvent", MoveLight)
recorder = vtk.vtkInteractorEventRecorder()
recorder.SetInteractor(iRen)
# recorder.SetFileName("c:/record.log")
# recorder.Record()
recorder.ReadFromInputStringOn()
recorder.SetInputString(Recording)
# Add the actors to the renderer, set the background and size
#
ren.AddActor(demActor)
ren.SetBackground(1, 1, 1)
renWin.SetSize(300, 300)
ren.SetBackground(0.1, 0.2, 0.4)
cam1 = ren.GetActiveCamera()
cam1.SetViewUp(0, 0, 1)
cam1.SetFocalPoint(dem.GetOutput().GetCenter())
cam1.SetPosition(1, 0, 0)
ren.ResetCamera()
cam1.Elevation(25)
cam1.Azimuth(125)
cam1.Zoom(1.25)
light = vtk.vtkLight()
light.SetFocalPoint(rep.GetCenter())
light.SetPosition(rep.GetHandlePosition())
ren.AddLight(light)
iRen.Initialize()
renWin.Render()
# render the image
renWin.Render()
# Actually probe the data
recorder.Play()
img_file = "TestSphereWidget.png"
vtk.test.Testing.compareImage(iRen.GetRenderWindow(), vtk.test.Testing.getAbsImagePath(img_file), threshold=25)
vtk.test.Testing.interact()
if __name__ == "__main__":
vtk.test.Testing.main([(TestSphereWidget, 'test')]) | HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/VTK/Interaction/Widgets/Testing/Python/TestSphereWidget.py | Python | gpl-3.0 | 14,176 | [
"VTK"
] | 792ff8132e4cd47f5fb08a6a1cd1867edd4e6e0ccf44213782ab779c05aea099 |
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from peacock.Input.ParamsByGroup import ParamsByGroup
from peacock.utils import Testing
from peacock.Input.ParameterInfo import ParameterInfo
from peacock.Input.BlockInfo import BlockInfo
from PyQt5 import QtWidgets
class Tests(Testing.PeacockTester):
qapp = QtWidgets.QApplication([])
def setUp(self):
super(Tests, self).setUp()
self.block_list_requested = 0
self.block_children = ["child0", "child1", "child2"]
def createParam(self, name, value="", cpp_type="string", options=[], required=False, user_added=False, group="Main"):
p = ParameterInfo(None, name)
p.value = value
p.cpp_type = cpp_type
p.options = options
p.required = required
p.user_added = user_added
p.group_name = group
return p
def needBlockList(self, w, blocks):
self.block_list_requested += 1
for b in blocks:
w.setWatchedBlockList(b, self.block_children)
def createTabs(self, params):
b = BlockInfo(None, "/Foo")
for p in params:
b.addParameter(p)
tmap = {"VariableName": ["/Variables"]}
t = ParamsByGroup(b, params, tmap)
t.resize(480, 480)
t.addName("some name")
t.addUserParam("user_param")
t.needBlockList.connect(lambda paths: self.needBlockList(t, paths))
t.updateWatchers()
if params:
self.assertEqual(self.block_list_requested, 1)
t.show()
return t
def createParams(self):
params = []
options = ["option_0", "option_1", "option_2"]
params.append(self.createParam("p0"))
params.append(self.createParam("p1", value="some val", required=True))
params.append(self.createParam("p2", cpp_type="FileName"))
params.append(self.createParam("p3", cpp_type="FileNameNoExtension"))
params.append(self.createParam("p4", cpp_type="MeshFileName"))
params.append(self.createParam("p5", options=options))
params.append(self.createParam("p7", cpp_type="vector", options=options))
params.append(self.createParam("p8"))
params.append(self.createParam("p9", cpp_type="VariableName"))
params.append(self.createParam("p10", cpp_type="vector<VariableName>"))
params.append(self.createParam("p11", group="Group1"))
params.append(self.createParam("p12", group="Group2"))
params.append(self.createParam("p13", group="Group1"))
params.append(self.createParam("p14", group="Group2"))
return params
def testEmpty(self):
b = BlockInfo(None, "/Foo")
t = ParamsByGroup(b, [], {})
t.needBlockList.connect(lambda paths: self.needBlockList(t, paths))
self.assertEqual(t.count(), 0)
t.setWatchedBlockList(t.count(), [])
t.save()
t.reset()
def testParamAdded(self):
t = self.createTabs(self.createParams())
table = t.findTable("Main")
count_before = table.rowCount()
t.addUserParam("new_param")
count_after = table.rowCount()
self.assertEqual(count_before+1, count_after)
row = table.findRow("new_param")
self.assertEqual(row, count_after-1)
t.save()
self.assertNotEqual(t.block.getParamInfo("new_param"), None)
t.addUserParam("param1")
row = table.findRow("param1")
self.assertGreater(row, 0)
self.assertEqual(t.block.getParamInfo("param1"), None)
t.reset()
row = table.findRow("param1")
self.assertEqual(row, -1)
if __name__ == '__main__':
Testing.run_tests()
| nuclear-wizard/moose | python/peacock/tests/input_tab/ParamsByGroup/test_ParamsByGroup.py | Python | lgpl-2.1 | 3,947 | [
"MOOSE"
] | ef5e3e4f04a69ac06bb83213a1c2e627fea78ee7c3302bb21ddeff8fa7a6fcaa |
#
# Copyright (c) 2016 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import print_function, absolute_import
import contextlib
import os
import fnmatch
import mimetypes as mimetype_python
import logging
import pygments.lexers
import pygments.util
import binaryornot.check
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfparser import PDFSyntaxError
from pdfminer.pdfdocument import PDFEncryptionError
from pdfminer.pdftypes import PDFException
from commoncode import fileutils
from commoncode import filetype
from typecode import magic2
"""
Utilities to detect and report the type of a file or path based on its name,
extension and mostly its content.
"""
LOG = logging.getLogger(__name__)
data_dir = os.path.join(os.path.dirname(__file__), 'data')
bin_dir = os.path.join(os.path.dirname(__file__), 'bin')
# Python mimetypes path setup using Apache mimetypes DB
os.environ['XDG_DATA_DIRS'] = os.path.join(data_dir, 'apache')
os.environ['XDG_DATA_HOME'] = os.environ['XDG_DATA_DIRS']
APACHE_MIME_TYPES = os.path.join(data_dir, 'apache', 'mime.types')
# Ensure that all dates are UTC, especially for fine free file.
os.environ['TZ'] = 'UTC'
PLAIN_TEXT_EXTENSIONS = ('.rst', '.rest', '.txt', '.md',
# This one is actually not handled by Pygments. There
# are probably more.
'.log')
C_EXTENSIONS = set(['.c', '.cc', '.cp', '.cpp', '.cxx', '.c++', '.h', '.hh',
'.s', '.asm', '.hpp', '.hxx', '.h++', '.i', '.ii', '.m'])
ELF_EXE = 'executable'
ELF_SHARED = 'shared object'
ELF_RELOC = 'relocatable'
ELF_UNKNOWN = 'unknown'
elf_types = (ELF_EXE, ELF_SHARED, ELF_RELOC,)
# TODO:
# http://svn.zope.org/z3c.mimetype/trunk/?pathrev=103648
# http://svn.zope.org/z3c.sharedmimeinfo/trunk/TODO.txt?revision=103668&view=markup
# https://pypi.python.org/pypi/z3c.sharedmimeinfo/0.1.0
# https://github.com/plone/Products.MimetypesRegistry/
# Global registry of Type objects, keyed by location
# TODO: can this be a memory hog for very large scans?
_registry = {}
def get_type(location):
"""
Return a Type object for location.
"""
abs_loc = os.path.abspath(location)
try:
return _registry[abs_loc]
except KeyError:
t = Type(abs_loc)
_registry[abs_loc] = t
return t
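# Illustrative usage sketch (not part of the original module; 'setup.py' is just
# a placeholder path): repeated calls for the same location return the cached
# Type object from _registry.
#
#     t1 = get_type('setup.py')
#     t2 = get_type('setup.py')
#     assert t1 is t2
#     print(t1.filetype_file, t1.mimetype_file, t1.is_text)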
# TODO: simplify code using a cached property decorator
class Type(object):
"""
Content, media and mime type information about a file.
All flags and values are tri-booleans. You can test a value state with
`is`:
- if the value is None, it has not been computed yet
- if the value is False or has some true-ish value it has been computed
    Raise an IOError if the location does not exist.
"""
__slots__ = (
'location',
'is_file',
'is_dir',
'is_regular',
'is_special',
'date',
'is_link',
'is_broken_link',
'_size',
'_link_target',
'_mimetype_python',
'_filetype_file',
'_mimetype_file',
'_filetype_pygments',
'_is_pdf_with_text',
'_is_text',
'_is_binary',
'_contains_text',
)
def __init__(self, location):
if (not location
or (not os.path.exists(location)
and not filetype.is_broken_link(location))):
raise IOError("[Errno 2] No such file or directory: "
"'%(location)r'" % locals())
self.location = location
# flags and values
self.is_file = filetype.is_file(location)
self.is_dir = filetype.is_dir(location)
self.is_regular = filetype.is_regular(location)
self.is_special = filetype.is_special(location)
self.date = filetype.get_last_modified_date(location)
self.is_link = filetype.is_link(location)
self.is_broken_link = filetype.is_broken_link(location)
# FIXME: the way the True and False values are checked in properties is verbose and contrived at best
    # and is due to using None/True/False as distinct values
# computed on demand
self._size = None
self._link_target = None
self._mimetype_python = None
self._filetype_file = None
self._mimetype_file = None
self._filetype_pygments = None
self._is_pdf_with_text = None
self._is_text = None
self._is_binary = None
self._contains_text = None
def __repr__(self):
return ('Type(ftf=%r, mtf=%r, ftpyg=%r, mtpy=%r)'
% (self.filetype_file, self.mimetype_file,
self.filetype_pygment, self.mimetype_python))
@property
def size(self):
"""
Return the size of a file or directory
"""
if self._size is None:
self._size = 0
if self.is_file or self.is_dir:
self._size = filetype.get_size(self.location)
return self._size
@property
def link_target(self):
"""
Return a link target for symlinks or an empty string otherwise.
"""
if self._link_target is None:
self._link_target = ''
if self.is_link or self.is_broken_link:
self._link_target = filetype.get_link_target(self.location)
return self._link_target
@property
def mimetype_python(self):
"""
Return the mimetype using the Python stdlib and the Apache HTTPD
mimetype definitions.
"""
if self._mimetype_python is None:
self._mimetype_python = ''
if self.is_file is True:
if not mimetype_python.inited:
mimetype_python.init([APACHE_MIME_TYPES])
val = mimetype_python.guess_type(self.location)[0]
self._mimetype_python = val
return self._mimetype_python
@property
def filetype_file(self):
"""
Return the filetype using the fine free file library.
"""
if self._filetype_file is None:
self._filetype_file = ''
if self.is_file is True:
self._filetype_file = magic2.file_type(self.location)
return self._filetype_file
@property
def mimetype_file(self):
"""
Return the mimetype using the fine free file library.
"""
if self._mimetype_file is None:
self._mimetype_file = ''
if self.is_file is True:
self._mimetype_file = magic2.mime_type(self.location)
return self._mimetype_file
@property
def filetype_pygment(self):
"""
Return the filetype guessed using Pygments lexer, mostly for source code.
"""
if self._filetype_pygments is None:
self._filetype_pygments = ''
if self.is_file:
lexer = get_pygments_lexer(self.location)
if lexer:
self._filetype_pygments = lexer.name or ''
else:
self._filetype_pygments = ''
return self._filetype_pygments
    # FIXME: the way we use tri-booleans is a tad ugly
@property
def is_binary(self):
"""
        Return True if the file at location is likely to be a binary file.
"""
if self._is_binary is None:
self._is_binary = False
if self.is_file is True:
self._is_binary = binaryornot.check.is_binary(self.location)
return self._is_binary
@property
def is_text(self):
"""
        Return True if the file at location is likely to be a text file.
"""
if self._is_text is None:
self._is_text = self.is_file is True and self.is_binary is False
return self._is_text
@property
def is_archive(self):
"""
Return True if the file is some kind of archive or compressed file.
"""
# FIXME: we should use extracode archive detection
# TODO: also treat file systems as archives
ft = self.filetype_file.lower()
if (not self.is_text
and (self.is_compressed
or 'archive' in ft
or self.is_package
or self.is_filesystem
or (self.is_office_doc and self.location.endswith('x'))
# FIXME: is this really correct???
or '(zip)' in ft
)
):
return True
else:
return False
@property
def is_office_doc(self):
loc = self.location.lower()
        if loc.endswith(('.doc', '.docx', '.xls', '.xlsx', '.ppt', '.pptx',)):
return True
else:
return False
@property
def is_package(self):
"""
Return True if the file is some kind of packaged archive.
"""
ft = self.filetype_file.lower()
loc = self.location.lower()
if ('debian binary package' in ft
or ft.startswith('rpm ')
or (ft == 'posix tar archive' and loc.endswith('.gem'))
or (ft.startswith(('zip archive',)) and loc.endswith(('.jar', '.war', '.ear', '.egg', '.whl',)))
or (ft.startswith(('java archive',)) and loc.endswith(('.jar', '.war', '.ear', '.zip',)))
):
return True
else:
return False
@property
def is_compressed(self):
"""
Return True if the file is some kind of compressed file.
"""
ft = self.filetype_file.lower()
if (not self.is_text
and ('compressed' in ft
or self.is_package
or (self.is_office_doc and self.location.endswith('x'))
)):
return True
else:
return False
@property
def is_filesystem(self):
"""
Return True if the file is some kind of file system or disk image.
"""
ft = self.filetype_file.lower()
if ('squashfs filesystem' in ft):
return True
else:
return False
@property
def is_media(self):
"""
Return True if the file is likely to be a media file.
"""
# TODO: fonts?
mt = self.mimetype_file
mimes = ('image', 'picture', 'audio', 'video', 'graphic', 'sound',)
ft = self.filetype_file.lower()
types = (
'image data', 'graphics image', 'ms-windows metafont .wmf',
'windows enhanced metafile',
'png image', 'interleaved image', 'microsoft asf', 'image text',
'photoshop image', 'shop pro image', 'ogg data', 'vorbis', 'mpeg',
'theora', 'bitmap', 'audio', 'video', 'sound', 'riff', 'icon',
'pc bitmap', 'image data',
)
if any(m in mt for m in mimes) or any(t in ft for t in types):
return True
else:
return False
@property
def is_media_with_meta(self):
"""
Return True if the file is a media file that may contain text metadata.
"""
        # For now we only exclude PNGs; most other media formats
        # (mp1/2/3/4, jpeg, gif) can carry text metadata.
if self.is_media and 'png image' in self.filetype_file.lower():
return False
else:
return True
@property
def is_pdf(self):
"""
Return True if the file is highly likely to be a pdf file.
"""
ft = self.mimetype_file
if 'pdf' in ft:
return True
else:
return False
@property
def is_pdf_with_text(self):
"""
Return True if the file is a pdf file from which we can extract text.
"""
if self._is_pdf_with_text is None:
self._is_pdf_with_text = False
            if self.is_file is not True or self.is_pdf is not True:
self._is_pdf_with_text = False
else:
with open(self.location, 'rb') as pf:
try:
with contextlib.closing(PDFParser(pf)) as parser:
doc = PDFDocument(parser)
self._is_pdf_with_text = doc.is_extractable
except (PDFSyntaxError, PDFException, PDFEncryptionError):
self._is_pdf_with_text = False
return self._is_pdf_with_text
@property
def contains_text(self):
"""
Return True if a file possibly contains some text.
"""
if self._contains_text is None:
if not self.is_file:
self._contains_text = False
elif self.is_text:
self._contains_text = True
elif self.is_pdf and not self.is_pdf_with_text:
self._contains_text = False
elif self.is_compressed or self.is_archive:
self._contains_text = False
elif self.is_media and not self.is_media_with_meta:
self._contains_text = False
else:
self._contains_text = True
return self._contains_text
@property
def is_script(self):
"""
Return True if the file is script-like.
"""
ft = self.filetype_file.lower()
if self.is_text is True and ('text' in ft and 'script' in ft):
return True
else:
return False
@property
def is_source(self):
"""
Return True if the file is source code.
"""
if self.is_text is False:
return False
if self.location.endswith(PLAIN_TEXT_EXTENSIONS):
return False
ft = self.filetype_file.lower()
pt = self.filetype_pygment.lower()
if 'xml' not in ft and \
('xml' not in pt or self.location.endswith('pom.xml')) and \
(pt or self.is_script is True):
return True
else:
return False
@property
def programming_language(self):
"""
Return the programming language if the file is source code or an empty
string.
"""
return self.is_source and self.filetype_pygment or ''
@property
def is_c_source(self):
ext = fileutils.file_extension(self.location)
if self.is_text is True and ext.lower() in C_EXTENSIONS:
return True
else:
return False
@property
def is_winexe(self):
"""
        Return True if the file is a Windows executable.
"""
ft = self.filetype_file.lower()
if 'executable for ms windows' in ft or ft.startswith('pe32'):
return True
else:
return False
@property
def is_elf(self):
ft = self.filetype_file.lower()
if (ft.startswith('elf')
and (ELF_EXE in ft
or ELF_SHARED in ft
or ELF_RELOC in ft)):
return True
else:
return False
@property
def elf_type(self):
if self.is_elf is True:
ft = self.filetype_file.lower()
for t in elf_types:
if t in ft:
return t
return ELF_UNKNOWN
else:
return ''
@property
def is_stripped_elf(self):
if self.is_elf is True:
if 'not stripped' not in self.filetype_file.lower():
return True
else:
return False
else:
return False
@property
def is_java_source(self):
"""
FIXME: Check the filetype.
"""
if self.is_file is True:
name = fileutils.file_name(self.location)
if (fnmatch.fnmatch(name, '*.java')
or fnmatch.fnmatch(name, '*.aj')
or fnmatch.fnmatch(name, '*.ajt')
):
return True
else:
return False
else:
return False
@property
def is_java_class(self):
"""
FIXME: Check the filetype.
"""
if self.is_file is True:
name = fileutils.file_name(self.location)
if fnmatch.fnmatch(name, '*?.class'):
return True
else:
return False
else:
return False
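# Hedged example (not part of the original module): because the flags are
# tri-booleans, callers compare against True explicitly instead of relying on
# truthiness ('some/file.c' is a placeholder path).
#
#     t = get_type('some/file.c')
#     if t.is_text is True and t.is_c_source is True:
#         print('C source detected:', t.programming_language)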
def get_pygments_lexer(location):
"""
Given an input file location, return a Pygments lexer appropriate for
lexing this file content.
"""
try:
T = _registry[location]
if T.is_binary:
return
except KeyError:
if binaryornot.check.is_binary(location):
return
try:
# FIXME: Latest Pygments versions should work fine
# win32_bug_on_s_files = dejacode.on_windows and location.endswith('.s')
# NOTE: we use only the location for its file name here, we could use
# lowercase location may be
lexer = pygments.lexers.get_lexer_for_filename(location,
stripnl=False,
stripall=False)
return lexer
except pygments.util.ClassNotFound:
try:
# if Pygments does not guess we should not carry forward
# read the first 4K of the file
with open(location, 'rb') as f:
content = f.read(4096)
guessed = pygments.lexers.guess_lexer(content)
return guessed
except pygments.util.ClassNotFound:
return
def get_filetype(location):
"""
LEGACY: Return the best filetype for location using multiple tools.
"""
T = get_type(location)
filetype = T.filetype_file.lower()
filetype_pygment = T.filetype_pygment
    # 'file' is not good at detecting programming languages; if even Pygments
    # cannot detect one, we can ignore it
if T.is_text and T.filetype_pygment:
        # Pygments tends to recognize many XML files as Genshi files
# Genshi is rare and irrelevant, just declare as XML
ftpl = filetype_pygment.lower()
if 'genshi' in ftpl or 'xml+evoque' in ftpl:
return 'xml language text'
        # Pygments sometimes recognizes ELFs as Groff files
if not ('roff' in filetype_pygment and 'roff' not in filetype):
if filetype_pygment.lower() != 'text only':
# FIXME: this 'language text' is ugly
return filetype_pygment.lower() + ' language text'
return filetype
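# Minimal usage sketch (assumption: the path exists on disk): get_filetype
# prefers the Pygments guess for text files and falls back to libmagic output.
#
#     print(get_filetype('contenttype.py'))   # e.g. 'python language text'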
STD_INCLUDES = ('/usr/lib/gcc', '/usr/lib', '/usr/include',
'<built-in>', '/tmp/glibc-',)
def is_standard_include(location):
"""
Return True if a file path refers to something that looks like a
standard include.
"""
if (location.startswith(STD_INCLUDES) or location.endswith(STD_INCLUDES)):
return True
else:
return False
| yashdsaraf/scancode-toolkit | src/typecode/contenttype.py | Python | apache-2.0 | 20,201 | [
"VisIt"
] | 8edc605800d852890d56f5a553f71bd562d501153626e55a6d50bb94d8bf222e |
# -*- coding: utf-8 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from html5lib import treebuilders, inputstream
from xhtml2pdf.default import TAGS, STRING, INT, BOOL, SIZE, COLOR, FILE
from xhtml2pdf.default import BOX, POS, MUST, FONT
from xhtml2pdf.util import getSize, getBool, toList, getColor, getAlign
from xhtml2pdf.util import getBox, getPos, pisaTempFile
from reportlab.platypus.doctemplate import NextPageTemplate, FrameBreak
from reportlab.platypus.flowables import PageBreak, KeepInFrame
from xhtml2pdf.xhtml2pdf_reportlab import PmlRightPageBreak, PmlLeftPageBreak
from xhtml2pdf.tags import * # TODO: Kill wild import!
from xhtml2pdf.tables import * # TODO: Kill wild import!
from xhtml2pdf.util import * # TODO: Kill wild import!
from xml.dom import Node
import copy
import html5lib
import logging
import re
import sys
#support python 3
#import types
if sys.version[0] == '2':
    StringTypes = (str, unicode)
else:
    StringTypes = (str,)
    unicode = str  # alias so the `type(src) is unicode` check in pisaParser also works on Python 3
import xhtml2pdf.w3c.cssDOMElementInterface as cssDOMElementInterface
import xml.dom.minidom
CSSAttrCache = {}
log = logging.getLogger("xhtml2pdf")
rxhttpstrip = re.compile("https?://[^/]+(.*)", re.M | re.I)
class AttrContainer(dict):
def __getattr__(self, name):
try:
return dict.__getattr__(self, name)
except:
return self[name]
def pisaGetAttributes(c, tag, attributes):
global TAGS
attrs = {}
if attributes:
for k, v in attributes.items():
try:
attrs[str(k)] = str(v) # XXX no Unicode! Reportlab fails with template names
except:
attrs[k] = v
nattrs = {}
if tag in TAGS:
block, adef = TAGS[tag]
adef["id"] = STRING
# print block, adef
try:
iteritems = adef.iteritems()
except Exception:
iteritems = iter(adef.items())
for k, v in iteritems:
nattrs[k] = None
# print k, v
            # defaults, if present
if type(v) == tuple:
if v[1] == MUST:
if k not in attrs:
log.warn(c.warning("Attribute '%s' must be set!", k))
nattrs[k] = None
continue
nv = attrs.get(k, v[1])
dfl = v[1]
v = v[0]
else:
nv = attrs.get(k, None)
dfl = None
if nv is not None:
if type(v) == list:
nv = nv.strip().lower()
if nv not in v:
#~ raise PML_EXCEPTION, "attribute '%s' of wrong value, allowed is one of: %s" % (k, repr(v))
log.warn(c.warning("Attribute '%s' of wrong value, allowed is one of: %s", k, repr(v)))
nv = dfl
elif v == BOOL:
nv = nv.strip().lower()
nv = nv in ("1", "y", "yes", "true", str(k))
elif v == SIZE:
try:
nv = getSize(nv)
except:
log.warn(c.warning("Attribute '%s' expects a size value", k))
elif v == BOX:
nv = getBox(nv, c.pageSize)
elif v == POS:
nv = getPos(nv, c.pageSize)
elif v == INT:
nv = int(nv)
elif v == COLOR:
nv = getColor(nv)
elif v == FILE:
nv = c.getFile(nv)
elif v == FONT:
nv = c.getFontName(nv)
nattrs[k] = nv
return AttrContainer(nattrs)
attrNames = '''
color
font-family
font-size
font-weight
font-style
text-decoration
line-height
letter-spacing
background-color
display
margin-left
margin-right
margin-top
margin-bottom
padding-left
padding-right
padding-top
padding-bottom
border-top-color
border-top-style
border-top-width
border-bottom-color
border-bottom-style
border-bottom-width
border-left-color
border-left-style
border-left-width
border-right-color
border-right-style
border-right-width
text-align
vertical-align
width
height
zoom
page-break-after
page-break-before
list-style-type
list-style-image
white-space
text-indent
-pdf-page-break
-pdf-frame-break
-pdf-next-page
-pdf-keep-with-next
-pdf-outline
-pdf-outline-level
-pdf-outline-open
-pdf-line-spacing
-pdf-keep-in-frame-mode
-pdf-word-wrap
'''.strip().split()
def getCSSAttr(self, cssCascade, attrName, default=NotImplemented):
if attrName in self.cssAttrs:
return self.cssAttrs[attrName]
try:
result = cssCascade.findStyleFor(self.cssElement, attrName, default)
except LookupError:
result = None
# XXX Workaround for inline styles
try:
style = self.cssStyle
except:
style = self.cssStyle = cssCascade.parser.parseInline(self.cssElement.getStyleAttr() or '')[0]
if attrName in style:
result = style[attrName]
if result == 'inherit':
if hasattr(self.parentNode, 'getCSSAttr'):
result = self.parentNode.getCSSAttr(cssCascade, attrName, default)
elif default is not NotImplemented:
return default
raise LookupError("Could not find inherited CSS attribute value for '%s'" % (attrName,))
if result is not None:
self.cssAttrs[attrName] = result
return result
#TODO: Monkeypatching standard lib should go away.
xml.dom.minidom.Element.getCSSAttr = getCSSAttr
# Create an aliasing system. Many sources use non-standard tags, because browsers allow
# them to. This allows us to map a nonstandard name to the standard one.
nonStandardAttrNames = {
'bgcolor': 'background-color',
}
def mapNonStandardAttrs(c, n, attrList):
for attr in nonStandardAttrNames:
if attr in attrList and nonStandardAttrNames[attr] not in c:
c[nonStandardAttrNames[attr]] = attrList[attr]
return c
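# Illustrative sketch (not part of the original module): a legacy attribute such
# as bgcolor is folded into the CSS attribute dict unless background-color was
# already set by a stylesheet.
#
#     css = mapNonStandardAttrs({}, None, {'bgcolor': '#ffcc00'})
#     # css == {'background-color': '#ffcc00'}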
def getCSSAttrCacheKey(node):
_cl = _id = _st = ''
for k, v in node.attributes.items():
if k == 'class':
_cl = v
elif k == 'id':
_id = v
elif k == 'style':
_st = v
return "%s#%s#%s#%s#%s" % (id(node.parentNode), node.tagName.lower(), _cl, _id, _st)
def CSSCollect(node, c):
#node.cssAttrs = {}
#return node.cssAttrs
if c.css:
_key = getCSSAttrCacheKey(node)
if hasattr(node.parentNode, "tagName"):
if node.parentNode.tagName.lower() != "html":
CachedCSSAttr = CSSAttrCache.get(_key, None)
if CachedCSSAttr is not None:
node.cssAttrs = CachedCSSAttr
return CachedCSSAttr
node.cssElement = cssDOMElementInterface.CSSDOMElementInterface(node)
node.cssAttrs = {}
# node.cssElement.onCSSParserVisit(c.cssCascade.parser)
cssAttrMap = {}
for cssAttrName in attrNames:
try:
cssAttrMap[cssAttrName] = node.getCSSAttr(c.cssCascade, cssAttrName)
#except LookupError:
# pass
except Exception: # TODO: Kill this catch-all!
log.debug("CSS error '%s'", cssAttrName, exc_info=1)
CSSAttrCache[_key] = node.cssAttrs
return node.cssAttrs
def lower(sequence):
if type(sequence) in StringTypes:
return sequence.lower()
else:
return sequence[0].lower()
def CSS2Frag(c, kw, isBlock):
# COLORS
if "color" in c.cssAttr:
c.frag.textColor = getColor(c.cssAttr["color"])
if "background-color" in c.cssAttr:
c.frag.backColor = getColor(c.cssAttr["background-color"])
# FONT SIZE, STYLE, WEIGHT
if "font-family" in c.cssAttr:
c.frag.fontName = c.getFontName(c.cssAttr["font-family"])
if "font-size" in c.cssAttr:
# XXX inherit
c.frag.fontSize = max(getSize("".join(c.cssAttr["font-size"]), c.frag.fontSize, c.baseFontSize), 1.0)
if "line-height" in c.cssAttr:
leading = "".join(c.cssAttr["line-height"])
c.frag.leading = getSize(leading, c.frag.fontSize)
c.frag.leadingSource = leading
else:
c.frag.leading = getSize(c.frag.leadingSource, c.frag.fontSize)
if "letter-spacing" in c.cssAttr:
c.frag.letterSpacing = c.cssAttr["letter-spacing"]
if "-pdf-line-spacing" in c.cssAttr:
c.frag.leadingSpace = getSize("".join(c.cssAttr["-pdf-line-spacing"]))
# print "line-spacing", c.cssAttr["-pdf-line-spacing"], c.frag.leading
if "font-weight" in c.cssAttr:
value = lower(c.cssAttr["font-weight"])
if value in ("bold", "bolder", "500", "600", "700", "800", "900"):
c.frag.bold = 1
else:
c.frag.bold = 0
for value in toList(c.cssAttr.get("text-decoration", "")):
if "underline" in value:
c.frag.underline = 1
if "line-through" in value:
c.frag.strike = 1
if "none" in value:
c.frag.underline = 0
c.frag.strike = 0
if "font-style" in c.cssAttr:
value = lower(c.cssAttr["font-style"])
if value in ("italic", "oblique"):
c.frag.italic = 1
else:
c.frag.italic = 0
if "white-space" in c.cssAttr:
# normal | pre | nowrap
c.frag.whiteSpace = str(c.cssAttr["white-space"]).lower()
# ALIGN & VALIGN
if "text-align" in c.cssAttr:
c.frag.alignment = getAlign(c.cssAttr["text-align"])
if "vertical-align" in c.cssAttr:
c.frag.vAlign = c.cssAttr["vertical-align"]
# HEIGHT & WIDTH
if "height" in c.cssAttr:
try:
c.frag.height = "".join(toList(c.cssAttr["height"])) # XXX Relative is not correct!
except TypeError:
# sequence item 0: expected string, tuple found
c.frag.height = "".join(toList(c.cssAttr["height"][0]))
if c.frag.height in ("auto",):
c.frag.height = None
if "width" in c.cssAttr:
try:
c.frag.width = "".join(toList(c.cssAttr["width"])) # XXX Relative is not correct!
except TypeError:
c.frag.width = "".join(toList(c.cssAttr["width"][0]))
if c.frag.width in ("auto",):
c.frag.width = None
# ZOOM
if "zoom" in c.cssAttr:
zoom = "".join(toList(c.cssAttr["zoom"])) # XXX Relative is not correct!
if zoom.endswith("%"):
zoom = float(zoom[: - 1]) / 100.0
c.frag.zoom = float(zoom)
# MARGINS & LIST INDENT, STYLE
if isBlock:
if "margin-top" in c.cssAttr:
c.frag.spaceBefore = getSize(c.cssAttr["margin-top"], c.frag.fontSize)
if "margin-bottom" in c.cssAttr:
c.frag.spaceAfter = getSize(c.cssAttr["margin-bottom"], c.frag.fontSize)
if "margin-left" in c.cssAttr:
c.frag.bulletIndent = kw["margin-left"] # For lists
kw["margin-left"] += getSize(c.cssAttr["margin-left"], c.frag.fontSize)
c.frag.leftIndent = kw["margin-left"]
if "margin-right" in c.cssAttr:
kw["margin-right"] += getSize(c.cssAttr["margin-right"], c.frag.fontSize)
c.frag.rightIndent = kw["margin-right"]
if "text-indent" in c.cssAttr:
c.frag.firstLineIndent = getSize(c.cssAttr["text-indent"], c.frag.fontSize)
if "list-style-type" in c.cssAttr:
c.frag.listStyleType = str(c.cssAttr["list-style-type"]).lower()
if "list-style-image" in c.cssAttr:
c.frag.listStyleImage = c.getFile(c.cssAttr["list-style-image"])
# PADDINGS
if isBlock:
if "padding-top" in c.cssAttr:
c.frag.paddingTop = getSize(c.cssAttr["padding-top"], c.frag.fontSize)
if "padding-bottom" in c.cssAttr:
c.frag.paddingBottom = getSize(c.cssAttr["padding-bottom"], c.frag.fontSize)
if "padding-left" in c.cssAttr:
c.frag.paddingLeft = getSize(c.cssAttr["padding-left"], c.frag.fontSize)
if "padding-right" in c.cssAttr:
c.frag.paddingRight = getSize(c.cssAttr["padding-right"], c.frag.fontSize)
# BORDERS
if isBlock:
if "border-top-width" in c.cssAttr:
c.frag.borderTopWidth = getSize(c.cssAttr["border-top-width"], c.frag.fontSize)
if "border-bottom-width" in c.cssAttr:
c.frag.borderBottomWidth = getSize(c.cssAttr["border-bottom-width"], c.frag.fontSize)
if "border-left-width" in c.cssAttr:
c.frag.borderLeftWidth = getSize(c.cssAttr["border-left-width"], c.frag.fontSize)
if "border-right-width" in c.cssAttr:
c.frag.borderRightWidth = getSize(c.cssAttr["border-right-width"], c.frag.fontSize)
if "border-top-style" in c.cssAttr:
c.frag.borderTopStyle = c.cssAttr["border-top-style"]
if "border-bottom-style" in c.cssAttr:
c.frag.borderBottomStyle = c.cssAttr["border-bottom-style"]
if "border-left-style" in c.cssAttr:
c.frag.borderLeftStyle = c.cssAttr["border-left-style"]
if "border-right-style" in c.cssAttr:
c.frag.borderRightStyle = c.cssAttr["border-right-style"]
if "border-top-color" in c.cssAttr:
c.frag.borderTopColor = getColor(c.cssAttr["border-top-color"])
if "border-bottom-color" in c.cssAttr:
c.frag.borderBottomColor = getColor(c.cssAttr["border-bottom-color"])
if "border-left-color" in c.cssAttr:
c.frag.borderLeftColor = getColor(c.cssAttr["border-left-color"])
if "border-right-color" in c.cssAttr:
c.frag.borderRightColor = getColor(c.cssAttr["border-right-color"])
def pisaPreLoop(node, context, collect=False):
"""
Collect all CSS definitions
"""
data = u""
if node.nodeType == Node.TEXT_NODE and collect:
data = node.data
elif node.nodeType == Node.ELEMENT_NODE:
name = node.tagName.lower()
if name in ("style", "link"):
attr = pisaGetAttributes(context, name, node.attributes)
media = [x.strip() for x in attr.media.lower().split(",") if x.strip()]
if attr.get("type", "").lower() in ("", "text/css") and \
(not media or "all" in media or "print" in media or "pdf" in media):
if name == "style":
for node in node.childNodes:
data += pisaPreLoop(node, context, collect=True)
context.addCSS(data)
return u""
if name == "link" and attr.href and attr.rel.lower() == "stylesheet":
# print "CSS LINK", attr
context.addCSS('\n@import "%s" %s;' % (attr.href, ",".join(media)))
for node in node.childNodes:
result = pisaPreLoop(node, context, collect=collect)
if collect:
data += result
return data
def pisaLoop(node, context, path=None, **kw):
if path is None:
path = []
# Initialize KW
if not kw:
kw = {
"margin-top": 0,
"margin-bottom": 0,
"margin-left": 0,
"margin-right": 0,
}
else:
kw = copy.copy(kw)
#indent = len(path) * " " # only used for debug print statements
# TEXT
if node.nodeType == Node.TEXT_NODE:
# print indent, "#", repr(node.data) #, context.frag
context.addFrag(node.data)
# context.text.append(node.value)
# ELEMENT
elif node.nodeType == Node.ELEMENT_NODE:
node.tagName = node.tagName.replace(":", "").lower()
if node.tagName in ("style", "script"):
return
path = copy.copy(path) + [node.tagName]
# Prepare attributes
attr = pisaGetAttributes(context, node.tagName, node.attributes)
#log.debug(indent + "<%s %s>" % (node.tagName, attr) + repr(node.attributes.items())) #, path
# Calculate styles
context.cssAttr = CSSCollect(node, context)
context.cssAttr = mapNonStandardAttrs(context.cssAttr, node, attr)
context.node = node
# Block?
PAGE_BREAK = 1
PAGE_BREAK_RIGHT = 2
PAGE_BREAK_LEFT = 3
pageBreakAfter = False
frameBreakAfter = False
display = lower(context.cssAttr.get("display", "inline"))
# print indent, node.tagName, display, context.cssAttr.get("background-color", None), attr
isBlock = (display == "block")
if isBlock:
context.addPara()
# Page break by CSS
if "-pdf-next-page" in context.cssAttr:
context.addStory(NextPageTemplate(str(context.cssAttr["-pdf-next-page"])))
if "-pdf-page-break" in context.cssAttr:
if str(context.cssAttr["-pdf-page-break"]).lower() == "before":
context.addStory(PageBreak())
if "-pdf-frame-break" in context.cssAttr:
if str(context.cssAttr["-pdf-frame-break"]).lower() == "before":
context.addStory(FrameBreak())
if str(context.cssAttr["-pdf-frame-break"]).lower() == "after":
frameBreakAfter = True
if "page-break-before" in context.cssAttr:
if str(context.cssAttr["page-break-before"]).lower() == "always":
context.addStory(PageBreak())
if str(context.cssAttr["page-break-before"]).lower() == "right":
context.addStory(PageBreak())
context.addStory(PmlRightPageBreak())
if str(context.cssAttr["page-break-before"]).lower() == "left":
context.addStory(PageBreak())
context.addStory(PmlLeftPageBreak())
if "page-break-after" in context.cssAttr:
if str(context.cssAttr["page-break-after"]).lower() == "always":
pageBreakAfter = PAGE_BREAK
if str(context.cssAttr["page-break-after"]).lower() == "right":
pageBreakAfter = PAGE_BREAK_RIGHT
if str(context.cssAttr["page-break-after"]).lower() == "left":
pageBreakAfter = PAGE_BREAK_LEFT
if display == "none":
# print "none!"
return
# Translate CSS to frags
# Save previous frag styles
context.pushFrag()
# Map styles to Reportlab fragment properties
CSS2Frag(context, kw, isBlock)
# EXTRAS
if "-pdf-keep-with-next" in context.cssAttr:
context.frag.keepWithNext = getBool(context.cssAttr["-pdf-keep-with-next"])
if "-pdf-outline" in context.cssAttr:
context.frag.outline = getBool(context.cssAttr["-pdf-outline"])
if "-pdf-outline-level" in context.cssAttr:
context.frag.outlineLevel = int(context.cssAttr["-pdf-outline-level"])
if "-pdf-outline-open" in context.cssAttr:
context.frag.outlineOpen = getBool(context.cssAttr["-pdf-outline-open"])
if "-pdf-word-wrap" in context.cssAttr:
context.frag.wordWrap = context.cssAttr["-pdf-word-wrap"]
# handle keep-in-frame
keepInFrameMode = None
keepInFrameMaxWidth = 0
keepInFrameMaxHeight = 0
if "-pdf-keep-in-frame-mode" in context.cssAttr:
value = str(context.cssAttr["-pdf-keep-in-frame-mode"]).strip().lower()
if value in ("shrink", "error", "overflow", "truncate"):
keepInFrameMode = value
if "-pdf-keep-in-frame-max-width" in context.cssAttr:
keepInFrameMaxWidth = getSize("".join(context.cssAttr["-pdf-keep-in-frame-max-width"]))
if "-pdf-keep-in-frame-max-height" in context.cssAttr:
keepInFrameMaxHeight = getSize("".join(context.cssAttr["-pdf-keep-in-frame-max-height"]))
# ignore nested keep-in-frames, tables have their own KIF handling
keepInFrame = keepInFrameMode is not None and context.keepInFrameIndex is None
if keepInFrame:
            # keep track of current story index, so we can wrap everything
# added after this point in a KeepInFrame
context.keepInFrameIndex = len(context.story)
# BEGIN tag
klass = globals().get("pisaTag%s" % node.tagName.replace(":", "").upper(), None)
obj = None
# Static block
elementId = attr.get("id", None)
staticFrame = context.frameStatic.get(elementId, None)
if staticFrame:
context.frag.insideStaticFrame += 1
oldStory = context.swapStory()
# Tag specific operations
if klass is not None:
obj = klass(node, attr)
obj.start(context)
# Visit child nodes
context.fragBlock = fragBlock = copy.copy(context.frag)
for nnode in node.childNodes:
pisaLoop(nnode, context, path, **kw)
context.fragBlock = fragBlock
# END tag
if obj:
obj.end(context)
# Block?
if isBlock:
context.addPara()
# XXX Buggy!
# Page break by CSS
if pageBreakAfter:
context.addStory(PageBreak())
if pageBreakAfter == PAGE_BREAK_RIGHT:
context.addStory(PmlRightPageBreak())
if pageBreakAfter == PAGE_BREAK_LEFT:
context.addStory(PmlLeftPageBreak())
if frameBreakAfter:
context.addStory(FrameBreak())
if keepInFrame:
# get all content added after start of -pdf-keep-in-frame and wrap
# it in a KeepInFrame
substory = context.story[context.keepInFrameIndex:]
context.story = context.story[:context.keepInFrameIndex]
context.story.append(
KeepInFrame(
content=substory,
maxWidth=keepInFrameMaxWidth,
maxHeight=keepInFrameMaxHeight))
context.keepInFrameIndex = None
# Static block, END
if staticFrame:
context.addPara()
for frame in staticFrame:
frame.pisaStaticStory = context.story
context.swapStory(oldStory)
context.frag.insideStaticFrame -= 1
# context.debug(1, indent, "</%s>" % (node.tagName))
# Reset frag style
context.pullFrag()
# Unknown or not handled
else:
# context.debug(1, indent, "???", node, node.nodeType, repr(node))
# Loop over children
for node in node.childNodes:
pisaLoop(node, context, path, **kw)
def pisaParser(src, context, default_css="", xhtml=False, encoding=None, xml_output=None):
"""
- Parse HTML and get miniDOM
    - Extract CSS information, add default CSS, parse CSS
- Handle the document DOM itself and build reportlab story
- Return Context object
"""
global CSSAttrCache
CSSAttrCache = {}
if xhtml:
        #TODO: XHTMLParser doesn't seem to exist...
parser = html5lib.XHTMLParser(tree=treebuilders.getTreeBuilder("dom"))
else:
parser = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder("dom"))
if type(src) in StringTypes:
if type(src) is unicode:
# If an encoding was provided, do not change it.
if not encoding:
encoding = "utf-8"
src = src.encode(encoding)
src = pisaTempFile(src, capacity=context.capacity)
# Test for the restrictions of html5lib
if encoding:
# Workaround for html5lib<0.11.1
if hasattr(inputstream, "isValidEncoding"):
if encoding.strip().lower() == "utf8":
encoding = "utf-8"
if not inputstream.isValidEncoding(encoding):
log.error("%r is not a valid encoding e.g. 'utf8' is not valid but 'utf-8' is!", encoding)
else:
if inputstream.codecName(encoding) is None:
log.error("%r is not a valid encoding", encoding)
document = parser.parse(
src,
encoding=encoding)
if xml_output:
if encoding:
xml_output.write(document.toprettyxml(encoding=encoding))
else:
xml_output.write(document.toprettyxml(encoding="utf8"))
if default_css:
context.addDefaultCSS(default_css)
pisaPreLoop(document, context)
#try:
context.parseCSS()
#except:
# context.cssText = DEFAULT_CSS
# context.parseCSS()
# context.debug(9, pprint.pformat(context.css))
pisaLoop(document, context)
return context
# Shortcuts
HTML2PDF = pisaParser
def XHTML2PDF(*a, **kw):
kw["xhtml"] = True
return HTML2PDF(*a, **kw)
XML2PDF = XHTML2PDF
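# Hedged usage sketch (not part of this module): in practice the parser is
# usually driven through the public pisa API rather than called directly, e.g.
#
#     from xhtml2pdf import pisa
#     with open('out.pdf', 'wb') as handle:      # 'out.pdf' is a placeholder
#         status = pisa.CreatePDF('<h1>Hello</h1>', dest=handle)
#     print(status.err)                          # 0 on success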
| b-me/xhtml2pdf | xhtml2pdf/parser.py | Python | apache-2.0 | 25,596 | [
"VisIt"
] | e0cc6822d25193c6a12feaab22dc224ba19956bc9ec19566fa94c7135dcf84b8 |
"""
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_arrays
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Parameters
----------
X : array, shape = (n_samples, n_features)
Training vectors.
y : array, shape = (length)
Target values for training vectors
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
        whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
`coef_` : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
`alpha_` : float
estimated precision of the noise.
`lambda_` : array, shape = (n_features)
estimated precisions of the weights.
`scores_` : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_bayesian_ridge.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the model
Parameters
----------
X : numpy array of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
X, y = check_arrays(X, y, sparse_format='dense',
dtype=np.float)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
n_samples, n_features = X.shape
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
### Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
### Compute mu and sigma
# sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
# coef_ = sigma_^-1 * XT * y
if n_samples > n_features:
coef_ = np.dot(Vh.T,
Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])
coef_ = np.dot(coef_, XT_y)
if self.compute_score:
logdet_sigma_ = - np.sum(
np.log(lambda_ + alpha_ * eigen_vals_))
else:
coef_ = np.dot(X.T, np.dot(
U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
coef_ = np.dot(coef_, y)
if self.compute_score:
logdet_sigma_ = lambda_ * np.ones(n_features)
logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = (np.sum((alpha_ * eigen_vals_)
/ (lambda_ + alpha_ * eigen_vals_)))
lambda_ = ((gamma_ + 2 * lambda_1)
/ (np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1)
/ (rmse_ + 2 * alpha_2))
### Compute the objective function
if self.compute_score:
s = lambda_1 * log(lambda_) - lambda_2 * lambda_
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (n_features * log(lambda_)
+ n_samples * log(alpha_)
- alpha_ * rmse_
- (lambda_ * np.sum(coef_ ** 2))
- logdet_sigma_
- n_samples * log(2 * np.pi))
self.scores_.append(s)
### Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.alpha_ = alpha_
self.lambda_ = lambda_
self.coef_ = coef_
self._set_intercept(X_mean, y_mean, X_std)
return self
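# Hedged usage sketch (not part of the original module): fitting with
# compute_score=True exposes the per-iteration objective values in `scores_`.
#
#     import numpy as np
#     from sklearn.linear_model import BayesianRidge
#     rng = np.random.RandomState(0)
#     X = rng.randn(50, 3)
#     w = np.array([1.0, 0.0, -2.0])
#     y = np.dot(X, w) + 0.1 * rng.randn(50)
#     model = BayesianRidge(compute_score=True).fit(X, y)
#     print(model.coef_)         # close to w
#     print(model.scores_[-1])   # last value of the maximized objective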
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
    the regression model are assumed to follow Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
    The estimation is done by an iterative procedure (Evidence Maximization).
Parameters
----------
X : array, shape = (n_samples, n_features)
Training vectors.
y : array, shape = (n_samples)
Target values for training vectors
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
threshold_lambda : float, optional
threshold for removing (pruning) weights with high precision from
the computation. Default is 1.e+4.
fit_intercept : boolean, optional
        whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
`coef_` : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
`alpha_` : float
estimated precision of the noise.
`lambda_` : array, shape = (n_features)
estimated precisions of the weights.
`sigma_` : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
`scores_` : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
    -----
See examples/linear_model/plot_ard.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
            Target values (real numbers)
Returns
-------
self : returns an instance of self.
"""
X, y = check_arrays(X, y, sparse_format='dense',
dtype=np.float)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
### Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
### Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
### Compute mu and sigma (using Woodbury matrix identity)
sigma_ = pinvh(np.eye(n_samples) / alpha_ +
np.dot(X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]),
X[:, keep_lambda].T))
sigma_ = np.dot(sigma_, X[:, keep_lambda]
* np.reshape(1. / lambda_[keep_lambda], [1, -1]))
sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1])
* X[:, keep_lambda].T, sigma_)
sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
coef_[keep_lambda] = alpha_ * np.dot(
sigma_, np.dot(X[:, keep_lambda].T, y))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1)
/ ((coef_[keep_lambda]) ** 2
+ 2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1)
/ (rmse_ + 2. * alpha_2))
### Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
### Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_)
+ np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
### Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
        self.alpha_ = alpha_
        self.lambda_ = lambda_
self.sigma_ = sigma_
self._set_intercept(X_mean, y_mean, X_std)
return self
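# Minimal usage sketch (illustrative; it mirrors the doctest in the class
# docstring above, and the attribute names are the ones set by fit()):
#
#   from sklearn import linear_model
#   clf = linear_model.ARDRegression(compute_score=True)
#   clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
#   clf.coef_      # per-feature weights; pruned features are exactly 0
#   clf.alpha_     # estimated precision of the noise
#   clf.scores_    # objective values, one entry per iteration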
| jmargeta/scikit-learn | sklearn/linear_model/bayes.py | Python | bsd-3-clause | 15,486 | [
"Gaussian"
] | a9d871d468450953ff5e386fe03641f3c9e6854e9dbfe0670247a47d75237b2f |
#!/usr/bin/env python
import httplib
import httplib2
import os
import random
import sys
import time
import googleapiclient
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from googleapiclient.http import MediaFileUpload
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import argparser, run_flow
# Explicitly tell the underlying HTTP transport library not to retry, since
# we are handling retry logic ourselves.
httplib2.RETRIES = 1
# Maximum number of times to retry before giving up.
MAX_RETRIES = 10
# Always retry when these exceptions are raised.
RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, httplib.NotConnected,
httplib.IncompleteRead, httplib.ImproperConnectionState,
httplib.CannotSendRequest, httplib.CannotSendHeader,
httplib.ResponseNotReady, httplib.BadStatusLine)
# Always retry when a googleapiclient.errors.HttpError with one of these status
# codes is raised.
RETRIABLE_STATUS_CODES = [500, 502, 503, 504]
# The CLIENT_SECRETS_FILE variable specifies the name of a file that contains
# the OAuth 2.0 information for this application, including its client_id and
# client_secret. You can acquire an OAuth 2.0 client ID and client secret from
# the Google Developers Console at
# https://console.developers.google.com/.
# Please ensure that you have enabled the YouTube Data API for your project.
# For more information about using OAuth2 to access the YouTube Data API, see:
# https://developers.google.com/youtube/v3/guides/authentication
# For more information about the client_secrets.json file format, see:
# https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
CLIENT_SECRETS_FILE = "config/client_secrets.json"
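# For reference, a client_secrets.json for an "installed" application typically
# looks like the following (illustrative placeholder values, not real
# credentials):
#
#   {
#     "installed": {
#       "client_id": "1234567890-example.apps.googleusercontent.com",
#       "client_secret": "REPLACE_WITH_YOUR_CLIENT_SECRET",
#       "redirect_uris": ["urn:ietf:wg:oauth:2.0:oob", "http://localhost"],
#       "auth_uri": "https://accounts.google.com/o/oauth2/auth",
#       "token_uri": "https://accounts.google.com/o/oauth2/token"
#     }
#   }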
# This OAuth 2.0 access scope allows an application to upload files to the
# authenticated user's YouTube channel, but doesn't allow other types of access.
YOUTUBE_UPLOAD_SCOPE = "https://www.googleapis.com/auth/youtube.upload"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
# This variable defines a message to display if the CLIENT_SECRETS_FILE is
# missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the Developers Console
https://console.developers.google.com/
For more information about the client_secrets.json file format, please visit:
https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
""" % os.path.abspath(os.path.join(os.path.dirname(__file__),
CLIENT_SECRETS_FILE))
VALID_PRIVACY_STATUSES = ("public", "private", "unlisted")
def get_authenticated_service(args):
flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE,
scope=YOUTUBE_UPLOAD_SCOPE,
message=MISSING_CLIENT_SECRETS_MESSAGE)
storage = Storage("%s-oauth2.json" % sys.argv[0])
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run_flow(flow, storage, args)
return build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
http=credentials.authorize(httplib2.Http()))
def initialize_upload(youtube, options):
tags = None
if options.keywords:
tags = options.keywords.split(",")
body=dict(
snippet=dict(
title=options.title,
description=options.description,
tags=tags,
categoryId=options.category
),
status=dict(
privacyStatus=options.privacyStatus
)
)
# Call the API's videos.insert method to create and upload the video.
insert_request = youtube.videos().insert(
part=",".join(body.keys()),
body=body,
# The chunksize parameter specifies the size of each chunk of data, in
# bytes, that will be uploaded at a time. Set a higher value for
# reliable connections as fewer chunks lead to faster uploads. Set a lower
# value for better recovery on less reliable connections.
#
# Setting "chunksize" equal to -1 in the code below means that the entire
# file will be uploaded in a single HTTP request. (If the upload fails,
# it will still be retried where it left off.) This is usually a best
# practice, but if you're using Python older than 2.6 or if you're
# running on App Engine, you should set the chunksize to something like
# 1024 * 1024 (1 megabyte).
media_body=MediaFileUpload(options.file, chunksize=-1, resumable=True)
)
resumable_upload(insert_request)
# This method implements an exponential backoff strategy to resume a
# failed upload.
def resumable_upload(insert_request):
response = None
error = None
retry = 0
while response is None:
try:
print "Uploading file..."
status, response = insert_request.next_chunk()
if 'id' in response:
print "Video id '%s' was successfully uploaded." % response['id']
else:
exit("The upload failed with an unexpected response: %s" % response)
except HttpError, e:
if e.resp.status in RETRIABLE_STATUS_CODES:
error = "A retriable HTTP error %d occurred:\n%s" % (e.resp.status,
e.content)
else:
raise
except RETRIABLE_EXCEPTIONS, e:
error = "A retriable error occurred: %s" % e
if error is not None:
print error
retry += 1
if retry > MAX_RETRIES:
exit("No longer attempting to retry.")
max_sleep = 2 ** retry
sleep_seconds = random.random() * max_sleep
print "Sleeping %f seconds and then retrying..." % sleep_seconds
time.sleep(sleep_seconds)
if __name__ == '__main__':
argparser.add_argument("--file", required=True, help="Video file to upload")
argparser.add_argument("--title", help="Video title", default="Test Title")
argparser.add_argument("--description", help="Video description",
default="Test Description")
argparser.add_argument("--category", default="22",
help="Numeric video category. " +
"See https://developers.google.com/youtube/v3/docs/videoCategories/list")
argparser.add_argument("--keywords", help="Video keywords, comma separated",
default="")
argparser.add_argument("--privacyStatus", choices=VALID_PRIVACY_STATUSES,
default=VALID_PRIVACY_STATUSES[0], help="Video privacy status.")
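  # Example invocation (illustrative; the file name and metadata values are
  # placeholders):
  #
  #   python bin/upload-youtube.py --file="my_video.mp4" --title="Test Title" \
  #     --description="Test Description" --keywords="surfing,beach" \
  #     --category="22" --privacyStatus="private"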
args = argparser.parse_args()
if not os.path.exists(args.file):
exit("Please specify a valid file using the --file= parameter.")
youtube = get_authenticated_service(args)
try:
initialize_upload(youtube, args)
except HttpError, e:
print "An HTTP error %d occurred:\n%s" % (e.resp.status, e.content)
| z/xonotic-video-uploads | bin/upload-youtube.py | Python | mit | 6,709 | [
"VisIt"
] | 7794665366084e24a8d93e3ab2b045449f9a37d67fdf7451b4c51ef93b95b681 |
# -*- coding: utf-8 -*-
#
# gPrime - A web-based genealogy program
#
# Copyright (C) 2003-2005 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2009-2010 Andrew I Baznikin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Written by Alex Roitman, largely based on relationship.py by Don Allingham.
"""
Russian-specific definitions of relationships
"""
#-------------------------------------------------------------------------
#
# Gprime modules
#
#-------------------------------------------------------------------------
from gprime.lib import Person
import gprime.relationship
#-------------------------------------------------------------------------
_parents_level = [
"",
"родители",
"дедушки/бабушки",
"прадедушки/прабабушки",
"прапрадедушки/прапрабабушки (5 поколение)",
"прапрапрадедушки/прапрапрабабушки (6 поколение)",
"прапрапрапрадедушки/прапрапрапрабабушки (7 поколение)",
"прапрапрапрапрадедушки/прапрапрапрапрабабушки (8 поколение)",
]
_male_cousin_level = [
"",
"двоюродный",
"троюродный",
"четвероюродный",
"пятиюродный",
"шестиюродный",
"семиюродный",
"восьмиюродный",
"девятиюродный",
"десятиюродный",
"одиннацатиюродный",
"двенадцатиюродный",
"тринадцатиюродный",
"четырнадцатиюродный",
"пятнадцатиюродный",
"шестнадцатиюродный",
"семнадцатиюродный",
"восемнадцатиюродный",
"девятнадцатиюродный",
"двадцатиюродный",
]
_female_cousin_level = [
"",
"двоюродная",
"троюродная",
"четвероюродная",
"пятиюродная",
"шестиюродная",
"семиюродная",
"восьмиюродная",
"девятиюродная",
"десятиюродная",
"одиннацатиюродная",
"двенадцатиюродная",
"тринадцатиюродная",
"четырнадцатиюродная",
"пятнадцатиюродная",
"шестнадцатиюродная",
"семнадцатиюродная",
"восемнадцатиюродная",
"девятнадцатиюродная",
"двадцатиюродная",
]
_cousin_level = [
"",
"двоюродные",
"троюродные",
"четвероюродные",
"пятиюродные",
"шестиюродные",
"семиюродные",
"восьмиюродные",
"девятиюродные",
"десятиюродные",
"одиннацатиюродные",
"двенадцатиюродные",
"тринадцатиюродные",
"четырнадцатиюродные",
"пятнадцатиюродные",
"шестнадцатиюродные",
"семнадцатиюродные",
"восемнадцатиюродные",
"девятнадцатиюродные",
"двадцатиюродные",
]
_junior_male_removed_level = [
"брат",
"племянник",
"внучатый племянник",
"правнучатый племянник",
"праправнучатый племянник",
"прапраправнучатый племянник",
"прапрапраправнучатый племянник",
]
_junior_female_removed_level = [
"сестра",
"племянница",
"внучатая племянница",
"правнучатая племянница",
"праправнучатая племянница",
"прапраправнучатая племянница",
"прапрапраправнучатая племянница",
]
_juniors_removed_level = [
"братья/сестры",
"племянники",
"внучатые племянники",
"правнучатые племянники",
"праправнучатые племянники",
"прапраправнучатые племянники",
"прапрапраправнучатые племянники",
]
_senior_male_removed_level = [
"",
"дядя",
"дед",
"прадед",
"прапрадед",
"прапрапрадед",
"прапрапрапрадед",
]
_senior_female_removed_level = [
"",
"тётя",
"бабушка",
"прабабушка",
"прапрабабушка",
"прапрапрабабушка",
"прапрапрапрабабушка",
]
_seniors_removed_level = [
"",
"дяди/тёти",
"дедушки/бабушки",
"прадеды/прабабушки",
"прапрадеды/прапрабабушки",
"прапрапрадеды/прапрапрабабушки",
"прапрапрапрадеды/прапрапрапрабабушки",
]
_father_level = [
"",
"отец",
"дед",
"прадед",
"прапрадед",
"прапрапрадед",
"прапрапрапрадед",
]
_mother_level = [
"",
"мать",
"бабушка",
"прабабушка",
"прапрабабушка",
"прапрапрабабушка",
"прапрапрапрабабушка",
]
_son_level = [
"",
"сын",
"внук",
"правнук",
"праправнук",
"прапраправнук",
"прапрапраправнук",
]
_daughter_level = [
"",
"дочь",
"внучка",
"правнучка",
"праправнучка",
"прапраправнучка",
"прапрапраправнучка",
]
_children_level = [
"",
"дети",
"внуки",
"правнуки",
"праправнуки",
"прапраправнуки",
"прапрапраправнуки",
"прапрапрапраправнуки",
]
_sister_level = [
"",
"сестра",
"тётя",
"двоюродная бабушка",
"двоюродная прабабушка",
"двоюродная прапрабабушка",
"двоюродная прапрапрабабушка",
"двоюродная прапрапрапрабабушка",
]
_brother_level = [
"",
"брат",
"дядя",
"двоюродный дед",
"двоюродный прадед",
"двоюродный прапрадед",
"двоюродный прапрапрадед",
"двоюродный прапрапрапрадед",
]
_siblings_level = [
"",
"братья/сестры",
"дядьки/тётки",
"двоюродные дедушки/бабушки",
"двоюродные прадедушки/прабабушки",
"двоюродные прапрадедушки/прапрабабушки (5 поколение)",
"двоюродные прапрапрадедушки/прапрапрабабушки (6 поколение)",
"двоюродные прапрапрапрадедушки/прапрапрапрабабушки (7 поколение)",
"двоюродные прапрапрапрапрадедушки/прапрапрапрапрабабушки (8 поколение)",
]
_nephew_level = [
"",
"племянник",
"внучатый племянник",
"правнучатый племянник",
"праправнучатый племянник",
"прапраправнучатый племянник",
"прапрапраправнучатый племянник",
]
_niece_level = [
"",
"племянница",
"внучатая племянница",
"правнучатая племянница",
"праправнучатая племянница",
"прапраправнучатая племянница",
"прапрапраправнучатая племянница",
]
_nephews_nieces_level = [
"",
"братья/сестры",
"племянники",
"внучатые племянники",
"правнучатые племянники",
"праправнучатые племянники",
"прапраправнучатые племянники",
"прапрапраправнучатые племянники",
]
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
class RelationshipCalculator(gprime.relationship.RelationshipCalculator):
"""
RelationshipCalculator Class
"""
def __init__(self):
        gprime.relationship.RelationshipCalculator.__init__(self)
def get_parents(self, level):
if level > len(_parents_level)-1:
return "дальние родственники"
else:
return _parents_level[level]
def get_junior_male_cousin(self, level, removed):
if removed > len(_junior_male_removed_level)-1 or \
level > len(_male_cousin_level)-1:
return "дальний родственник"
else:
return "%s %s" % (_male_cousin_level[level], _junior_male_removed_level[removed])
def get_senior_male_cousin(self, level, removed):
if removed > len(_senior_male_removed_level)-1 or \
level > len(_male_cousin_level)-1:
return "дальний родственник"
else:
return "%s %s" % (_male_cousin_level[level], _senior_male_removed_level[removed])
def get_junior_female_cousin(self, level, removed):
if removed > len(_junior_female_removed_level)-1 or \
level > len(_male_cousin_level)-1:
return "дальняя родственница"
else:
return "%s %s" % (_female_cousin_level[level], _junior_female_removed_level[removed])
def get_senior_female_cousin(self, level, removed):
if removed > len(_senior_female_removed_level)-1 or \
level > len(_male_cousin_level)-1:
return "дальняя родственница"
else:
return "%s %s" % (_female_cousin_level[level], _senior_female_removed_level[removed])
def get_father(self, level):
if level > len(_father_level)-1:
return "дальний предок"
else:
return _father_level[level]
def get_son(self, level):
if level > len(_son_level)-1:
return "дальний потомок"
else:
return _son_level[level]
def get_mother(self, level):
if level > len(_mother_level)-1:
return "дальний предок"
else:
return _mother_level[level]
def get_daughter(self, level):
if level > len(_daughter_level)-1:
return "дальний потомок"
else:
return _daughter_level[level]
def _get_aunt(self, level, step='', inlaw=''):
if level > len(_sister_level)-1:
return "дальний предок в соседнем поколении"
else:
return _sister_level[level]
def _get_uncle(self, level, step='', inlaw=''):
if level > len(_brother_level)-1:
return "дальний предок в соседнем поколении"
else:
return _brother_level[level]
def _get_sibling(self, level, step='', inlaw=''):
"""
Sibling of unknown gender
"""
return self._get_uncle(level, step, inlaw) + " или " + self._get_aunt(level, step, inlaw)
def get_nephew(self, level):
if level > len(_nephew_level)-1:
return "дальний потомок в соседнем поколении"
else:
return _nephew_level[level]
def get_niece(self, level):
if level > len(_niece_level)-1:
return "дальний потомок в соседнем поколении"
else:
return _niece_level[level]
def get_single_relationship_string(self, Ga, Gb, gender_a, gender_b,
reltocommon_a, reltocommon_b,
only_birth=True,
in_law_a=False, in_law_b=False):
if Gb == 0:
if Ga == 0:
return ('один человек')
elif gender_b == Person.MALE:
return (self.get_father(Ga))
else:
return (self.get_mother(Ga))
elif Ga == 0:
if gender_b == Person.MALE:
return (self.get_son(Gb))
else:
return (self.get_daughter(Gb))
elif Gb == 1:
if gender_b == Person.MALE:
return (self._get_uncle(Ga))
else:
return (self._get_aunt(Ga))
elif Ga == 1:
if gender_b == Person.MALE:
return (self.get_nephew(Gb-1))
else:
return (self.get_niece(Gb-1))
elif Ga > Gb:
if gender_b == Person.MALE:
return (self.get_senior_male_cousin(Gb-1, Ga-Gb))
else:
return (self.get_senior_female_cousin(Gb-1, Ga-Gb))
else:
if gender_b == Person.MALE:
return (self.get_junior_male_cousin(Ga-1, Gb-Ga))
else:
return (self.get_junior_female_cousin(Ga-1, Gb-Ga))
def get_plural_relationship_string(self, Ga, Gb,
reltocommon_a='', reltocommon_b='',
only_birth=True,
in_law_a=False, in_law_b=False):
rel_str = "дальние родственники"
if Ga == 0:
# These are descendants
if Gb < len(_children_level):
rel_str = _children_level[Gb]
else:
rel_str = "дальние потомки"
elif Gb == 0:
# These are parents/grand parents
if Ga < len(_parents_level):
rel_str = _parents_level[Ga]
else:
rel_str = "дальние предки"
elif Gb == 1:
# These are siblings/aunts/uncles
if Ga < len(_siblings_level):
rel_str = _siblings_level[Ga]
else:
rel_str = "дальние дяди/тёти"
elif Ga == 1:
# These are nieces/nephews
if Gb < len(_nephews_nieces_level):
rel_str = _nephews_nieces_level[Gb]
else:
rel_str = "дальние племянники/племянницы"
elif Ga > Gb:
# These are cousins in different generations with the second person
# being in a higher generation from the common ancestor than the
# first person.
            if Gb <= len(_cousin_level) and (Ga-Gb) < len(_seniors_removed_level):
rel_str = "%s %s" % ( _cousin_level[Gb-1],
_seniors_removed_level[Ga-Gb] )
else:
rel_str = "(старшие) дальние родственники"
else:
# These are cousins in different generations with the second person
# being in a lower generation from the common ancestor than the
# first person.
            if Ga <= len(_cousin_level) and (Gb-Ga) < len(_juniors_removed_level):
rel_str = "%s %s" % ( _cousin_level[Ga-1],
_juniors_removed_level[Gb-Ga] )
else:
rel_str = "(младшие) дальние родственники"
if in_law_b == True:
# TODO: Translate this!
rel_str = "spouses of %s" % rel_str
return rel_str
# TODO: def get_sibling_relationship_string for Russian step and inlaw relations
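# Usage sketch (illustrative): Ga and Gb are the generation distances of the two
# people from their closest common ancestor.
#
#   rc = RelationshipCalculator()
#   rc.get_single_relationship_string(2, 0, Person.MALE, Person.MALE, '', '')
#   # -> 'дед' (grandfather), taken from _father_level[2]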
if __name__ == "__main__":
# Test function. Call it as follows from the command line (so as to find
# imported modules):
# export PYTHONPATH=/path/to/gramps/src
# python src/plugins/rel/rel_ru.py
# (Above not needed here)
"""TRANSLATORS, copy this if statement at the bottom of your
rel_xx.py module, and test your work with:
python src/plugins/rel/rel_xx.py
"""
from gprime.relationship import test
RC = RelationshipCalculator()
test(RC, True)
| sam-m888/gprime | gprime/plugins/rel/rel_ru.py | Python | gpl-2.0 | 17,326 | [
"Brian"
] | 18bb27e7f8556f0d6a12e66c74b238608e97cdfc954fe6d5cee0043d66a61760 |
from octopus.server.DBInterface import DBInterface
from octopus.shelltool.PipeTool import PipeTool
class JoernTool(PipeTool):
def __init__(self, DESCRIPTION):
PipeTool.__init__(self, DESCRIPTION)
self.dbName = None
self.argParser.add_argument('project')
# @Override
def streamStart(self):
self.dbInterface = DBInterface()
self.dbInterface.disable_json()
self.dbInterface.connectToDatabase(self.args.project)
def _runGremlinQuery(self, query):
return self.dbInterface.runGremlinQuery(query)
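# Minimal usage sketch (illustrative; assumes an Octopus/Joern server is reachable
# and that PipeTool provides a run() entry point; that method name is an
# assumption, it is not defined in this file):
#
#   tool = JoernTool('Example tool description')
#   tool.run()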
| octopus-platform/joern | python/joern-tools/joern/shelltool/JoernTool.py | Python | lgpl-3.0 | 567 | [
"Octopus"
] | 7ae97df16e2ee379dd29197a8f0fb321a8dd168e7802839d5f3521482a41fa23 |
"""
File: sessionUpload.py
Author: Levi Bostian ([email protected])
Description: Class uploading data to Dropbox
***NOTE***
Linked to one Dropbox account; not universal.
Use the following commands to get it to work:
dropboxObj = DropboxUploader()
dropboxObj.authenticate()
dropboxObj.uploadFile("testFile.txt")
fileURL = dropboxObj.getVSASLink("testFile.txt") #returns URL to file
    fileURL = dropboxObj.getDBLink("testFile.txt") #for photo
    print fileURL
References: https://www.dropbox.com/developers
"""
from dropbox import client, rest, session
import json
import pprint
class DropboxUploader:
def __init__(self):
self.APP_KEY = 'y7cxubkm19o3f9b'
self.APP_SECRET = '8rguqnx7oqwjqtm'
# ACCESS_TYPE should be 'dropbox' or 'app_folder' as configured for your app
self.ACCESS_TYPE = 'app_folder'
self.url = ""
self.fileName = ""
def authenticate(self):
token_file = open('dropbox_token.txt', 'r')
token_key,token_secret = token_file.read().split('|')
token_file.close()
self.sess = session.DropboxSession(self.APP_KEY,self.APP_SECRET, self.ACCESS_TYPE)
self.sess.set_token(token_key,token_secret)
self.connection = client.DropboxClient(self.sess)
def getAccountInfo(self):
return self.connection.account_info()
    # Uploads the file you specify to Dropbox. Returns the response Dropbox sends back for the upload.
def uploadFile(self, filePath):
if filePath[0:1] == '/':
self.filePath = filePath[1:]
else:
self.filePath = filePath
file = open(self.filePath)
self.getFileName(filePath)
return self.connection.put_file('/fileUploader_linkCreator/'+self.fileName, file)
def getFileName(self, filePath):
split = filePath.split("/")
self.fileName = split[-1]
def getVSASLink(self, fileLocation):
if fileLocation[0:1] == '/':
fileLocation = fileLocation[1:]
self.getFileName(fileLocation)
try:
jsonData = self.connection.media('/fileUploader_linkCreator/'+self.fileName)
jsonDataString = json.dumps(jsonData)#encode JSON data
jsonDataDecoded = json.loads(jsonDataString)#decode JSON data
dropboxURL = jsonDataDecoded["url"]
dropboxURLSplit = dropboxURL.split("/")
self.url = "http://vsassoftware.com/video/index.php?id=" + dropboxURLSplit[5] + "&path=" + self.fileName
return self.url
except:
return "filePath argument not found in Dropbox"
def getDBLink(self, fileLocation):
if fileLocation[0:1] == '/':
fileLocation = fileLocation[1:]
self.getFileName(fileLocation)
try:
jsonData = self.connection.media('/fileUploader_linkCreator/'+self.fileName)
jsonDataString = json.dumps(jsonData)#encode JSON data
jsonDataDecoded = json.loads(jsonDataString)#decode JSON data
return jsonDataDecoded["url"]
except:
return "filePath argument not found in Dropbox"
def getAuthenticationURL(self):
self.request_token = self.sess.obtain_request_token()
return self.sess.build_authorize_url(self.request_token)
# This will fail if the user didn't visit the above URL and hit 'Allow'
def saveToken(self):
try:
access_token = self.sess.obtain_access_token(self.request_token)
except:
            return "call getAuthenticationURL() to get URL"
#Okay, now we are ready to save the access_token
tokensFileStr = 'dropbox_token.txt'
tokenFile = open(tokensFileStr, 'w')
tokenFile.write("%s|%s" % (access_token.key,access_token.secret))
tokenFile.close()
| levibostian/VSAS | leviTesting/dropbox_sdk/testScripts/dropbox_vsas.py | Python | mit | 4,010 | [
"VisIt"
] | 4559ad0669c746f095d8dae4df5c3288892d29633fb43c7463038bfab4a87a26 |
import cv2
import numpy as np
import pyscreenshot as ImageGrab
from tkinter import *
from Xlib import display
import mss
import os
import pyxhook
import serial
import threading
import serial.tools.list_ports
import ctypes
import glob
from PIL import ImageTk, Image
import sys
with open(os.devnull, 'w') as f: ###shutting up pygame silliness
oldstdout = sys.stdout# disable stdout
sys.stdout = f
import pygame
sys.stdout = oldstdout# enable stdout
version = '0.9.4l'
print('CHMACHINE Ver. %s \n' %version)
class motorclass():
def __init__(self):
self.state=2
self.tempo=0
self.savestate=2
self.result=0
self.colorshow=np.zeros((streamwindowssizex, streamwindowssizey, 4), np.uint8) #create an array of zeros for the black background
self.index=0
self.listindex=0
self.timetic=0
self.patternspeed=0
self.speed='0'
self.targetspeed='0'
self.pinresttime=0
self.serialfloodlimit=5 #time(ms) between commands to limit serial flooding
def detect(self):
global stream_window_open
global stream_window
while detectflag==True:
if arduino_connected==False:
pygame.time.wait(1)
while ((self.state==5) or (self.state==1)): #################### DETECT/DETECT SETUP
if self.state==5 and self.getspeed()!=0 and arduino_connected==True:
self.PWMpin('0')
                try: #take care of sliding too fast, which can push the shape out of the screen boundaries
self.monitor = {"top": top, "left": left, "width": int(screenshotsizex), "height": int(screenshotsizey)}
arr = np.array(mss.mss().grab(self.monitor))
self.result = cv2.matchTemplate(arr, arrbase, cv2.TM_CCOEFF_NORMED) #check if images match
if self.state==5:
if self.result>0:
print ('%.0f %% Match' %(self.result*100))
else:
print ('0 % Match')
if checkinv==False: #invert flag False
if (self.result>=threshold): # if match value is over the threshold
if self.state==1 and arduino_connected==True:
self.PWMpin(speed)
self.colorshow=np.zeros((streamwindowssizex, streamwindowssizey, 4), np.uint8) #create a black background
cv2.circle(self.colorshow,(0,0), 70, (0,255,0), -1) #draw a green circle
self.tempo=pygame.time.get_ticks()
                    elif (pygame.time.get_ticks()-self.tempo) >= (timeonvar): #turn the pin to floor speed some time after the last match occurred
if self.state==1 and arduino_connected==True:
self.PWMpin(floorspeed)
self.colorshow=np.zeros((streamwindowssizex, streamwindowssizey, 4), np.uint8) #create a black background
else: #invert flag True
if (self.result<=threshold):#match if value is under the threshold
if self.state==1 and arduino_connected==True:
self.PWMpin(speed)
self.colorshow=np.zeros((streamwindowssizex, streamwindowssizey, 4), np.uint8) #create a black background
cv2.circle(self.colorshow,(0,0), 70, (0,255,0), -1) #draw a green circle
self.tempo=pygame.time.get_ticks()
                    elif (pygame.time.get_ticks()-self.tempo) >= (timeonvar): #turn the pin to floor speed some time after the last match occurred
if self.state==1 and arduino_connected==True:
self.PWMpin(floorspeed)
self.colorshow=np.zeros((streamwindowssizex, streamwindowssizey, 4), np.uint8) #create a black background
###centering and overlapping images over background:
x_offset=int((streamwindowssizex - screenshotsizex)/2)
y_offset=int((streamwindowssizey - screenshotsizey)/2)
self.colorshow[y_offset:y_offset + arr.shape[0], x_offset:x_offset + arr.shape[1]] = arr
###
except:
pass
                if stream_window_open==False: # open "stream" window if it isn't already open
stream_window= Toplevel()
stream_window.resizable(False,False)
stream_window.title('Stream')
stream_window.geometry(str(streamwindowssizex) + 'x' + str(streamwindowssizey))
stream_window.protocol("WM_DELETE_WINDOW", on_closing_stream_window)
stream_canvas = Canvas(stream_window, width=streamwindowssizex, height=streamwindowssizey, background='black')
stream_canvas.pack()
stream_window_open=True
try:
iconpath=os.path.abspath(os.path.dirname(__file__)) + '/icon.gif'
imgicon = PhotoImage(file=iconpath)
stream_window.tk.call('wm', 'iconphoto', stream_window._w, imgicon)
except:
pass
ontop()
else:
###showing image:
self.im = cv2.cvtColor(self.colorshow, cv2.COLOR_BGRA2RGB)
self.imag = Image.fromarray(self.im)
self.imagi = ImageTk.PhotoImage(image=self.imag)
stream_canvas.create_image(streamwindowssizex/2, streamwindowssizex/2, image=self.imagi)
stream_canvas.image=self.imagi #to keep a reference else it shows blank
if self.state==1 and arduino_connected==True:
self.PWMpin(str(self.getspeed())) #Keeps the PWM pin alive(see Arduino code)
###
pygame.time.wait(1)
while self.state==2 and arduino_connected==True:#################### STOP/PAUSE
self.PWMpin('0')
pygame.time.wait(1)
while self.state==3 and arduino_connected==True: ##################### ALWAYSON/PATTERN
if patternvar=='none':
self.PWMpin(speed)
pygame.time.wait(1)
else:
self.listindex=namelist.index(patternvar)
if pygame.time.get_ticks()-self.timetic>=timeonvar/100:
if self.index < len(patternlist[self.listindex])-1:
self.index+=1
else:
self.index=0
self.patternspeed=int(round(patternlist[self.listindex][self.index]/100*int(speed)))
if self.patternspeed>=int(floorspeed):
self.PWMpin(str(self.patternspeed))
if self.patternspeed<int(floorspeed) and self.listindex>1:
self.PWMpin(floorspeed)
self.timetic=pygame.time.get_ticks()
pygame.time.wait(1)
while self.state==4 and arduino_connected==True:######################### PULSE
self.tempo=pygame.time.get_ticks()
while (pygame.time.get_ticks()-self.tempo) <= (timeonvar):
if self.state!=4:
break
self.PWMpin(speed)
pygame.time.wait(1)
self.tempo=pygame.time.get_ticks()
while (pygame.time.get_ticks()-self.tempo) <= (timeoffvar):
if self.state!=4:
break
self.PWMpin(floorspeed)
pygame.time.wait(1)
def getspeed(self):
return int(self.speed)
def PWMpin(self, PWM_speed): #set the Arduino pin PWM
global arduino_connected
try:
if (pygame.time.get_ticks()-self.pinresttime) > self.serialfloodlimit: #limit serial flooding
self.speed=PWM_speed
arduino.write(('V' + self.speed + 'S').encode('utf-8'))
self.pinresttime=pygame.time.get_ticks()
except serial.SerialTimeoutException:
print('WRITE TIMEOUT ERROR.')
arduino.close()
self.stop()
arduino_connected=False
except:
print('SERIAL CONNECTION ERROR')
arduino.close()
self.stop()
arduino_connected=False
def stop(self):
self.state=2
self.savestate=2
def pause(self):
if self.state!=2:
self.savestate=self.state
self.state=2
elif self.state==2:
self.state=self.savestate
def startdetect(self):
self.state=1
self.savestate=self.state
def alwayson_pattern(self):
self.state=3
self.savestate=self.state
def pulse(self):
self.state=4
self.savestate=self.state
def setup(self):
self.state=5
self.savestate=self.state
def keysetup(file):
global pausebutton
global slowdownbutton
global speedupbutton
global screenshotbutton
global refreshbutton
global savebutton
global loadbutton
linelist=[]
###default keys:
pausebutton='P_Insert'
slowdownbutton='P_End'
speedupbutton='P_Down'
screenshotbutton='F9'
refreshbutton='F10'
savebutton='F11'
loadbutton='F12'
###
try:
setup = open(file, 'r')
for x in range(100):
            linelist=setup.readline().replace(' ', '').strip().split('=') #read line, remove spaces and split the string at the "=" sign
if linelist[0]== '***':
break
if linelist[0] == 'Pause':
pausebutton=linelist[1]
if linelist[0] == 'Slowdown':
slowdownbutton=linelist[1]
if linelist[0] == 'Speedup':
speedupbutton=linelist[1]
if linelist[0] == 'Screenshot':
screenshotbutton=linelist[1]
if linelist[0] == 'Refresh':
refreshbutton=linelist[1]
if linelist[0] == 'Loadstate':
loadbutton=linelist[1]
if linelist[0] == 'Savestate':
savebutton=linelist[1]
setup.close()
except:
print('Cannot open', file, ', loading default keys...\n')
print('- HOTKEYS:\n')
print('Pause -------------- ',pausebutton)
print('Slow down ---------- ',slowdownbutton)
print('Speed up ----------- ',speedupbutton)
print('Screenshot --------- ',screenshotbutton)
print('Screenshot update -- ',refreshbutton)
print('Save state --------- ',savebutton)
print('Load state --------- ',loadbutton)
print('')
print('')
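# Example setup.txt accepted by keysetup() above (illustrative; the key names on
# the right-hand side must be pyxhook key names, and parsing stops at '***'):
#
#   Pause = P_Insert
#   Speedup = P_Down
#   Slowdown = P_End
#   Screenshot = F9
#   ***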
def patternsetup(file):
global namelist
global patternlist
linelist=[]
namelist=[]
patternlist=[]
namelist.append('PATTERN')
namelist.append('none')
patternlist.append([0])
patternlist.append([0])
try:
patterntxt = open(file, 'r')
for x in range(1000):
linelist=patterntxt.readline()
if linelist.strip()== '***': #strip() removes spaces and end of line characters
break
try:
if linelist.count('=')==1 and linelist.count(':')>0:
linelist=linelist.replace(' ', '').replace(',', '.').strip().split('=') #read line, remove spaces, convert "," to "." and split the string at the "=" sign
if linelist[0] != '' and linelist[1]!= '':
namelist.append(linelist[0][0:18])
stringlist=linelist[1].split(':')
intlist = [int(round(float(i))) for i in stringlist]#converts list of strings into rounded integers
patternlist.append(intlist)
except:
print(file, 'FORMAT ERROR\n')
patterntxt.close()
except:
print('Cannot open', file, '\n')
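# Example pattern.txt entry accepted by patternsetup() above (illustrative; the
# name and values are made up):
#
#   wave = 0:25:50:75:100:75:50:25
#   ***
#
# Each value is a percentage (0-100) of the MOTOR SPEED slider; the motor steps
# through the list at the PATTERN FREQ rate, and parsing stops at the '***' line.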
def comportsetup():
global ports
ports = list(serial.tools.list_ports.comports()) # detects available ports
print ('- AVAILABLE PORTS:\n')
for p in ports:
print (p)
print('')
def autoserialstart(baud):
checkAO.configure(state=DISABLED)
checkPUL.configure(state=DISABLED)
checkDET.configure(state=DISABLED)
checkSET.configure(state=DISABLED)
buttonserial.configure(state=DISABLED)
comentry.insert(END, "PLEASE WAIT...") # insert text into the widget
comentry.configure(state=DISABLED)
global arduino
global arduino_connected
line=('')
portnumber=('')
comentry.delete(0, END) # delete text from the widget from position 0 to the END
root.focus() #remove focus from the entry widget
resetGUI()
motor.stop()
print("Looking for the CH Machine, PLEASE WAIT...\n")
for p in ports:
arduino_connected=False
try:#try to close already existing serial connection
arduino.close()
while arduino.is_open:
pygame.time.wait(1)
except:
pass
try:
print (p[0] + '...')
arduino = serial.Serial(p[0], baud, timeout = 1, write_timeout = 1) # always a good idea to specify a timeout in case we send bad data
pygame.time.wait(3000)# wait for Arduino to initialize
arduino.write(('T').encode('utf-8'))
pygame.time.wait(150)# wait for a response from Arduino
line = arduino.read(arduino.inWaiting()).decode(encoding='UTF-8',errors='replace')
if line.find('connOK')!=-1:
print("CHM CONNECTED!")
print (p[0] + ' - Initialization Complete.')
arduino_connected=True
break
else:
print ('Wrong serial connection.')
except:
print ('Serial port exception')
if line.find('connOK')==-1:
print ('\nCHMachine not found, check out the connection.\n')
checkAO.configure(state=NORMAL)
checkPUL.configure(state=NORMAL)
checkDET.configure(state=NORMAL)
checkSET.configure(state=NORMAL)
buttonserial.configure(state=NORMAL)
comentry.configure(state=NORMAL)
comentry.delete(0, END)
return True
def serialstart(COMstring, baud):
global arduino_connected
global arduino
line=('')
comentry.delete(0, 'end') # delete text from the widget
root.focus() #remove focus from the entry widget
if COMstring == ('') or COMstring == ('COM Port'): #if no port is specified start autoserialstart() to find it automatically
tserial=threading.Thread(target=autoserialstart, args={serialbaud})
tserial.setDaemon(True)
tserial.start()
#manual port:
else:
print (COMstring + ' - Initializing...')
resetGUI()
arduino_connected=False
motor.stop()
try:
if arduino.is_open:
arduino.close()
pygame.time.wait(500)
except:
pass
try:
            arduino = serial.Serial(COMstring, baud, timeout = 1, write_timeout = 1) # e.g. index 2 = COM3 on Windows; always a good idea to specify a timeout in case we send bad data
pygame.time.wait(4000)# wait for the Arduino to initialize
#test the connection(see Arduino code):
arduino.write(('T').encode('utf-8'))
pygame.time.wait(300)
line = arduino.read(arduino.inWaiting()).decode(encoding='UTF-8',errors='replace')
if line.find('connOK')!=-1:
print("CHM CONNECTED!")
print (COMstring + ' - Initialization Complete.')
arduino_connected=True
else:
print ('Wrong serial connection.')
arduino.close()
except serial.SerialTimeoutException:
print (COMstring + ' TIMEOUT EXCEPTION. Try another port.')
arduino.close()
arduino_connected=False
except:
print('No port found.')
def onKeyDown(event):
global speed
global pos
global arrbase
global savelist
global loadlist
global top
global left
global screenshotsizex
global screenshotsizey
global match_window_open
global match_window
global match_canvas
# never put any condition before event.key
if event.Key == ('Return'):
if comentry==root.focus_get() and comentry.get()!=(''):
serialstart(comtext.get(), serialbaud)
if event.Key == (slowdownbutton):
speedint=int(speed)
if (checkAOVar.get()==True or checkPULVar.get()==True or checkDETVar.get()==True):
if speedint>10:
speedint -= 10
motorspeed.set(speedint)
speed=str(speedint)
else:
motorspeed.set(0)
speed=('0')
if event.Key == (speedupbutton):
speedint=int(speed)
if (checkAOVar.get()==True or checkPULVar.get()==True or checkDETVar.get()==True):
if speedint <= 245:
speedint += 10
motorspeed.set(speedint)
speed=str(speedint)
else:
motorspeed.set(255)
speed=('255')
if event.Key == (pausebutton):
motor.pause()
if (event.Key == screenshotbutton or event.Key == refreshbutton):
if (event.Key == screenshotbutton):
mousedata=display.Display().screen().root.query_pointer()._data
pos=[mousedata['root_x'], mousedata['root_y']]
if (pos != [-1,-1]):
print('Mouse position',pos)
###find black border width:
screenshotsizex=sizex.get()*(streamwindowssizex - 20)/100
screenshotsizey=sizey.get()*(streamwindowssizey - 20)/100
###
top=int((pos[1]-screenshotsizey/2))
left=int((pos[0]-screenshotsizex/2))
            ###adjusting screenshot position so it stays within the screen boundaries:
if left<0:
left=0
if top<0:
top=0
if left + screenshotsizex > screenwidth:
left=int(screenwidth-screenshotsizex)
if top + screenshotsizey > screenheight:
top=int(screenheight-screenshotsizey)
###
monitor = {"top": top, "left": left, "width": int(screenshotsizex), "height": int(screenshotsizey)}
arrbase = np.array(mss.mss().grab(monitor))
base=np.zeros((streamwindowssizex, streamwindowssizey, 4), np.uint8) #an array of zeros for a black background
x_offset=int((streamwindowssizex-screenshotsizex)/2)
y_offset=int((streamwindowssizey-screenshotsizey)/2)
base[y_offset:y_offset+arrbase.shape[0], x_offset:x_offset+arrbase.shape[1]] = arrbase #center the image array
            if match_window_open==False:# open "match" window if it isn't already open
match_window= Toplevel()
match_window.resizable(False,False)
match_window.title('Match')
match_window.geometry(str(streamwindowssizex) + 'x' + str(streamwindowssizey))
match_window.protocol("WM_DELETE_WINDOW", on_closing_match_window)
match_canvas = Canvas(match_window, width=streamwindowssizex, height=streamwindowssizey, background='black')
match_canvas.pack()
try:
iconpath=os.path.abspath(os.path.dirname(__file__)) + '/icon.gif'
imgicon = PhotoImage(file=iconpath)
match_window.tk.call('wm', 'iconphoto', match_window._w, imgicon)
except:
pass
ontop()
match_window_open=True
###show image:
im = cv2.cvtColor(base, cv2.COLOR_BGRA2RGB)
imag = Image.fromarray(im)
imagi = ImageTk.PhotoImage(image=imag)
match_canvas.image=imagi #to keep a reference in scope else it shows blank
match_canvas.create_image(streamwindowssizex/2, streamwindowssizex/2, image=match_canvas.image)
###
if event.Key == (savebutton):
filesname=glob.glob(os.path.abspath(os.path.dirname(__file__)) + "/*.npz") #find name of all .npz files in the main folder
savelist=[]
for x in filesname:
try: #in case of a miswritten file name
x = x[-9:-4]
x=x.strip('save')
num=int(x)
savelist.append(num)
except:
pass
if savelist!=[]:
savename=(os.path.abspath(os.path.dirname(__file__)) + '/save' + str(max(savelist) + 1) + '.npz') #find the max value to add to the string
else:
savename=(os.path.abspath(os.path.dirname(__file__)) + '/save0.npz')
np.savez(savename, arrbase, pos, int(screenshotsizex), int(screenshotsizey), speed, floorspeed, timeonvar, timeoffvar, threshold, checkinv)
print(savename, 'SAVED')
loadlist=[]
if event.Key == (loadbutton):
filesname=glob.glob(os.path.abspath(os.path.dirname(__file__)) + "/*.npz") #find name of all npz files in the main folder
if loadlist==[]:
for x in filesname:
try: #in case of a miswritten file name
x = x[-9:-4]
x=x.strip('save')
num=int(x)
loadlist.append(num)
except:
pass
loadlist.sort() #sort numbers in the list
if loadlist!=[]:
            loadname=(os.path.abspath(os.path.dirname(__file__)) + '/save' + str(loadlist.pop()) + '.npz') # pop() removes the last element and returns it
loaded_arrays = np.load(loadname)
load_state(loaded_arrays['arr_0'], loaded_arrays['arr_1'], loaded_arrays['arr_2'], loaded_arrays['arr_3'], loaded_arrays['arr_4'],
loaded_arrays['arr_5'], loaded_arrays['arr_6'], loaded_arrays['arr_7'], loaded_arrays['arr_8'], loaded_arrays['arr_9'])
print(loadname, 'LOADED')
else:
print('nothing to load')
return True
def load_state(image_arrayl, posl, xsizel, ysizel, speedl, floorspeedl, timeonvarl, timeoffvarl, thresholdl, checkinvl):
global screenshotsizex
global screenshotsizey
global speed
global timeonvar
global timeoffvar
global floorspeed
global threshold
global arrbase
global arr
global pos
global top
global left
global checkinv
global match_window_open
global match_window
global match_canvas
###load variables and update interface:
motorspeed.set(speedl)
speed=str(speedl)
timeON.set(timeonvarl)
timeonvar=timeON.get()
timeOFF.set(timeoffvarl)
timeoffvar=timeOFF.get()
floorspeedVAR.set(floorspeedl)
floorspeed=str(floorspeedVAR.get())
thresh.set(thresholdl * 100)
threshold=thresholdl
if checkinvl == True:
checkinvert.select()
checkinv=True
else:
checkinvert.deselect()
checkinv=False
###
###load and display image:
if posl[0] != -1:
pos = [posl[0], posl[1]]
top=int((posl[1]-screenshotsizey/2))
left=int((posl[0]-screenshotsizex/2))
arrbase=image_arrayl
arr=image_arrayl
sizex.set(xsizel/2)
sizey.set(ysizel/2)
screenshotsizex=(xsizel/2)*(streamwindowssizex - 20)/100
screenshotsizey=(ysizel/2)*(streamwindowssizey - 20)/100
x_offset=int((streamwindowssizex - screenshotsizex)/2)
y_offset=int((streamwindowssizey - screenshotsizey)/2)
base=np.zeros((streamwindowssizex, streamwindowssizey, 4), np.uint8) #an array of zeros for a black background
base[y_offset:y_offset+arrbase.shape[0], x_offset:x_offset+arrbase.shape[1]] = arrbase #center the image array
    if match_window_open==False:# open "match" window if it isn't already open
match_window= Toplevel()
match_window.resizable(False,False)
match_window.title('Match')
match_window.geometry(str(streamwindowssizex) + 'x' + str(streamwindowssizey))
match_window.protocol("WM_DELETE_WINDOW", on_closing_match_window)
match_canvas = Canvas(match_window, width=streamwindowssizex, height=streamwindowssizey, background='black')
match_canvas.pack()
try:
iconpath=os.path.abspath(os.path.dirname(__file__)) + '/icon.gif'
imgicon = PhotoImage(file=iconpath)
match_window.tk.call('wm', 'iconphoto', match_window._w, imgicon)
except:
pass
ontop()
match_window_open=True
im = cv2.cvtColor(base, cv2.COLOR_BGRA2RGB)
imag = Image.fromarray(im)
imagi = ImageTk.PhotoImage(image=imag)
match_canvas.image=imagi #to keep a reference else it shows blank
match_canvas.create_image(streamwindowssizex/2, streamwindowssizex/2, image=match_canvas.image)
###
# TKINTER FUNCTIONS:
def alwaysONtick():
try:
arduino.name
if (arduino.is_open):
if checkAOVar.get()==False:
resetGUI()
motor.stop()
if checkAOVar.get()==True:
if patternvar=='none':
resetGUI()
slidera.config(foreground='black')
checkAO.select()
motor.alwayson_pattern()
else:
resetGUI()
slidera.config(foreground='black')
sliderb.config(foreground='black', label='PATTERN FREQ:')
sliderd.config(foreground='black')
checkAO.select()
motor.alwayson_pattern()
else:
print('No serial connection')
checkAO.deselect()
except:
print('No serial connection')
checkAO.deselect()
def detecttick():
if (pos==[-1,-1]):
print('Position? (Press', screenshotbutton, 'to take a screenshot)')
checkDET.deselect()
else:
try:
arduino.name
if (arduino.is_open):
if checkDETVar.get()==False:
resetGUI()
motor.stop()
if checkDETVar.get()==True:
resetGUI()
slidera.config(foreground='black')
sliderb.config(foreground='black')
sliderd.config(foreground='black')
slidersizex.config(foreground='black')
slidersizey.config(foreground='black')
sliderthresh.config(foreground='black')
checkinvert.config(foreground='black')
checkDET.select()
motor.startdetect()
else:
print('No serial connection')
checkDET.deselect()
except:
print('No serial connection')
checkDET.deselect()
def detectsetup():
if (pos==[-1,-1]):
print('Position? (Press', screenshotbutton, 'to take a screenshot)')
checkSET.deselect()
else:
if checkSETVar.get()==False:
resetGUI()
motor.stop()
if checkSETVar.get()==True:
resetGUI()
sliderb.config(foreground='black')
slidersizex.config(foreground='black')
slidersizey.config(foreground='black')
sliderthresh.config(foreground='black')
checkinvert.config(foreground='black')
checkSET.select()
motor.setup()
def pulsetick():
try:
arduino.name
if (arduino.is_open):
if checkPULVar.get()==False:
resetGUI()
motor.stop()
if checkPULVar.get()==True:
resetGUI()
slidera.config(foreground='black')
sliderb.config(foreground='black')
sliderc.config(foreground='black')
sliderd.config(foreground='black')
checkPUL.select()
motor.pulse()
else:
print('No serial connection')
checkPUL.deselect()
except:
print('No serial connection')
checkPUL.deselect()
def on_closing_match_window():
global match_window_open
global match_window
match_window_open=False
match_window.destroy()
def on_closing_stream_window():
global stream_window_open
global stream_window
stream_window_open=False
stream_window.destroy()
def on_closing():
global detectflag
motor.stop()
detectflag=False
print ('Bye Bye')
pygame.time.wait(1)
hm.cancel()
cv2.destroyAllWindows()
root.quit()
root.destroy()
print ('Be vigilant')
sys.exit()
def slidersize(value):
global arrbase
global screenshotsizex
global screenshotsizey
global top
global left
screenshotsizex=sizex.get()*(streamwindowssizex - 20)/100
screenshotsizey=sizey.get()*(streamwindowssizey - 20)/100
if pos != [-1,-1]:
top=int((pos[1]-screenshotsizey/2))
left=int((pos[0]-screenshotsizex/2))
        ### adjusting screenshot position so it stays within the screen boundaries:
if left<0:
left=0
if top<0:
top=0
if left + screenshotsizex > screenwidth:
left=int(screenwidth-screenshotsizex)
if top + screenshotsizey > screenheight:
top=int(screenheight-screenshotsizey)
###
### show image:
monitor = {"top": top, "left": left, "width": int(screenshotsizex), "height": int(screenshotsizey)}
arrbase = np.array(mss.mss().grab(monitor))
base=np.zeros((streamwindowssizex, streamwindowssizey, 4), np.uint8) #an array of zeros for a black background
x_offset=int((streamwindowssizex-screenshotsizex)/2)
y_offset=int((streamwindowssizey-screenshotsizey)/2)
base[y_offset:y_offset+arrbase.shape[0], x_offset:x_offset+arrbase.shape[1]] = arrbase #center the image array
im = cv2.cvtColor(base, cv2.COLOR_BGRA2RGB)
imag = Image.fromarray(im)
imagi = ImageTk.PhotoImage(image=imag)
try: # if the window is not opened an exception occur
match_canvas.image=imagi #to keep a reference else it shows blank
match_canvas.create_image(streamwindowssizex/2, streamwindowssizex/2, image=match_canvas.image)
except:
pass
###
def speedslider(value):
global speed
speed=value
def floorspeedslider(value):
global floorspeed
floorspeed=value
def timeONslider(value):
global timeonvar
timeonvar=int(value)
def timeOFFslider(value):
global timeoffvar
timeoffvar=int(value)
def thresholdslider(value):
global threshold
threshold=int(value)/100
def about():
top = Toplevel()
top.wm_attributes("-topmost", 1)
top.resizable(False,False)
top.focus()
top.geometry("220x150")
top.title('About')
try:
iconpath=os.path.abspath(os.path.dirname(__file__)) + '/icon.gif'
imgicon = PhotoImage(file=iconpath)
top.tk.call('wm', 'iconphoto', top._w, imgicon)
except:
pass
msg = Message(top, width=300, text='COCK HERO MACHINE Ver.' + version)
msga = Message(top, width=300, text='[email protected]')
msgb = Message(top, width=300, text='For more informations visit:')
msgc = Message(top, width=300, text='cockheromachine.blogspot.com\n')
msg.pack()
msga.pack()
msgb.pack()
msgc.pack()
button = Button(top, height=1, width=10, text="OK", command=top.destroy)
button.pack()
def ontop():
if checkontopvar.get()==True:
root.wm_attributes("-topmost", 1) # on top
        try:#if the window is not opened an exception occurs
match_window.wm_attributes("-topmost", 1)# on top
except:
pass
try:
stream_window.wm_attributes("-topmost", 1)# on top
except:
pass
if checkontopvar.get()==False:
root.wm_attributes("-topmost", 0) # NOT on top
try:
match_window.wm_attributes("-topmost", 0)# NOT on top
except:
pass
try:
stream_window.wm_attributes("-topmost", 0)# NOT on top
except:
pass
def on_entry_click(event):
if comentry.get() == 'COM Port':
comentry.delete(0, "end") # delete all the text in the entry widget
comentry.insert(0, '') #Insert blank
comentry.config(foreground = 'black')
def resetGUI():
checkAO.deselect()
checkPUL.deselect()
checkDET.deselect()
checkSET.deselect()
slidera.config(foreground='gray')
sliderb.config(foreground='gray', label='TIME ON(ms):')
sliderc.config(foreground='gray')
sliderd.config(foreground='gray')
slidersizex.config(foreground='gray')
slidersizey.config(foreground='gray')
sliderthresh.config(foreground='gray')
checkinvert.config(foreground='gray')
def inverttick():
global checkinv
checkinv=not checkinv
def patternmenu(value):
global patternvar
patternvar=value
alwaysONtick()
# TKINTER INTERFACE:
root= Tk()
streamwindowssizex=220
streamwindowssizey=220
comtext = StringVar()
comentry=Entry(root, textvariable=comtext)
comentry.grid(row = 0, column = 0)
comentry.insert(0, "COM Port")
comentry.bind('<FocusIn>', on_entry_click)
comentry.config(fg = 'gray', width=13)
buttonserial=Button(root,height=1, width=8,text='CONNECT', command=lambda:serialstart(comtext.get(), serialbaud))
buttonserial.grid(row = 0, column = 1, sticky=W)
checkontopvar = BooleanVar()
checkontop=Checkbutton(root,text = 'On top', variable=checkontopvar, command=lambda:ontop())
checkontop.grid(row = 0, column = 3)
checkontop.select()
buttonabout=Button(root,height=1, width=8,text='About...', command=lambda:about())
buttonabout.grid(row = 0, column = 4)
patternsetup(os.path.abspath(os.path.dirname(__file__)) + '/pattern.txt')#load patterns
patternvar='none'
pattern_variable = StringVar()
pattern_variable.set("PATTERNS")
optionmenu_widget = OptionMenu(root, pattern_variable, *namelist[1:], command=patternmenu)
optionmenu_widget.grid(row = 2, column=0)
optionmenu_widget.config(width=7)
checkAOVar = IntVar()
checkAO=Checkbutton(root,text = 'ALWAYS ON', command=lambda:alwaysONtick(), variable = checkAOVar)
checkAO.grid(row = 2, column = 1, pady=10)
checkPULVar = IntVar()
checkPUL=Checkbutton(root,text = 'PULSE', command=lambda:pulsetick(), variable = checkPULVar)
checkPUL.grid(row = 2, column = 2, pady=10)
checkDETVar = IntVar()
checkDET=Checkbutton(root,text = 'DETECT', command=lambda:detecttick(), variable = checkDETVar)
checkDET.grid(row = 2, column = 3, pady=10)
checkSETVar = IntVar()
checkSET=Checkbutton(root,text = 'DETECT SETUP', command=lambda:detectsetup(), variable = checkSETVar)
checkSET.grid(row = 2, column = 4, pady=10)
buttonpause=Button(root, height=2, width=60, text='-PAUSE/START-', command=lambda:motor.pause())
buttonpause.grid(row = 4, columnspan = 5, pady=10)
motorspeed=IntVar(value=10)
slidera = Scale(root, from_=0, to=255, orient=HORIZONTAL,length=400.00, variable=motorspeed, label='MOTOR SPEED:', command=speedslider)
slidera.grid(columnspan = 6,pady=5)
speed=(str(motorspeed.get()))
timeON=IntVar(value=200)
sliderb = Scale(root, from_=10, to=1000, orient=HORIZONTAL,length=400.00, variable=timeON, label='TIME ON(ms):', command=timeONslider)
sliderb.grid(columnspan = 7,pady=5)
timeonvar=timeON.get()
timeOFF=IntVar(value=100)
sliderc = Scale(root, from_=10, to=1000, orient=HORIZONTAL,length=400.00, variable=timeOFF, label='TIME OFF(ms):', command=timeOFFslider)
sliderc.grid(columnspan = 8,pady=5)
timeoffvar=timeOFF.get()
floorspeedVAR=IntVar(value=0)
sliderd = Scale(root, from_=0, to=255, orient=HORIZONTAL,length=400.00, variable=floorspeedVAR, label='FLOOR SPEED:', command=floorspeedslider)
sliderd.grid(columnspan = 9,pady=5)
floorspeed=str(floorspeedVAR.get())
sizex=IntVar(value=25)
slidersizex = Scale(root, from_=1, to=100, orient=HORIZONTAL,length=400.00, variable=sizex, label='Xsize:', command=slidersize)
slidersizex.grid(columnspan = 10,pady=5)
screenshotsizex=sizex.get()*(streamwindowssizex - 20)/100
sizey=IntVar(value=25)
slidersizey = Scale(root, from_=1, to=100, orient=HORIZONTAL,length=400.00, variable=sizey, label='Ysize:', command=slidersize)
slidersizey.grid(columnspan = 11,pady=5)
screenshotsizey=sizey.get()*(streamwindowssizey - 20)/100
thresh=IntVar(value=70)
sliderthresh = Scale(root, from_=1, to=100, orient=HORIZONTAL,length=400.00, variable=thresh, label='THRESHOLD:', command=thresholdslider)
sliderthresh.grid(columnspan = 12,pady=5)
threshold=int(thresh.get())/100
checkinv=False
# invert state is tracked in the global 'checkinv' by inverttick(); a Tkinter
# BooleanVar would normally be passed as 'variable'
checkinvert=Checkbutton(root,text = 'Invert', command=inverttick, variable=checkinv)
checkinvert.grid(columnspan = 13)
#THREADS:
detectflag=True
arduino_connected=False
motor=motorclass()
tmotordetect=threading.Thread(target=motor.detect, args=())
tmotordetect.daemon = True  # daemon thread so it does not block program exit (setDaemon() is deprecated)
tmotordetect.start()
#INITIALIZING:
pos=[-1,-1]
top=0
left=0
savelist=[]
loadlist=[]
screenshotsizex=sizex.get()*(streamwindowssizex - 20)/100
screenshotsizey=sizey.get()*(streamwindowssizey - 20)/100
arrbase=np.zeros((streamwindowssizex, streamwindowssizey, 4), np.uint8) #an array of zeros for a black background
serialbaud=9600
arduino=None
keysetup(os.path.abspath(os.path.dirname(__file__)) + '/setup.txt') #assign keys from setup.txt
comportsetup() #list all available com ports
pygame.init()
hm = pyxhook.HookManager() # hooking keyboard
hm.KeyDown = onKeyDown
hm.HookKeyboard()
hm.start()
pygame.event.pump()
root.withdraw()
root.wm_attributes("-topmost", 1)
root.protocol("WM_DELETE_WINDOW", on_closing)
root.title('CHM ' + version)
root.resizable(False,False)
try:
iconpath=os.path.abspath(os.path.dirname(__file__)) + '/icon.gif'
imgicon = PhotoImage(file=iconpath)
root.tk.call('wm', 'iconphoto', root._w, imgicon)
except Exception:
    pass  # the window icon is optional; ignore a missing or unreadable icon.gif
screenwidth, screenheight = root.winfo_screenwidth(), root.winfo_screenheight()# get the screen resolution
match_window_open=False
stream_window_open=False
root.deiconify()
resetGUI()
ontop()
root.mainloop()
| CHMachine/CHMachine-Software | CHMachine_Software_Linux/CHMachine.py | Python | bsd-3-clause | 41,681 | [
"VisIt"
] | b7232dd025b62317ac3b52d70fd06e4d972a40476d0c6b8d1ad3f6ae4db87bf6 |
# Copyright (C) 2013 Oskar Maier
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# author Oskar Maier
# version r0.3.0
# since 2013-08-23
# status Release
# build-in modules
# third-party modules
import numpy
from scipy.ndimage.filters import gaussian_filter
# path changes
# own modules
from .utilities import xminus1d
# code
def gauss_xminus1d(img, sigma, dim=2):
r"""
    Applies an (X-1)-dimensional Gaussian filter to a copy of an X-dimensional
    image, slicing it along ``dim``.
    Essentially uses `scipy.ndimage.filters.gaussian_filter`, but
    applies it over one dimension less than the image has.
Parameters
----------
img : array_like
The image to smooth.
sigma : integer
        The sigma, i.e. the Gaussian kernel size, in pixels.
dim : integer
The dimension along which to apply the filter.
Returns
-------
gauss_xminus1d : ndarray
The input image ``img`` smoothed by a gaussian kernel along dimension ``dim``.
"""
img = numpy.array(img, copy=False)
return xminus1d(img, gaussian_filter, dim, sigma=sigma)
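# A minimal usage sketch (assumption for illustration, not part of the MedPy
# API): a 2D Gaussian is applied to every slice of a 3D volume taken along
# ``dim``. The volume shape and sigma are made-up values.
def _example_gauss_xminus1d():
    volume = numpy.random.rand(8, 32, 32)  # synthetic 3D stack of 8 slices
    # smooth each (32, 32) slice with a 2D Gaussian of sigma=1, slicing along axis 0
    return gauss_xminus1d(volume, sigma=1, dim=0)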
def anisotropic_diffusion(img, niter=1, kappa=50, gamma=0.1, voxelspacing=None, option=1):
r"""
    Edge-preserving, X-dimensional anisotropic diffusion.
Parameters
----------
img : array_like
        Input image (will be cast to numpy.float32).
niter : integer
Number of iterations.
kappa : integer
Conduction coefficient, e.g. 20-100. ``kappa`` controls conduction
as a function of the gradient. If ``kappa`` is low small intensity
gradients are able to block conduction and hence diffusion across
steep edges. A large value reduces the influence of intensity gradients
on conduction.
gamma : float
Controls the speed of diffusion. Pick a value :math:`<= .25` for stability.
voxelspacing : tuple of floats or array_like
The distance between adjacent pixels in all img.ndim directions
option : {1, 2, 3}
Whether to use the Perona Malik diffusion equation No. 1 or No. 2,
or Tukey's biweight function.
Equation 1 favours high contrast edges over low contrast ones, while
equation 2 favours wide regions over smaller ones. See [1]_ for details.
Equation 3 preserves sharper boundaries than previous formulations and
improves the automatic stopping of the diffusion. See [2]_ for details.
Returns
-------
anisotropic_diffusion : ndarray
Diffused image.
Notes
-----
Original MATLAB code by Peter Kovesi,
School of Computer Science & Software Engineering,
The University of Western Australia,
pk @ csse uwa edu au,
<http://www.csse.uwa.edu.au>
Translated to Python and optimised by Alistair Muldal,
Department of Pharmacology,
University of Oxford,
<[email protected]>
Adapted to arbitrary dimensionality and added to the MedPy library Oskar Maier,
Institute for Medical Informatics,
Universitaet Luebeck,
<[email protected]>
    History:
    - June 2000: original version
    - March 2002: corrected diffusion eqn No 2
    - July 2012: translated to Python
    - August 2013: incorporated into MedPy, arbitrary dimensionality
References
----------
.. [1] P. Perona and J. Malik.
       Scale-space and edge detection using anisotropic diffusion.
IEEE Transactions on Pattern Analysis and Machine Intelligence,
12(7):629-639, July 1990.
.. [2] M.J. Black, G. Sapiro, D. Marimont, D. Heeger
Robust anisotropic diffusion.
IEEE Transactions on Image Processing,
7(3):421-432, March 1998.
"""
# define conduction gradients functions
if option == 1:
def condgradient(delta, spacing):
return numpy.exp(-(delta/kappa)**2.)/float(spacing)
elif option == 2:
def condgradient(delta, spacing):
return 1./(1.+(delta/kappa)**2.)/float(spacing)
elif option == 3:
kappa_s = kappa * (2**0.5)
def condgradient(delta, spacing):
top = 0.5*((1.-(delta/kappa_s)**2.)**2.)/float(spacing)
return numpy.where(numpy.abs(delta) <= kappa_s, top, 0)
# initialize output array
out = numpy.array(img, dtype=numpy.float32, copy=True)
# set default voxel spacing if not supplied
if voxelspacing is None:
voxelspacing = tuple([1.] * img.ndim)
# initialize some internal variables
deltas = [numpy.zeros_like(out) for _ in range(out.ndim)]
for _ in range(niter):
# calculate the diffs
for i in range(out.ndim):
            # index with a tuple (list indices are deprecated as nd-indices in newer numpy)
            slicer = tuple(slice(None, -1) if j == i else slice(None) for j in range(out.ndim))
            deltas[i][slicer] = numpy.diff(out, axis=i)
# update matrices
matrices = [condgradient(delta, spacing) * delta for delta, spacing in zip(deltas, voxelspacing)]
        # subtract a copy that has been shifted ('Up/North/West' in the 3D case) by one
        # pixel along the corresponding axis
        for i in range(out.ndim):
            slicer = tuple(slice(1, None) if j == i else slice(None) for j in range(out.ndim))
            matrices[i][slicer] = numpy.diff(matrices[i], axis=i)
# update the image
out += gamma * (numpy.sum(matrices, axis=0))
return out
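# A minimal usage sketch (illustrative assumption, not part of the MedPy API):
# denoise a small synthetic 2D image with the filter above. Image size, noise
# level and parameter values are made-up numbers for demonstration.
if __name__ == '__main__':
    square = numpy.zeros((64, 64), dtype=numpy.float32)
    square[16:48, 16:48] = 1.0
    noisy = square + 0.2 * numpy.random.randn(64, 64).astype(numpy.float32)
    # option=1 favours high-contrast edges; a few iterations already smooth the noise
    denoised = anisotropic_diffusion(noisy, niter=10, kappa=20, gamma=0.1, option=1)
    print('noisy RMSE = %.4f, denoised RMSE = %.4f'
          % (numpy.sqrt(((noisy - square) ** 2).mean()),
             numpy.sqrt(((denoised - square) ** 2).mean())))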
| loli/medpy | medpy/filter/smoothing.py | Python | gpl-3.0 | 5,863 | [
"Gaussian"
] | 9228546abb15a4efe8276354ffce2daa4f9468d2efd7dcfd576fdd7e986f7376 |
'''
* Created by Zhenia Syryanyy (Yevgen Syryanyy)
* e-mail: [email protected]
* License: this code is under GPL license
* Last modified: 2017-10-18
'''
import sys
import os
from copy import deepcopy
from collections import OrderedDict as odict
from itertools import cycle
from io import StringIO
import inspect
import numpy as np
from scipy.optimize import differential_evolution
import matplotlib.gridspec as gridspec
from matplotlib import pylab
import matplotlib.pyplot as plt
import scipy as sp
from scipy.interpolate import interp1d
from scipy.interpolate import Rbf, InterpolatedUnivariateSpline, splrep, splev, splprep
import re
from shutil import copyfile
from libs.dir_and_file_operations import listOfFilesFN, listOfFiles, listOfFilesFN_with_selected_ext
from feff.libs.numpy_group_by_ep_second_draft import group_by
from scipy.signal import savgol_filter
from feff.libs.fit_current_curve import return_fit_param, func, f_PM, f_diff_PM_for_2_T, \
linearFunc, f_PM_with_T, f_SPM_with_T
from scipy.optimize import curve_fit, leastsq
from feff.libs.math_libs import approx_errors
g_J_Mn2_plus = 5.92
g_J_Mn3_plus = 4.82
g_e = 2.0023 # G-factor Lande
mu_Bohr = 927.4e-26 # J/T
Navagadro = 6.02214e23  # Avogadro's number, 1/mol
k_b = 1.38065e-23 #J/K
rho_GaAs = 5.3176e3 #kg/m3
mass_Molar_kg_GaAs = 144.645e-3 #kg/mol
mass_Molar_kg_Diamond = 12.011e-3 # diamond
rho_Diamond = 3.515e3
testX1 = [0.024, 0.026, 0.028, 0.03, 0.032, 0.034, 0.036, 0.038, 0.03, 0.0325]
testY1 = [0.6, 0.527361, 0.564139, 0.602, 0.640714, 0.676684, 0.713159, 0.7505, 0.9, 0.662469]
testArray = np.array([testX1, testY1])
def fromRowToColumn (Array = testArray):
    # if the number of columns is bigger than the number of rows, transpose the matrix
    n, m = Array.shape
if n < m:
return Array.T
else:
return Array
def sortMatrixByFirstColumn(Array = fromRowToColumn(testArray), colnum = 0):
    # return the matrix sorted by the selected column number
return Array[Array[:, colnum].argsort()]
out = sortMatrixByFirstColumn()
# print('sorted out:')
# print(out)
# print('--')
def deleteNonUniqueElements(key = out[:, 0], val = out[:, 1]):
    # calculate the mean of val for non-unique (duplicate) key values
# u, idx = np.unique(Array[:, key_colnum], return_index=True)
return fromRowToColumn(np.array(group_by(key).mean(val)))
# print('mean :')
# print(deleteNonUniqueElements())
# print('--')
def from_EMU_cm3_to_A_by_m(moment_emu = 2300e-8, V_cm3 = 3e-6):
# return value of Magnetization in SI (A/m)
return (moment_emu / V_cm3)*1000
def concentration_from_Ms(Ms = 7667, J=2.5):
# return concentration from Ms = n*gj*mu_Bohr*J = n*p_exp*mu_Bohr
return Ms/mu_Bohr/J/g_e
def number_density(rho = rho_GaAs, M = mass_Molar_kg_GaAs):
# return concentration from Molar mass
return Navagadro*rho/M
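# Hedged worked example (made-up numbers, not experimental data): chain the
# helpers above to go from a raw SQUID moment to an estimated concentration of
# paramagnetic centres relative to the GaAs site density.
example_M = from_EMU_cm3_to_A_by_m(moment_emu=2300e-8, V_cm3=3e-6)   # magnetization, A/m
example_n = concentration_from_Ms(Ms=example_M, J=2.5)               # spins per m^3
example_n_GaAs = number_density(rho=rho_GaAs, M=mass_Molar_kg_GaAs)  # GaAs formula units per m^3
example_fraction = example_n / example_n_GaAs                        # ~0.007 for these inputs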
class MagneticPropertiesOfPhases:
def __init__(self):
self.concentration_ParaMagnetic = None
self.concentration_ParaMagnetic_error = None
self.J_total_momentum = 2.5
self.Mn_type = 'Mn2+' # Mn2+ or Mn3+
self.spin_type_cfg = 'high' # low or high
self.g_factor = g_e
self.mu_eff = g_J_Mn2_plus
self.info = ''
class MagneticData:
'''
    base class for storing spectra
'''
COLOR_CYCOL = cycle('bgrcmk')
def __init__(self):
self.magnetic_field = []
self.magnetic_moment = []
self.magnetic_moment_raw = []
self.label = []
self.do_plot = True
self.line_style = '-'
self.line_color = 'cornflowerblue'
self.line_width = 2
self.line_alpha = 1.0
self.line_marker_style = 'o'
self.line_marker_size = 4
self.line_marker_face_color = 'blue'
self.line_marker_edge_color = 'darkblue'
self.temperature = []
self.magnetic_field_shift = []
self.magnetic_moment_shift = []
# for intersection of region for two models:
self.accepted_indices = []
self.history_log = odict()
def append_history_log(self, case=''):
num = len(self.history_log)
num += 1
self.history_log[num] = case
    def plot(self, ax=None):
        if ax is None:
            ax = plt.gca()  # resolve the axes at call time, not at class definition
        if self.do_plot:
ax.plot(self.magnetic_field, self.magnetic_moment,
linestyle=self.line_style,
color=self.line_color,
linewidth=self.line_width,
alpha=self.line_alpha,
label=self.label,
marker=self.line_marker_style,
markersize=self.line_marker_size,
markerfacecolor=self.line_marker_face_color,
markeredgecolor=self.line_marker_edge_color
)
class StructBase:
'''
    Describes the data structures for a single measurement
'''
def __init__(self):
self.raw = MagneticData()
self.prepared_raw = MagneticData()
self.for_fit = MagneticData()
self.line = MagneticData()
self.fit = MagneticData()
self.magnetic_field_inflection_point = 30
self.magnetic_field_step = 0.1 #[T]
self.magnetic_field_minimum = 0
self.magnetic_field_maximum = 0
# obj for saving params about unique phases in current material:
self.current_magnetic_phase_data = MagneticPropertiesOfPhases()
# main magnetic params for a structure which could be saved in unique phases:
self.J_total_momentum = 2.5
self.Mn_type = 'Mn2+' # Mn2+ or Mn3+
self.spin_type_cfg = 'high' # low or high
self.g_factor = g_e
self.mu_eff = g_J_Mn2_plus
self.mu_eff_min = g_J_Mn2_plus - 0.1
self.mu_eff_max = g_J_Mn2_plus + 0.1
self.volumeOfTheFilm_GaMnAs = 0 #[m^3]
self.fit.magnetic_moment = []
self.forFit_y = []
self.forFit_x = []
self.zeroIndex = []
        # magnetic field value that defines the outer region where the functions are fitted
self.magnetic_field_value_for_fit = 3 # [T]
# number of density for PM fit only for the current temperature:
self.concentration_ParaMagnetic = 0
self.concentration_ParaMagnetic_error = 10
        # linear coefficient from the line_subtracting procedure:
self.linear_coefficient = 0
# corrections for curve:
self.y_shift = 0
self.x_shift = 0
# 'IUS' - Interpolation using univariate spline
# 'RBF' - Interpolation using Radial basis functions
# Interpolation using RBF - multiquadrics
# 'Spline'
# 'Cubic'
# 'Linear'
self.typeOfFiltering = 'IUS'
self.R_factor = 100
self.std = 100
self.label_summary = ''
self.title = ''
self.font_size = 18
self.y_label = '$M(A/m)$'
self.x_label = '$B(T)$'
self.dict_of_magnetic_phases = odict()
def addDataToDict(self, current_data):
num = len(self.dict_of_magnetic_phases)
if isinstance(current_data, MagneticPropertiesOfPhases):
self.dict_of_magnetic_phases[num] = odict({'data': current_data})
def flushDictOfSpectra(self):
self.dict_of_magnetic_phases = odict()
def define_Mn_type_variables(self):
'''
        # examples of unpaired electron counts
        d-count   high spin   low spin   example ions
        d4        4           2          Cr2+, Mn3+
        d5        5           1          Fe3+, Mn2+
        d6        4           0          Fe2+, Co3+
        d7        3           1          Co2+
Table: High and low spin octahedral transition metal complexes.
'''
# ===================================================
# Mn2 +
# 5.916, 3d5 4s0, 5 unpaired e-, observed: 5.7 - 6.0 in [muB]
# self.mu_spin_only = np.sqrt(5*(5+2))
# Mn3 +
# 5.916, 3d4 4s0, 4 unpaired e-, observed: 4.8 - 4.9 in [muB]
# self.mu_spin_only = np.sqrt(4*(4+2))
if self.Mn_type == 'Mn2+':
if self.spin_type_cfg == 'high':
self.J_total_momentum = 2.5 # high spin
elif self.spin_type_cfg == 'low':
self.J_total_momentum = 1.5 # low spin ?
self.mu_eff = g_J_Mn2_plus
self.mu_eff_min = 5.7
self.mu_eff_max = 6.0
elif self.Mn_type == 'Mn3+':
if self.spin_type_cfg == 'low':
self.J_total_momentum = 2.0 # ? low-spin, probably because mu_eff is 4.82 from the experiment
elif self.spin_type_cfg == 'high':
self.J_total_momentum = 0.0 # high-spin
self.mu_eff = g_J_Mn3_plus
self.mu_eff_min = 4.8
self.mu_eff_max = 4.9
self.g_factor = self.mu_eff / self.J_total_momentum
def set_Mn2_plus_high(self):
self.Mn_type = 'Mn2+'
self.spin_type_cfg = 'high'
self.define_Mn_type_variables()
def set_Mn2_plus_low(self):
self.Mn_type = 'Mn2+'
self.spin_type_cfg = 'low'
self.define_Mn_type_variables()
def set_Mn3_plus_low(self):
self.Mn_type = 'Mn3+'
self.spin_type_cfg = 'low'
self.define_Mn_type_variables()
def save_magnetic_params_to_current_phase_obj(self):
self.current_magnetic_phase_data.J_total_momentum = self.J_total_momentum
self.current_magnetic_phase_data.Mn_type = self.Mn_type
self.current_magnetic_phase_data.g_factor = self.g_factor
self.current_magnetic_phase_data.spin_type_cfg = self.spin_type_cfg
self.current_magnetic_phase_data.concentration_ParaMagnetic = self.concentration_ParaMagnetic
self.current_magnetic_phase_data.concentration_ParaMagnetic_error = self.concentration_ParaMagnetic_error
self.current_magnetic_phase_data.mu_eff = self.mu_eff
def interpolate_data(self):
x = np.array(self.raw.magnetic_field)
y = np.array(self.raw.magnetic_moment)
if self.magnetic_field_minimum == self.magnetic_field_maximum:
self.magnetic_field_minimum = np.fix(10 * self.raw.magnetic_field.min()) / 10
self.magnetic_field_maximum = np.fix(10 * self.raw.magnetic_field.max()) / 10
if self.magnetic_field_minimum < self.raw.magnetic_field.min():
self.magnetic_field_minimum = np.fix(10 * self.raw.magnetic_field.min()) / 10
if self.magnetic_field_maximum > self.raw.magnetic_field.max():
self.magnetic_field_maximum = np.fix(10 * self.raw.magnetic_field.max()) / 10
self.fit.magnetic_field = \
np.r_[self.magnetic_field_minimum: self.magnetic_field_maximum: self.magnetic_field_step]
if self.typeOfFiltering == 'Linear':
f = interp1d(self.raw.magnetic_field, self.raw.magnetic_moment)
self.fit.magnetic_moment = f(self.fit.magnetic_field)
if self.typeOfFiltering == 'Cubic':
f = interp1d(self.raw.magnetic_field, self.raw.magnetic_moment, kind='cubic')
self.fit.magnetic_moment = f(self.fit.magnetic_field)
if self.typeOfFiltering == 'Spline':
tck = splrep(x, y, s=0)
self.fit.magnetic_moment = splev(self.fit.magnetic_field, tck, der=0)
if self.typeOfFiltering == 'IUS':
f = InterpolatedUnivariateSpline(self.raw.magnetic_field, self.raw.magnetic_moment)
self.fit.magnetic_moment = f(self.fit.magnetic_field)
if self.typeOfFiltering == 'RBF':
f = Rbf(self.raw.magnetic_field, self.raw.magnetic_moment, function = 'linear')
self.fit.magnetic_moment = f(self.fit.magnetic_field)
if abs(self.magnetic_field_minimum) == abs(self.magnetic_field_maximum):
self.y_shift = self.fit.magnetic_moment[-1] - abs(self.fit.magnetic_moment[0])
self.fit.magnetic_moment = self.fit.magnetic_moment - self.y_shift
self.fit.magnetic_moment_shift = self.y_shift
yy_0 = np.r_[0:self.fit.magnetic_moment[-1]:self.fit.magnetic_moment[-1]/100]
f_0 = interp1d(self.fit.magnetic_moment, self.fit.magnetic_field)
xx_0 = f_0(yy_0)
self.x_shift = xx_0[0]
self.fit.magnetic_field = self.fit.magnetic_field - self.x_shift
self.fit.magnetic_field_shift = self.x_shift
# we need to adjust new self.fit.magnetic_field values to a good precision:
if self.magnetic_field_minimum < self.fit.magnetic_field.min():
self.magnetic_field_minimum = np.fix(10 * self.fit.magnetic_field.min()) / 10
if self.magnetic_field_maximum > self.fit.magnetic_field.max():
self.magnetic_field_maximum = np.fix(10 * self.fit.magnetic_field.max()) / 10
xx = np.r_[self.magnetic_field_minimum: self.magnetic_field_maximum: self.magnetic_field_step]
self.zeroIndex = np.nonzero((np.abs(xx) < self.magnetic_field_step*1e-2))
xx[self.zeroIndex] = 0
f = interp1d(self.fit.magnetic_field, self.fit.magnetic_moment)
self.fit.magnetic_moment = f(xx)
self.fit.magnetic_field = xx
self.fit.append_history_log(
case='do interpolation with type of filtering: {}'.format(self.typeOfFiltering))
# store interpolated data of raw data in spacial object:
self.prepared_raw = deepcopy(self.fit)
self.for_fit = deepcopy(self.fit)
self.line = deepcopy(self.fit)
self.raw.label = 'raw: T={0}K'.format(self.raw.temperature)
self.prepared_raw.label = 'prep raw: T={0}K {1}'.format(self.prepared_raw.temperature, self.typeOfFiltering)
self.line.label = 'subtracted line: T={0}K {1}'.format(self.prepared_raw.temperature, self.typeOfFiltering)
self.for_fit.label = 'selected points for fit: T={0}K {1}'.format(self.fit.temperature, self.typeOfFiltering)
self.fit.label = 'fit: T={0}K {1}'.format(self.fit.temperature, self.typeOfFiltering)
def filtering(self):
# do some filtering operations under data:
if self.magnetic_field_minimum == self.magnetic_field_maximum:
self.magnetic_field_minimum = self.raw.magnetic_field.min()
self.magnetic_field_maximum = self.raw.magnetic_field.max()
self.fit.magnetic_field = np.r_[self.magnetic_field_minimum: self.magnetic_field_maximum: self.magnetic_field_step]
window_size, poly_order = 101, 3
self.fit.magnetic_moment = savgol_filter(self.fit.magnetic_moment, window_size, poly_order)
self.fit.append_history_log(
case='apply savgol filter: window = {}, poly_order = {}'.format(window_size, poly_order))
def line_subtracting(self):
indx_plus = (self.prepared_raw.magnetic_field >= self.magnetic_field_value_for_fit)
indx_minus = (self.prepared_raw.magnetic_field <= -self.magnetic_field_value_for_fit)
# > 0:
indx = indx_plus
self.for_fit.magnetic_field = self.prepared_raw.magnetic_field[indx]
self.for_fit.magnetic_moment = self.prepared_raw.magnetic_moment[indx]
# fit the data:
par_plus, pcov = curve_fit(linearFunc,
self.for_fit.magnetic_field,
self.for_fit.magnetic_moment
)
# < 0:
indx = indx_minus
self.for_fit.magnetic_field = self.prepared_raw.magnetic_field[indx]
self.for_fit.magnetic_moment = self.prepared_raw.magnetic_moment[indx]
# fit the data:
par_minus, pcov = curve_fit(linearFunc,
self.for_fit.magnetic_field,
self.for_fit.magnetic_moment
)
self.linear_coefficient = 0.5*(par_plus[0] + par_minus[0])
self.line.magnetic_moment = (self.linear_coefficient * self.prepared_raw.magnetic_field)
# store to for_fit object:
indx = np.logical_or(indx_minus, indx_plus)
self.for_fit.magnetic_field = self.prepared_raw.magnetic_field[indx]
self.for_fit.magnetic_moment = self.prepared_raw.magnetic_moment[indx]
self.prepared_raw.magnetic_moment -= (self.linear_coefficient * self.prepared_raw.magnetic_field)
self.prepared_raw.append_history_log('line k={coef} * B was subtracted'.format(coef=self.linear_coefficient))
self.line.label = 'subtracted line $M-\\mathbf{{k}} \\ast B$: $\\mathbf{{k}}={:1.5}$ '.format(self.linear_coefficient)
# self.for_fit = deepcopy(self.prepared_raw)
self.line.do_plot = True
def fit_PM_single_phase(self):
# do a fit procedure:
# indx = np.argwhere(self.fit.magnetic_field >= 3)
indx = (np.abs(self.prepared_raw.magnetic_field) >= self.magnetic_field_value_for_fit)
# indx = ((self.prepared_raw.magnetic_field) >= self.magnetic_field_value_for_fit)
self.for_fit.magnetic_field = self.prepared_raw.magnetic_field[indx]
self.for_fit.magnetic_moment = self.prepared_raw.magnetic_moment[indx]
self.forFit_x = (self.g_factor * self.J_total_momentum * mu_Bohr * self.for_fit.magnetic_field) \
/ k_b / self.fit.temperature
self.forFit_y = self.for_fit.magnetic_moment
# try to fit concentration of Mn atoms n[1/m^3*1e27]
def fun_tmp(x, n):
return f_PM(x, n, J=self.J_total_momentum, g_factor=self.g_factor)
popt, pcov = curve_fit(fun_tmp,
xdata=self.forFit_x,
ydata=self.forFit_y,
)
self.concentration_ParaMagnetic = popt[0] #[1/m^3*1e27]
        self.concentration_ParaMagnetic_error = float(np.sqrt(np.diag(pcov))[0])  # 1-sigma error of the fitted concentration
# calc x values for all values of magnetic_field range:
xx = (self.g_factor * self.J_total_momentum * mu_Bohr * self.fit.magnetic_field) \
/ k_b / self.fit.temperature
self.fit.magnetic_moment = fun_tmp(xx, self.concentration_ParaMagnetic)
        # suppress the numerical uncertainty near B = 0:
self.fit.magnetic_moment[self.zeroIndex] = 0
self.calc_R_factor(raw=self.prepared_raw.magnetic_moment, fit=self.fit.magnetic_moment)
self.fit.label = \
'\nfit [$R=\\mathbf{{{R:1.3}}}\%$, $\sigma=\\mathbf{{{std:1.3}}}$] ' \
'\n$g_{{factor}}=\\mathbf{{{g_f:1.3}}}$, T={temper:2.1g}K\n'\
'$J({Mn_type}$, ${spin_type})=\\mathbf{{{J:1.3}}}$ $[\mu_{{Bohr}}]$'\
'\n$n_{{{Mn_type}}}=({conc:1.4g}\\pm{conc_error:1.4g})\\ast10^{{27}} [1/m^3]$' \
'\n or $\\mathbf{{{conc_GaAs:1.3g}}}\%$ of $n(GaAs)$'.format(
g_f=float(self.g_factor),
R=float(self.R_factor),
std=float(self.std),
temper=float(self.fit.temperature),
Mn_type=self.Mn_type,
spin_type=self.spin_type_cfg,
J=float(self.J_total_momentum),
conc=float(self.concentration_ParaMagnetic),
conc_error=float(np.round(self.concentration_ParaMagnetic_error,4)),
conc_GaAs=float(self.concentration_ParaMagnetic / 22.139136 * 100),
)
        print('->> fit PM (single PM phase) has been done. '
'For T = {0} K obtained n = {1:1.3g} *1e27 [1/m^3] or {2:1.3g} % of the n(GaAs)' \
.format(self.raw.temperature,
self.concentration_ParaMagnetic,
self.concentration_ParaMagnetic / 22.139136 * 100))
print('->> R = {R_f:1.5g} %'.format(R_f=self.R_factor))
print('->> J[{Mn_type}, {spin_type}] = {J:1.3} [mu(Bohr)]'.format(
Mn_type=self.Mn_type,
spin_type=self.spin_type_cfg,
J=float(self.J_total_momentum)))
def multi_phase_PM_func(self, n_concentration, magnetic_field, temperature, zero_index=None):
# calc Brillouin function for multi-phase sample
num = len(self.dict_of_magnetic_phases)
len_of_x = len(magnetic_field)
# vector for magnetic calculation:
vec_x = np.zeros(len_of_x)
#concentration of Mn atoms n[1/m^3*1e27]
out = np.zeros(len_of_x)
for i in self.dict_of_magnetic_phases:
val = self.dict_of_magnetic_phases[i]
n = n_concentration[i]
J = val['data'].J_total_momentum
g = val['data'].g_factor
Mn_type = val['data'].Mn_type
spin_type = val['data'].spin_type_cfg
tmp = np.zeros(len_of_x)
# create unique x-vector for Brillouin function:
vec_x = (g * J * mu_Bohr * magnetic_field) \
/ k_b / temperature
# tmp = f_PM(x=vec_x, n=n, J=J, g_factor=g)
tmp = f_PM_with_T(B=magnetic_field, n=n, J=J, T=temperature, g_factor=g)
            # suppress the numerical uncertainty near B = 0:
if zero_index is not None:
tmp[zero_index] = 0
out += tmp
return out
def fit_PM_multi_phase(self):
# do a fit procedure for the multi-phases magnetic material:
# indx = np.argwhere(self.fit.magnetic_field >= 3)
indx = (np.abs(self.prepared_raw.magnetic_field) >= self.magnetic_field_value_for_fit)
# indx = ((self.prepared_raw.magnetic_field) >= self.magnetic_field_value_for_fit)
self.for_fit.magnetic_field = self.prepared_raw.magnetic_field[indx]
self.for_fit.magnetic_moment = self.prepared_raw.magnetic_moment[indx]
self.forFit_y = self.for_fit.magnetic_moment
len_of_vec = len(self.forFit_y)
self.forFit_x = self.for_fit.magnetic_field
num = len(self.dict_of_magnetic_phases)
# try to fit concentration of Mn atoms n[1/m^3*1e27]
# construct tmp function for a minimization procedure:
def fun_tmp(n_concentration):
out = np.zeros(len_of_vec)
out = self.multi_phase_PM_func(n_concentration,
magnetic_field=self.forFit_x,
temperature=self.fit.temperature)
return self.get_R_factor(raw=self.forFit_y, fit=out)
# create bounds:
bounds = []
for i in range(num):
bounds.append((0, 10))
res = differential_evolution(fun_tmp, bounds)
self.concentration_ParaMagnetic = res.x #[1/m^3*1e27]
s2 = self.get_std(raw=self.forFit_y,
fit=self.multi_phase_PM_func(self.concentration_ParaMagnetic,
magnetic_field=self.forFit_x,
temperature=self.fit.temperature
)
)
se = approx_errors(fun_tmp, self.concentration_ParaMagnetic, epsilon=0.01*np.min(self.concentration_ParaMagnetic))
std = np.sqrt(s2) * se
self.concentration_ParaMagnetic_error = std
self.fit.magnetic_moment = self.multi_phase_PM_func(
self.concentration_ParaMagnetic,
magnetic_field=self.fit.magnetic_field,
temperature=self.fit.temperature,
zero_index=self.zeroIndex
)
        # suppress the numerical uncertainty near B = 0:
# self.fit.magnetic_moment[self.zeroIndex] = 0
self.calc_R_factor(raw=self.prepared_raw.magnetic_moment, fit=self.fit.magnetic_moment)
# write label for plotting:
self.fit.label = \
'\nFit [$R=\\mathbf{{{R:1.3}}}\%$, $\sigma=\\mathbf{{{std:1.3}}}$], T={temper:2.1g}K\n\n'.format(
R=float(self.R_factor),
std=float(self.std),
temper=float(self.fit.temperature),
)
tmp_txt = ''
for i in self.dict_of_magnetic_phases:
val = self.dict_of_magnetic_phases[i]
n = self.concentration_ParaMagnetic[i]
n_std = self.concentration_ParaMagnetic_error[i]
J = val['data'].J_total_momentum
g = val['data'].g_factor
Mn_type = val['data'].Mn_type
spin_type = val['data'].spin_type_cfg
tmp_txt += 'phase $\\mathbf{{{num_of_phase:}}}$:\n'\
'$g_{{factor}}=\\mathbf{{{g_f:1.3}}}$, $J({Mn_type}$, ${spin_type})=\\mathbf{{{J:1.3}}}$ $[\mu_{{Bohr}}]$'\
.format(
num_of_phase = i,
g_f=float(g),
Mn_type=Mn_type,
J=float(J),
spin_type=spin_type,
)
tmp_txt += '\n$n_{{{Mn_type}}}=({conc:1.4g}\\pm{conc_error:1.4g})\\ast10^{{27}} [1/m^3]$' \
'\n or $\\mathbf{{{conc_GaAs:1.3g}}}\%$ of $n(GaAs)$\n'.format(
Mn_type=Mn_type,
spin_type=spin_type,
conc=float(n),
conc_error=float(np.round(n_std, 4)),
conc_GaAs=float(n / 22.139136 * 100),
)
self.fit.label += tmp_txt
print('==='*15)
        print(' fit PM (multi PM phases) has been done. '
'For T = {0} K obtained:' \
.format(self.raw.temperature,)
)
print(' R = {R_f:1.5g} %'.format(R_f=self.R_factor))
for i in self.dict_of_magnetic_phases:
val = self.dict_of_magnetic_phases[i]
n = self.concentration_ParaMagnetic[i]
n_std = self.concentration_ParaMagnetic_error[i]
J = val['data'].J_total_momentum
g = val['data'].g_factor
Mn_type = val['data'].Mn_type
spin_type = val['data'].spin_type_cfg
print('------- phases #{}:'.format(i))
print(' n = ( {n:1.3g} +/- {err:1.4g} )*1e27 [1/m^3] or {n_2:1.3g} % of the n(GaAs)'.format(
n=n,
n_2=n / 22.139136 * 100,
err=n_std
)
)
print(' J[{Mn_type}, {spin_type}] = {J:1.3} [mu(Bohr)]'.format(
Mn_type=Mn_type,
spin_type=spin_type,
J=float(J)))
print('===' * 15)
def set_default_line_params(self):
self.raw.line_style = 'None'
self.raw.line_marker_size = 6
self.raw.line_alpha = 0.2
self.raw.line_marker_face_color = next(MagneticData.COLOR_CYCOL)
self.line.line_style = '-'
self.line.do_plot = False
self.line.line_width = 3
self.line.line_color = 'r'
self.line.line_alpha = 0.3
self.line.line_marker_style = 'None'
self.prepared_raw.line_style = 'None'
self.prepared_raw.line_marker_size = 6
self.prepared_raw.line_marker_style = 'v'
self.prepared_raw.line_alpha = 0.3
self.prepared_raw.line_marker_face_color = next(MagneticData.COLOR_CYCOL)
self.for_fit.line_style = 'None'
self.for_fit.line_marker_size = 12
self.for_fit.line_marker_style = 'D'
self.for_fit.line_alpha = 0.2
self.for_fit.line_marker_face_color = 'g'
self.for_fit.line_marker_edge_color = next(MagneticData.COLOR_CYCOL)
self.fit.line_style = 'None'
self.fit.line_marker_size = 9
self.fit.line_alpha = 0.3
self.fit.line_marker_face_color = next(MagneticData.COLOR_CYCOL)
    def plot(self, ax=None):
        if ax is None:
            ax = plt.gca()
        self.raw.plot(ax)
self.line.plot(ax)
self.prepared_raw.plot(ax)
self.for_fit.plot(ax)
self.fit.plot(ax)
ax.set_ylabel(self.y_label, fontsize=16, fontweight='bold')
ax.set_xlabel(self.x_label, fontsize=16, fontweight='bold')
ax.grid(True)
ax.set_title(self.title, fontsize=self.font_size)
# ax.legend(shadow=True, fancybox=True, loc='best')
ax.legend(shadow=False, fancybox=True, loc='best')
# ax.fill_between(x, y - error, y + error,
# alpha=0.2, edgecolor='#1B2ACC', facecolor='#089FFF',
# linewidth=4, linestyle='dashdot', antialiased=True, label='$\chi(k)$')
def calc_R_factor(self, raw=[], fit=[]):
# eval R-factor
denominator = np.sum(np.abs(raw))
if (len(raw) == len(fit)) and (denominator != 0):
self.R_factor = 100 * np.sum(np.abs(raw - fit))/denominator
self.std = np.sqrt(
np.sum(
(raw - fit)**2
) / ( len(raw) -1 )
)
else:
print('raw = {} and fit = {}'.format(len(raw), len(fit)))
def get_R_factor(self, raw=[], fit=[]):
self.calc_R_factor(raw, fit)
return self.R_factor
def get_std(self, raw=[], fit=[]):
self.calc_R_factor(raw, fit)
return self.std
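# Hedged illustration (toy numbers, not from any measurement): how the R-factor
# and standard deviation defined in StructBase.calc_R_factor behave on a small
# raw/fit pair.
def _example_R_factor():
    demo = StructBase()
    raw = np.array([1.0, 2.0, 3.0, 4.0])
    fit = np.array([1.1, 1.9, 3.2, 3.8])
    # R = 100 * sum(|raw - fit|) / sum(|raw|) = 6 % here; std is the RMS misfit
    return demo.get_R_factor(raw, fit), demo.std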
class StructComplex(StructBase):
def __init__(self):
super().__init__()
self.model_A = StructBase()
self.model_B = StructBase()
def set_global_Mn2_plus_high(self):
self.model_A.set_Mn2_plus_high()
self.model_B.set_Mn2_plus_high()
def set_global_Mn2_plus_low(self):
self.model_A.set_Mn2_plus_low()
self.model_B.set_Mn2_plus_low()
def set_global_Mn3_plus_low(self):
self.model_A.set_Mn3_plus_low()
self.model_B.set_Mn3_plus_low()
def find_common_region_for_fit(self):
if len(self.model_A.prepared_raw.magnetic_field) != len(self.model_B.prepared_raw.magnetic_field):
print('len(T={T1}K)={L1} but len(T={T2}K)={L2}'.format(
T1=self.model_A.prepared_raw.temperature,
L1=len(self.model_A.prepared_raw.magnetic_field),
T2=self.model_B.prepared_raw.temperature,
L2=len(self.model_B.prepared_raw.magnetic_field)
))
# Find the intersection of two arrays to avoid conflict with numbers of elements.
self.model_A.prepared_raw.accepted_indices = np.nonzero(
np.isin(self.model_A.prepared_raw.magnetic_field,
self.model_B.prepared_raw.magnetic_field))
self.model_B.prepared_raw.accepted_indices = np.nonzero(
np.isin(self.model_B.prepared_raw.magnetic_field,
self.model_A.prepared_raw.magnetic_field))
def prepare_data_for_diff_calc(self):
self.raw.magnetic_field = self.model_A.prepared_raw.magnetic_field[self.model_A.prepared_raw.accepted_indices]
# for calculating diff_PM we need 2 different Temperature data for ex: m(T=2K) - m(T=5K)
# select only common points in two models:
self.raw.magnetic_moment = \
self.model_A.prepared_raw.magnetic_moment[self.model_A.prepared_raw.accepted_indices] - \
self.model_B.prepared_raw.magnetic_moment[self.model_B.prepared_raw.accepted_indices]
self.prepared_raw = deepcopy(self.raw)
# shift spectra to the center line:
if len(self.prepared_raw.magnetic_moment[np.where(self.prepared_raw.magnetic_field > 0)]) \
!= \
len(self.prepared_raw.magnetic_moment[np.where(self.prepared_raw.magnetic_field < 0)]):
# reduce a noise:
negVal = abs(np.min(self.prepared_raw.magnetic_field))
pozVal = np.max(self.prepared_raw.magnetic_field)
if pozVal >= negVal:
limitVal = negVal
else:
limitVal = pozVal
eps = 0.001 * abs(abs(self.prepared_raw.magnetic_field[0]) - abs(self.prepared_raw.magnetic_field[1]))
self.prepared_raw.magnetic_field = self.prepared_raw.magnetic_field[
np.logical_or((np.abs(self.prepared_raw.magnetic_field) <= limitVal + eps),
(np.abs(self.prepared_raw.magnetic_field) <= eps))
]
Mp = []
Mn = []
Mp = self.prepared_raw.magnetic_moment[np.where(self.prepared_raw.magnetic_field > 0)]
Mn = self.prepared_raw.magnetic_moment[np.where(self.prepared_raw.magnetic_field < 0)]
if len(self.prepared_raw.magnetic_moment[np.where(self.prepared_raw.magnetic_field > 0)]) \
== \
len(self.prepared_raw.magnetic_moment[np.where(self.prepared_raw.magnetic_field < 0)]):
# reduce a noise:
Mp = self.prepared_raw.magnetic_moment[np.where(self.prepared_raw.magnetic_field > 0)]
Mn = self.prepared_raw.magnetic_moment[np.where(self.prepared_raw.magnetic_field < 0)]
self.prepared_raw.magnetic_moment[np.where(self.prepared_raw.magnetic_field > 0)] = \
0.5 * (Mp + np.abs(Mn[::-1]))
self.prepared_raw.magnetic_moment[np.where(self.prepared_raw.magnetic_field < 0)] = \
0.5 * (Mn - np.abs(Mp[::-1]))
# M_for_fit[(B > 0)] = 0.5*(Mp + np.abs(Mn))
# M_for_fit[(B < 0)] = 0.5*(Mn - np.abs(Mp))
self.for_fit = deepcopy(self.prepared_raw)
self.fit = deepcopy(self.prepared_raw)
if __name__ == '__main__':
tmp_obj = StructComplex()
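    # Hedged usage sketch (assumed call sequence, not from the original authors):
    # the Mn-type presets push J and the Lande g-factor into both sub-models
    # before any measurement data are attached.
    tmp_obj.set_global_Mn2_plus_high()
    print('model A preset Mn2+ (high spin): J = {0}, g = {1:.4f}'.format(
        tmp_obj.model_A.J_total_momentum, tmp_obj.model_A.g_factor))
    tmp_obj.set_global_Mn3_plus_low()
    print('model B preset Mn3+ (low spin):  J = {0}, g = {1:.4f}'.format(
        tmp_obj.model_B.J_total_momentum, tmp_obj.model_B.g_factor))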
print('-> you run ', __file__, ' file in a main mode') | yuginboy/from_GULP_to_FEFF | feff/libs/GaMnAs_sub_classes.py | Python | gpl-3.0 | 33,150 | [
"FEFF"
] | d452a4b82a396b982417c463ac8efd18887bf86eda7e902a2cfbc8f91a80096a |
#!/usr/bin/env python
"""
Diffract.py
"""
__date__ = "20130101"
__author__ = "jlettvin"
__maintainer__ = "jlettvin"
__email__ = "[email protected]"
__copyright__ = "Copyright(c) 2013 Jonathan D. Lettvin, All Rights Reserved"
__license__ = "GPLv3"
__status__ = "Production"
__version__ = "0.0.1"
"""
Diffract.py
Implements an Airy function generator/convolver.
Copyright(c) 2013 Jonathan D. Lettvin, All Rights Reserved"
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
###############################################################################
import sys, itertools, scipy
from scipy import array, arange, ones, zeros
from scipy import exp, sqrt, pi, fabs, ceil
from scipy import set_printoptions
from scipy.special import j1
from scipy.misc import imresize
from scipy.ndimage.interpolation import affine_transform
from Image import fromarray
from Pickets import Pickets
from Report import Report
###############################################################################
set_printoptions(precision=2, suppress=True, linewidth=150)
###############################################################################
class Human(Report):
aawf = {'radius' : 0e+0,
'amplitude' : 1e+0,
'aperture' : 1e-3,
'wavelength': 4e-7,
'focal' :17e-3}
zeroPoints = [ # u values where Airy goes to zero, discovered by hand.
3.83170611001,
7.01558711101,
10.1734711001, # Maximum kernel radius parameter.
# u = (pi*r*a) / (w*f) where:
# r radius kernel max
# a aperture diameter (fixed for a given iteration)
# w wavelength nanometers (fixed for a color plane)
# f focal length (fixed for a human eye)
# The radius of a kernel is determined by setting all other values.
13.3236911001,
16.4706211111,
19.6158611111]
maxima = [
{'u': 0.0 , 'a': 1.0 }, # 1/1
{'u': 5.13562311005, 'a': 0.0174978627858 }, # ~1/57
{'u': 8.41724511071, 'a': 0.00415799638453 }, # ~1/240
# This is proof that only three maxima and three zeros are needed
# for 32 bit color (8bit Red, Green, and Blue).
{'u': 11.6198420998, 'a': 0.00160063766822 }, # ~1/624
{'u': 14.7959530997, 'a': 0.000779445355471}, # ~1/1284
{'u': 17.9598201116, 'a': 0.000437025551621}, # ~1/2288
{'u': 21.1169981116, 'a': 0.000269287409511},] # ~1/3717
@property
def parameters(self):
return Human.aawf
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __init__(self, **kw):
"""
__init__ generates triangular kernel wedges
then distributes the wedge over eight-fold symmetry
to fill an entire square of odd length edge.
"""
self.kw = kw
self.verbose = self.kw.get('verbose', False)
self.ignore = self.kw.get('ignore', 1.0/255.0)
self.coeff = 2e-1
self.radius = self.kernelRadius(**kw)
self.edge = 1 + 2 * self.radius
self.shape = (self.edge, self.edge)
self.original = zeros(self.shape, float)
self.offset = Pickets(4)
self.pickets = Pickets(1)
kw['radius'] = 0.0
test = self.wave()
if self.verbose:
self.info('generating kernels for %d radial intervals' %
(self.offset.interval))
if self.kw.get('generate', False):
w, a = 534e-9, 7e-3
# Generate kernels for sub-pixel offsets.
for dx, dy in self.offset.data:
kernel = self.genAiry(dx, dy, w, a)
else:
pass
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def wave(self, **kw):
amplitude, r, a, w, f = (
kw.get(key, Human.aawf[key]) for key in (
'amplitude', 'radius', 'aperture', 'wavelength', 'focal'))
u = (pi*r*a)/(w*f)
if isinstance(r,float):
return amplitude*(1.0 if u == 0.0 else 2.0*j1(u)/u)
else:
u[u==0.0] = 1e-32
return amplitude*2.0*j1(u)/u
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def kernelRadius(self, **kw):
"""
        For r in meters:
u = (pi*r*a) / (w*f) # Standard calculation for wave equation
r = (u*w*f) / (pi*a) # Algebraic method for determining radius
wiggle # Leaves room for picket wiggle
"""
a, w, f = (float(kw.get(key, Human.aawf[key]))
for key in ('aperture', 'wavelength', 'focal'))
        u3, microns_per_meter = Human.zeroPoints[2], 1e6  # third Airy zero, and meters-to-microns factor
        return int(ceil((u3 * microns_per_meter * w * f) / (pi*a)))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def genRadii(self, R, dxy=(0.0,0.0), exy=(0.0,0.0)):
        edge = 1.0 + 2.0 * R
        um = 1e-6
        radii = zeros((edge, edge), float)
        dx, dy = dxy
        ex, ey = exy
        sequence = [(X, Y, um*float(X-R), um*float(Y-R)) for X, Y in
                list(itertools.product(range(int(edge)), repeat=2))]
        for X, Y, x, y in sequence:
            # radial distance of sample (X,Y) from the kernel center, including
            # the pixel offset (dx,dy) and the sub-pixel offset (ex,ey)
            radii[X,Y] = sqrt((x+dx+ex)**2 + (y+dy+ey)**2)
        return radii
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def genAiry(self, dx, dy, w, a, **kw):
if self.verbose:
self.info("generating kernel at offset(%1.1f,%1.1f)" % (dx,dy))
um = 1e-6 # microns in meters
kw['aperture' ] = a # millimeters in meters
kw['wavelength'] = w # nanometers in meters
R = self.radius = self.kernelRadius(**kw) # max displace from (0,0)
R1 = R + 1 # slop for subpixel offset
self.edge = 1 + 2 * R1 # linear size of mask
radii = zeros((self.edge, self.edge), float)
accum = zeros((self.edge, self.edge), float)
# Make a list of [X,Y] indices and (x,y) coordinates from (0,0) center.
sequence = [(X, Y, um*float(X-R1), um*float(Y-R1)) for X, Y in
list(itertools.product(range(self.edge), repeat=2))]
# List of sub-pixel displacements
pickets1 = [[0.0,0.0],]
pickets2 = array(self.pickets.data) * um
# Introduce multiple small offsets to emulate a pixel's
# physical face size. This eliminates sampling errors
# that would artificially amplify exactly centered pixels.
for ex, ey in pickets2:
for X, Y, x, y in sequence:
# Determine optical displacement
                radii[X,Y] = sqrt((x+dx+ex)**2 + (y+dy+ey)**2)
kw['radius'] = radii
# Generate wave function
component = self.wave(**kw)
# Eliminate radii outside third zero?
component[radii > (R*um)] = 0.0
                # Sum (precursor to discrete integration).
accum += component
# Normalize to sqrt of intensity map sum
accum /= sqrt((accum ** 2).sum())
# Keep a copy as a file.
self.save(accum, R, dx, dy)
# Return kernel.
return accum
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def genGauss(self, r=3.0, **kw):
# 1/255 is interpreted as a threshold value of light detection.
# Gauss is value = exp(-coeff * r * r)
# 1/255 = exp(-coeff * r * r)
# log(1/255) = -coeff * r * r
# -log(1/(255*r*r)) = coeff
radii = self.genRadii(r) # Make a field of radii
coeff = - scipy.log(1.0 / (255.0 * r * r)) # Calculate coefficient
accum = exp(-coeff * radii)**2 # Make a Gaussian field
accum[radii>r] = 0.0 # Eliminate outliers
fudge = 0.95 # Reduce mask (why?)
accum *= fudge / accum.sum() # normalize mask
return accum
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def save(self, kernel, R, dx=0, dy=0, **kw):
rescaled = (255.0 * kernel).astype('uint8')
name = "kernels/Airy/kernel.Airy.R%d.dx%1.2f.dy%1.2f" % (R, dx, dy)
image = fromarray(rescaled)
image.save(name+".png")
scipy.save(name+".npy", kernel)
#MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM
if __name__ == "__main__":
def test_kernel_radius(human):
aawf = human.parameters
radius = human.kernelRadius
for a in arange(1.0e-3, 9.0e-3, 1.0e-3):
aawf['aperture'] = a
print "Aperture %f kernel radius=%6.3f" % (a, radius(**aawf))
human = Human(verbose=True, generate=True)
test_kernel_radius(human)
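    # Hedged worked example (a cross-check added for illustration, not original
    # code): invert u = (pi*r*a)/(w*f) by hand for the current aawf parameters
    # to show where kernelRadius gets its pixel count (1 um per kernel pixel).
    aawf = human.parameters
    a, w, f = aawf['aperture'], aawf['wavelength'], aawf['focal']
    u3 = Human.zeroPoints[2]                    # third zero of the Airy pattern
    r_microns = (u3 * 1e6 * w * f) / (pi * a)   # radius out to the third zero, in microns
    print "third-zero radius %.3f um -> kernel radius %d px" % (r_microns, human.kernelRadius())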
#human.save()
| jlettvin/rpna | gpgpu/Diffract.py | Python | gpl-3.0 | 10,064 | [
"Gaussian"
] | f85817fd5b14f258c2d425af474b55870c33e5b4c1f6f0c1136d62a30d046db9 |
# File: jsdoc2xml.py
# TODO: [unknown] treat the constructor specially
# * P_LZ_COPYRIGHT_BEGIN ******************************************************
# * Copyright 2001-2006 Laszlo Systems, Inc. All Rights Reserved. *
# * Use is subject to license terms. *
# * P_LZ_COPYRIGHT_END ********************************************************
from __future__ import nested_scopes
import os
import xmldocutils
from xmldocutils import *
# Old scaffolding from Compiler.py
false, true = 0, 1
import org.openlaszlo.sc.Compiler as Compiler
import org.openlaszlo.sc.CompilerError as CompilerError
import org.openlaszlo.sc.parser.ParseException as ParseException
# jythonc has special support for 'import...as'. It's necessary to use
# it for importing classes from the same JAR that this file is compiled
# into.
import org.openlaszlo.sc.parser.ASTAndExpressionSequence as AndExpressionSequence
import org.openlaszlo.sc.parser.ASTArrayLiteral as ArrayLiteral
import org.openlaszlo.sc.parser.ASTAssignmentExpression as AssignmentExpression
import org.openlaszlo.sc.parser.ASTBinaryExpressionSequence as BinaryExpressionSequence
import org.openlaszlo.sc.parser.ASTBreakStatement as BreakStatement
import org.openlaszlo.sc.parser.ASTCallExpression as CallExpression
import org.openlaszlo.sc.parser.ASTCaseClause as CaseClause
import org.openlaszlo.sc.parser.ASTClassDefinition as ClassDefinition
import org.openlaszlo.sc.parser.ASTConditionalExpression as ConditionalExpression
import org.openlaszlo.sc.parser.ASTContinueStatement as ContinueStatement
import org.openlaszlo.sc.parser.ASTDefaultClause as DefaultClause
import org.openlaszlo.sc.parser.ASTDirectiveBlock as DirectiveBlock
import org.openlaszlo.sc.parser.ASTDoWhileStatement as DoWhileStatement
import org.openlaszlo.sc.parser.ASTEmptyExpression as EmptyExpression
import org.openlaszlo.sc.parser.ASTExpressionList as ExpressionList
import org.openlaszlo.sc.parser.ASTExtends as Extends
import org.openlaszlo.sc.parser.ASTForInStatement as ForInStatement
import org.openlaszlo.sc.parser.ASTForStatement as ForStatement
import org.openlaszlo.sc.parser.ASTForVarInStatement as ForVarInStatement
import org.openlaszlo.sc.parser.ASTForVarStatement as ForVarStatement
import org.openlaszlo.sc.parser.ASTFormalParameterList as FormalParameterList
import org.openlaszlo.sc.parser.ASTFunctionCallParameters as FunctionCallParameters
import org.openlaszlo.sc.parser.ASTFunctionDeclaration as FunctionDeclaration
import org.openlaszlo.sc.parser.ASTFunctionExpression as FunctionExpression
import org.openlaszlo.sc.parser.ASTIdentifier as Identifier
import org.openlaszlo.sc.parser.ASTIfDirective as IfDirective
import org.openlaszlo.sc.parser.ASTIfStatement as IfStatement
import org.openlaszlo.sc.parser.ASTIncludeDirective as IncludeDirective
import org.openlaszlo.sc.parser.ASTLabeledStatement as LabeledStatement
import org.openlaszlo.sc.parser.ASTLiteral as Literal
import org.openlaszlo.sc.parser.ASTNewExpression as NewExpression
import org.openlaszlo.sc.parser.ASTObjectLiteral as ObjectLiteral
import org.openlaszlo.sc.parser.ASTOperator as Operator
import org.openlaszlo.sc.parser.ASTOrExpressionSequence as OrExpressionSequence
import org.openlaszlo.sc.parser.ASTPostfixExpression as PostfixExpression
import org.openlaszlo.sc.parser.ASTPragmaDirective as PragmaDirective
import org.openlaszlo.sc.parser.ASTProgram as Program
import org.openlaszlo.sc.parser.ASTPropertyIdentifierReference as PropertyIdentifierReference
import org.openlaszlo.sc.parser.ASTPropertyValueReference as PropertyValueReference
import org.openlaszlo.sc.parser.ASTReturnStatement as ReturnStatement
import org.openlaszlo.sc.parser.ASTStatement as Statement
import org.openlaszlo.sc.parser.ASTStatementList as StatementList
import org.openlaszlo.sc.parser.ASTSuperCallExpression as SuperCallExpression
import org.openlaszlo.sc.parser.ASTSwitchStatement as SwitchStatement
import org.openlaszlo.sc.parser.ASTThisReference as ThisReference
import org.openlaszlo.sc.parser.ASTThrowStatement as ThrowStatement
import org.openlaszlo.sc.parser.ASTTryStatement as TryStatement
import org.openlaszlo.sc.parser.ASTUnaryExpression as UnaryExpression
import org.openlaszlo.sc.parser.ASTVariableDeclaration as VariableDeclaration
import org.openlaszlo.sc.parser.ASTVariableStatement as VariableStatement
import org.openlaszlo.sc.parser.ASTWhileStatement as WhileStatement
import org.openlaszlo.sc.parser.ASTWithStatement as WithStatement
class Visitor:
def getVisitor(self, node):
# trim the module name, and the initial 'AST'
if node.class is Identifier:
name = 'Identifier'
else:
name = node.class.name
name = name[name.rfind('.')+4:]
return getattr(self, 'visit' + name, None)
# end scaffolding
# This is a bit of a kludge. The doc tool will barf if documentation
# is different for different targets, so we just ignore all targets
# but one.
COMPILER_CONSTANTS = {'$debug': true,
'$profile': false,
'$swf6': true,
'$swf7': false,
'$swf8': false,
'$dhtml': false}
class DocumentationError(CompilerError):
pass
# FIXME: [2003-05-08 ptw] Need a way to get the right number
SINCE = 2.2
# FIXME: [2003-05-08 ptw] Migrate these to the sources.
# ['LzCloneDatapath.clonenumber',
# 'LzDatapath.text-nodes',
# 'LzDatapointer.text-nodes',
# 'LzDatapointer.getOtherNodeText',
# 'LzDataset.acceptencodings',
# 'LzDataset.sendheaders',
# 'LzDataset.cacheable',
# 'Debug.escapeText',
# 'LzBrowser.xmlEscape']
def carefulParseComment(str, validKeys):
try:
return parseComment(str, validKeys)
except Exception, e:
raise DocumentationError("malformed comment: %s" % e)
class CommentExtractor(Visitor):
def __init__(self, handler):
self.context = None #'.'
self.handler = handler
self.insideFunction = False
self.errors = []
def parse(self, str):
p = Compiler.Parser().parse(str)
self.visit(p)
def read(self, uname):
import os
fname = os.path.join(self.context or '.', uname)
saved = self.context
self.context = self.context or os.path.split(fname)[0]
f = open(fname)
try:
try:
self.parse(('#file "%s"\n#line 1\n' % uname) + f.read())
finally:
f.close()
except xmldocutils.InputError, e:
raise DocumentationError('%s: %s' % (fname, str(e)))
except ParseException, e:
raise CompilerError(("while in %s\n" % uname) + str(e))
self.context = saved
def visit(self, node):
fn = self.getVisitor(node)
children = node.children
try:
if fn:
return fn(node, *children)
# Sanity
if node.comment and \
isDocComment(node.comment):
if len(children) >= 1 and children[0].comment == node.comment:
# fall through to children loop
pass
else:
raise DocumentationError("Unhandled doc comment: %s" % (node.comment), node)
except DocumentationError, e:
if not e.node:
e.attachNode(node)
self.errors.append(e)
except CompilerError, e:
if not e.node:
e.attachNode(node)
raise
for c in children:
try:
self.visit(c)
except DocumentationError, e:
if not e.node:
e.attachNode(node)
self.errors.append(e)
except CompilerError, e:
if not e.node:
e.attachNode(node)
raise
def doClassConstructor(self, className, superclassName, constructor, comment):
# permit 'param' because they will be consumed by
# the addFunction of the constructor below
k = self.handler.addClass(className, superclassName, comment, constructor and ['param'] or [])
if constructor:
# process constructor
if 'private_constructor' in k.keywords:
name = None
else:
name = className + '.constructor'
self.doFunctionStatements(name, constructor, comment)
def doFunctionStatements(self, name, function, comment):
children = function.children
if name:
# In debug/profile mode, function expressions have names
if len(children) == 2:
args = [id.name for id in children[0].children]
else:
args = [id.name for id in children[1].children]
self.handler.addFunction(name, args, comment)
# process fields and events
saved = self.insideFunction
self.insideFunction = True
self.visit(children[-1])
self.insideFunction = saved
def visitAssignmentExpression(self, node, *children):
comment = node.comment
comment = comment and extractComment(comment)
name = Compiler.ParseTreePrinter().visit(node[0]).replace('.prototype.', '.')
value = node[2]
if name.startswith('_root.'):
name = name.split('.', 1)[1]
path = name.split('.')
lastname = path[-1]
if value.class is FunctionExpression:
if '.' not in name:
if comment:
self.doClassConstructor(name, 'Object', value, comment)
return
elif not comment and not name.endswith('.dependencies') and \
lastname not in ('initialize', 'toString'):
raise DocumentationError('%s: missing documentation' % name, node)
if comment:
self.doFunctionStatements(name, value, comment)
elif '.' not in name:
# name = new Object(...)
# name = new LzTransformer(...)
if value.class is NewExpression and \
value[0].class is CallExpression and \
value[0][0].class is Identifier and \
value[0][0].name in ['Object', 'LzTransformer']:
self.handler.addClass(name, value[0][0].name, comment)
# name = new Object;
elif value.class is NewExpression and \
value[0].class is Identifier and \
value[0].name == 'Object':
self.handler.addClass(name, 'Object', comment)
# name = Object;
elif value.class is Identifier and \
value.name == 'Object':
self.handler.addClass(name, 'Object', comment)
# name = Class("name", null, ...);
# name = Class("name", Super, ...);
# --- Temporarily look for NewLzClass
elif value.class is CallExpression and \
value[0].class is Identifier and \
value[0].name in ['Class', 'NewLzClass'] and \
value[1][0].value == name:
if value[1][1].class is Identifier:
super = value[1][1].name
else:
super = value[1][1]
assert super.class is Literal
assert not super.value
super = 'Object'
if len(value[1]) > 2 and \
value[1][2].class is FunctionExpression:
constructor = value[1][2]
else:
constructor = None
self.doClassConstructor(name, super, constructor, comment)
# name = LzTransformer();
elif value.class is CallExpression and \
value[0].class is Identifier and \
value[0].name == 'LzTransformer':
self.handler.addClass(name, 'Object', comment)
#elif not self.insideFunction:
# print 'skipping', name
elif len(path) > 2 and path[-2] == 'setters':
c = carefulParseComment(comment, ['keywords', 'field', 'since', 'deprecated'])
if 'private' in c.keywords:
pass
else:
k = self.handler.internClass(path[0])
# N.B.: fields will be handled in visitStatement
since = getattr(c, 'since', None)
deprecated = getattr(c, 'deprecated', None)
if deprecated or (since and float(since) > SINCE):
pass
else:
k.setters.append(lastname)
if c.comment and isDocComment(node.comment):
raise DocumentationError("Unhandled doc comment: %s" % (c.comment), node)
def visitIfDirective(self, node, test, *branches):
if test.class is Identifier and COMPILER_CONSTANTS.get(test.name) is not None:
if COMPILER_CONSTANTS[test.name]:
branches = branches[:1]
else:
branches = branches[1:]
map(self.visit, branches)
def visitStatement(self, node, *children):
if node.comment:
comment = node.comment.strip()
if (comment.find('@field') != -1 \
or comment.find('@event') != -1):
lines = comment.split('\n')
lines = [line[2:] for line in lines]
if self.insideFunction:
tags = ['field', 'event', 'since', 'deprecated']
else:
# Allow keywords param return if not in a function
tags = ['field', 'event', 'since', 'deprecated', 'keywords', 'param', 'return']
c = carefulParseComment('\n'.join(lines), tags)
if 'private' in c.keywords:
pass
else:
since = getattr(c, 'since', None)
deprecated = getattr(c, 'deprecated', None)
if deprecated or (since and float(since) > SINCE):
def ign(name):
IGNORED.append(name)
map(ign, c.getFields('field'))
map(ign, c.getFields('event'))
else:
map(self.handler.addField, c.getFields('field'))
map(self.handler.addEvent, c.getFields('event'))
if self.insideFunction and \
len(children) == 1 and children[0].comment == node.comment:
# comment has been handled
return
elif isDocComment(comment):
if len(children) >= 1 and children[0].comment == node.comment:
# fall through to children loop
pass
else:
raise DocumentationError("Unhandled doc comment: %s" % (node.comment), node)
map(self.visit, children)
def visitVariableStatement(self, node, decl):
if len(decl.children) == 1:
return
if len(decl.children) != 2:
return # TODO: [unknown]
name, value = decl.children
# var name = Class("name", null)
# var name = Class("name", Super)
if value.class is CallExpression and \
value[0].class is Identifier and \
value[0].name == 'Class' and \
value[1][0].class is Literal and \
value[1][0].value == name.name:
s = node.comment and extractComment(node.comment)
if value[1][1].class is Identifier:
super = value[1][1].name
else:
super = value[1][1]
assert super.class is Literal
assert not super.value
super = 'Object'
self.handler.addClass(name.name, super, s)
# var name = new Object(...)
if value.class is NewExpression and \
value[0].class is CallExpression and \
value[0][0].class is Identifier and \
value[0][0].name == 'Object':
s = node.comment and extractComment(node.comment)
self.handler.addClass(name.name, 'Object', s)
def visitPropertyIdentifierReference(self, node, *children):
return self.visit(node[0])
def visitIdentifier(self, node):
return node.comment
def visitFunctionDeclaration(self, node, *children):
s = node.comment
if s and s.find('DEFINE OBJECT') > 0:
comment = extractComment(node.comment)
self.doClassConstructor(node[0].name, 'Object', node, comment)
def visitFunctionExpression(self, *children):
pass
def visitIncludeDirective(self, node, *children):
self.read(node[0].value)
import re
docCommentPattern = re.compile(r'''.*?^\s*//(\s*@|---|===).*''', re.MULTILINE)
def isDocComment(comment):
return re.match(docCommentPattern, comment)
# Extract the comment between //==='s (top level style) or all the
# comment following the first @ (inline style)
def extractComment(str):
if isDocComment(str):
lines = [s[2:] for s in str.split('\n') if s.startswith('//')]
i = len(lines)-1
while i >= 0 and lines[i][:3] not in ('---', '==='):
i -= 1
if i > 0:
lines = lines[:i]
i = len(lines)-1
while i >= 0 and lines[i][:3] not in ('---', '==='):
i -= 1
if i >= 0:
lines = lines[i+1:]
if lines and lines[0].strip().startswith('DEFINE OBJECT'):
lines = lines[1:]
else:
while lines and not lines[0].strip().startswith('@'):
lines = lines[1:]
return '\n'.join(lines)
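# Hedged illustration (the sample strings are invented for demonstration, not
# taken from the LFC sources): a tiny driver for the two comment styles that
# extractComment understands.
def _demoExtractComment():
    inline = "// @param n: a number\n// @return: twice n\n"
    block = "//==============\n// heading text\n// @field count: how many\n//==============\n"
    print extractComment(inline)   # -> the @param/@return lines
    print extractComment(block)    # -> the @field line found inside the === fences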
def getKeywords(comment):
for line in comment.split('\n'):
line = line.strip()
if line.startswith('@keywords'):
return line.split()[1:]
return []
class Handler:
def __init__(self):
self.classes = {}
self.lastClass = None
def addClass(self, name, super, comment, additionalKeywords=[]):
if self.classes.has_key(name):
raise DocumentationError('duplicate definition for class %s' % name)
additionalKeywords = additionalKeywords + (['field', 'event', 'keywords', 'since', 'deprecated'])
c = carefulParseComment(comment, additionalKeywords)
deprecated = getattr(c, 'deprecated', None)
since = getattr(c, 'since', None)
if deprecated or (since and float(since) > SINCE):
IGNORED.append(name)
return
# TODO: [unknown] copy params to constructor
k = Class(name=name, extends=super, desc=c.comment, keywords=c.keywords)
self.lastClass = k
self.classes[name] = k
map(self.addField, c.getFields('field'))
map(self.addEvent, c.getFields('event'))
return k
def internClass(self, name):
if not self.classes.has_key(name):
if name != 'Object':
raise DocumentationError('undefined class %s' % name)
self.addClass(name, 'LzNode', None)
return self.classes[name]
def addEvent(self, event):
if not self.lastClass:
raise DocumentationError('no class to add this event to')
self.lastClass.events.append(event)
def addField(self, field):
if not self.lastClass:
raise DocumentationError('no class to add this field to')
self.lastClass.attributes.append(field)
def addFunction(self, name, args, comment):
c = carefulParseComment(comment, ['param', 'return', 'keywords', 'since', 'deprecated'])
deprecated = getattr(c, 'deprecated', None)
since = getattr(c, 'since', None)
if deprecated or (since and float(since) > SINCE):
IGNORED.append(name)
return
if '.' not in name:
return # TODO: [unknown] create global function
parts = name.split('.')
if len(parts) != 2:
return
kn, mn = name.split('.')
params = [Param(name=pn) for pn in args]
undoced = list(args)
for k, v in c.params.items():
candidates = [p for p in params if p.name == k]
            if len(candidates) != 1:
                raise DocumentationError('%s: comment for nonexistent parameter %r' % (name, k))
undoced.remove(k)
candidates[0].desc = v
candidates[0].type = c.paramTypes.get(k)
# TODO [2006-03-13 ptw] Enable this when all params are actually doc-ed
# if undoced:
# raise DocumentationError('%s: no comment for parameter(s) %r' % (name, undoced))
method = Method(name=mn, parameters=params, desc=c.comment)
if hasattr(c, 'return'):
rd = getattr(c, 'return')
if rd.startswith(':'): rd = rd[1:]
type = None
if ':' in rd:
type, rd = rd.split(':', 1)
type = type.strip()
rd = rd.strip()
setattr(method, 'return', Return(desc=rd, type=type))
if not 'private' in c.keywords:
klass = self.internClass(kn)
if [m for m in klass.methods if m.name == mn]:
raise DocumentationError('duplicate method definition: %s.%s' % (klass.name, mn))
klass.methods.append(method)
class Class(Serializable):
Name = 'api'
AttrAttrs = ['name', 'extends']
ContentAttrs = ['desc', 'attributes', 'fields', 'events', 'methods']
def __init__(self, **initargs):
Serializable.__init__(self, **initargs)
self.methods = []
self.attributes = []
self.events = []
self.setters = []
def finalize(self):
def hasSetter(attr):
return attr.name in self.setters
self.fields = [Field(name=a.name, type=a.type, desc=a.desc)
for a in self.attributes if not hasSetter(a)]
self.attributes = [a for a in self.attributes if hasSetter(a)]
for name in self.setters:
if not [a for a in self.attributes if a.name == name]:
self.attributes.append(Attribute(name=name))
class Method(Serializable):
AttrAttrs = ['name', 'keywords']
ContentAttrs = ['desc', 'parameters', 'return']
class Return(Serializable):
AttrAttrs = ['type']
ContentAttrs = ['desc']
class Field(Serializable):
AttrAttrs = ['name', 'type']
ContentAttrs = ['desc']
# used for debugging
def showComments(n):
if n.comment:
print n, n.comment
map(showComments, n.children)
def process(fname='LaszloLibrary.lzs'):
import os
try: os.makedirs('jsdocs')
except: pass
global IGNORED
IGNORED = []
h = Handler()
ce = CommentExtractor(h)
ce.read(os.path.join(os.getenv('LPS_HOME'), 'WEB-INF/lps/lfc/', fname))
errors = ce.errors
for k in h.classes.values():
if 'private' in k.keywords:
continue
k.finalize()
fname = os.path.join('jsdocs', 'api-' + k.name.lower() + '.xml')
f = open(fname, 'w')
try:
k.toxml(Writer(f))
finally:
f.close()
import org.apache.xerces.parsers.SAXParser as SAXParser
import java.io.FileReader as FileReader
import org.xml.sax.InputSource as InputSource
import org.xml.sax.helpers.DefaultHandler as DefaultHandler
import org.xml.sax.SAXParseException as SAXParseException
handler = DefaultHandler()
p = SAXParser()
p.contentHandler = handler
p.errorHandler = handler
r = FileReader(fname)
try:
p.parse(InputSource(r))
except SAXParseException, e:
errors.append(DocumentationError('%s: %s' % (fname, e.message)))
#print 'ignored', IGNORED
if errors:
for e in errors:
print e.toString()
print "Mistakes were made"
return 1
else:
return 0
test = process
def main(args):
assert not args
status = process()
import sys
sys.exit(status)
if __name__ == '__main__':
import sys
main(sys.argv[1:])
else:
#test('core/LzNode.as')
test()
| mcarlson/openlaszlo | WEB-INF/lps/server/sc/jsdoc2xml.py | Python | epl-1.0 | 24,404 | [
"VisIt"
] | f7296f3f4fbcfca8d260470e7f86c69353fac0680e85a2ff3222c6c7fa9bfaea |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2005-2007 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <[email protected]>
##
""" Product delivery editor implementation """
import collections
import decimal
from kiwi.datatypes import ValidationError
from kiwi.ui.forms import PriceField, DateField, TextField, BoolField, EmptyField
from kiwi.ui.objectlist import Column, ObjectList
from stoqlib.api import api
from stoqlib.domain.person import Client, Transporter
from stoqlib.domain.sale import Delivery
from stoqlib.gui.base.dialogs import run_dialog
from stoqlib.gui.editors.baseeditor import BaseEditor
from stoqlib.gui.editors.noteeditor import NoteEditor
from stoqlib.gui.fields import AddressField, PersonField
from stoqlib.lib.dateutils import localtoday
from stoqlib.lib.decorators import cached_property
from stoqlib.lib.formatters import format_quantity
from stoqlib.lib.parameters import sysparam
from stoqlib.lib.translation import stoqlib_gettext
_ = stoqlib_gettext
class _CreateDeliveryModel(object):
_savepoint_attr = ['price', 'notes', 'client', 'transporter', 'address',
'estimated_fix_date']
def __init__(self, price=None):
self.price = price
self.notes = None
self.client = None
self.transporter = None
self.address = None
self.estimated_fix_date = localtoday().date()
self.description = _(u'Delivery')
self._savepoint = {}
# Since _CreateDeliveryModel is not a Domain, changes to it can't be
# undone by a store.rollback(). Therefore, we must make the rollback
# by hand.
def create_savepoint(self):
for attr in self._savepoint_attr:
self._savepoint[attr] = getattr(self, attr)
def rollback_to_savepoint(self):
if not self._savepoint:
return
for attr in self._savepoint_attr:
setattr(self, attr, self._savepoint.get(attr))
return self
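# A minimal sketch of the savepoint protocol above (illustrative only; the
# attribute values are made up):
#
#   model = _CreateDeliveryModel(price=10)
#   model.create_savepoint()        # snapshot price/notes/client/... values
#   model.price = 99                # edits made while the editor is open
#   model.rollback_to_savepoint()   # on cancel, price is restored to 10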
class CreateDeliveryEditor(BaseEditor):
"""A fake delivery editor implementation.
    This is used to get the information for creating a delivery,
    without really creating it.
"""
model_name = _('Delivery')
model_type = _CreateDeliveryModel
form_holder_name = 'forms'
gladefile = 'CreateDeliveryEditor'
title = _('New Delivery')
form_columns = 2
size = (750, 550)
@cached_property()
def fields(self):
# Only users with admin or purchase permission can modify transporters
user = api.get_current_user(self.store)
can_modify_transporter = any((
user.profile.check_app_permission(u'admin'),
user.profile.check_app_permission(u'purchase'),
))
return collections.OrderedDict(
client_id=PersonField(_("Client"), proxy=True, mandatory=True,
person_type=Client),
transporter_id=PersonField(_("Transporter"), proxy=True,
person_type=Transporter,
can_add=can_modify_transporter,
can_edit=can_modify_transporter),
address=AddressField(_("Address"), proxy=True, mandatory=True),
price=PriceField(_("Delivery cost"), proxy=True),
estimated_fix_date=DateField(_("Estimated delivery date"), proxy=True),
)
def __init__(self, store, model=None, sale_items=None):
self.sale_items = sale_items
self._deliver_items = []
if not model:
for sale_item in sale_items:
sale_item.deliver = True
else:
model.create_savepoint()
# Store this information for later rollback.
for sale_item in sale_items:
self._deliver_items.append(sale_item.deliver)
BaseEditor.__init__(self, store, model)
self._setup_widgets()
def _setup_widgets(self):
self.additional_info_label.set_size('small')
self.additional_info_label.set_color('red')
self.register_validate_function(self._validate_widgets)
self.set_description(self.model_name)
self._update_widgets()
def _validate_widgets(self, validation_value):
if validation_value:
validation_value = any(item.deliver for item in self.items)
self.refresh_ok(validation_value)
def _update_widgets(self):
if self.model.notes:
self.additional_info_label.show()
else:
self.additional_info_label.hide()
def _get_sale_items_columns(self):
return [Column('code', title=_('Code'),
data_type=str),
Column('description', title=_('Description'),
data_type=str, expand=True),
Column('quantity', title=_('Quantity'),
data_type=decimal.Decimal, format_func=format_quantity),
Column('deliver', title=_('Deliver'),
data_type=bool, editable=True)]
#
# Callbacks
#
def on_additional_info_button__clicked(self, button):
if run_dialog(NoteEditor, self, self.store, self.model, 'notes',
title=_('Delivery Instructions')):
self._update_widgets()
def on_estimated_fix_date__validate(self, widget, date):
if date < localtoday().date():
return ValidationError(_("Expected delivery date must "
"be set to a future date"))
def on_price__validate(self, widget, price):
if price < 0:
return ValidationError(
_("The Delivery cost must be a positive value."))
def on_client_id__content_changed(self, combo):
client_id = combo.get_selected_data()
if client_id:
client = self.store.get(Client, client_id)
self.fields['address'].set_from_client(client)
else:
client = None
self.model.client = client
def _on_items__cell_edited(self, items, item, attribute):
self.force_validation()
#
# BaseEditor hooks
#
def create_model(self, store):
price = sysparam.get_object(store, 'DELIVERY_SERVICE').sellable.price
return _CreateDeliveryModel(price=price)
def setup_slaves(self):
self.items = ObjectList(columns=self._get_sale_items_columns(),
objects=self.sale_items)
self.items.connect('cell-edited', self._on_items__cell_edited)
self.addition_list_holder.add(self.items)
self.items.show()
def on_cancel(self):
# FIXME: When Kiwi allows choosing proxies to save upon confirm, apply
# that here instead of making this rollback by hand. Bug 5415.
self.model.rollback_to_savepoint()
if self._deliver_items:
for sale_item, deliver in zip(self.sale_items, self._deliver_items):
sale_item.deliver = deliver
def on_confirm(self):
estimated_fix_date = self.estimated_fix_date.read()
for sale_item in self.sale_items:
sale_item.estimated_fix_date = estimated_fix_date
class DeliveryEditor(BaseEditor):
"""An editor for :class:`stoqlib.domain.sale.Delivery`"""
title = _("Delivery editor")
gladefile = 'DeliveryEditor'
size = (-1, 400)
model_type = Delivery
model_name = _('Delivery')
form_holder_name = 'forms'
form_columns = 2
@cached_property()
def fields(self):
# Only users with admin or purchase permission can modify transporters
user = api.get_current_user(self.store)
can_modify_transporter = any((
user.profile.check_app_permission(u'admin'),
user.profile.check_app_permission(u'purchase'),
))
return collections.OrderedDict(
client_str=TextField(_("Client"), proxy=True, editable=False,
colspan=2),
transporter_id=PersonField(_("Transporter"), proxy=True,
person_type=Transporter, colspan=2,
can_add=can_modify_transporter,
can_edit=can_modify_transporter),
address=AddressField(_("Address"), proxy=True, mandatory=True,
colspan=2),
was_delivered_check=BoolField(_("Was sent to deliver?")),
deliver_date=DateField(_("Delivery date"), mandatory=True, proxy=True),
tracking_code=TextField(_("Tracking code"), proxy=True),
was_received_check=BoolField(_("Was received by client?")),
receive_date=DateField(_("Receive date"), mandatory=True, proxy=True),
empty=EmptyField(),
)
def __init__(self, store, *args, **kwargs):
self._configuring_proxies = False
super(DeliveryEditor, self).__init__(store, *args, **kwargs)
#
# BaseEditor Hooks
#
def setup_proxies(self):
self._configuring_proxies = True
self._setup_widgets()
self._update_status_widgets()
self._configuring_proxies = False
def setup_slaves(self):
self.delivery_items = ObjectList(
columns=self._get_delivery_items_columns(),
objects=self.model.delivery_items,
)
self.delivery_items_holder.add(self.delivery_items)
self.delivery_items.show()
#
# Private
#
def _setup_widgets(self):
for widget in (self.receive_date, self.deliver_date,
self.tracking_code):
widget.set_sensitive(False)
def _update_status_widgets(self):
if self.model.status == Delivery.STATUS_INITIAL:
for widget in (self.was_delivered_check, self.was_received_check):
widget.set_active(False)
elif self.model.status == Delivery.STATUS_SENT:
self.was_delivered_check.set_active(True)
self.was_received_check.set_active(False)
elif self.model.status == Delivery.STATUS_RECEIVED:
for widget in (self.was_delivered_check, self.was_received_check):
widget.set_active(True)
else:
raise ValueError(_("Invalid status for %s") % (
self.model.__class__.__name__))
def _get_delivery_items_columns(self):
return [
Column('sellable.description', title=_('Products to deliver'),
data_type=str, expand=True, sorted=True),
Column('quantity', title=_('Quantity'), data_type=decimal.Decimal,
format_func=format_quantity),
]
#
# Callbacks
#
def on_was_delivered_check__toggled(self, button):
active = button.get_active()
# When delivered, don't let user change transporter or address
self.transporter_id.set_sensitive(not active)
self.address.set_sensitive(not active)
for widget in (self.deliver_date, self.tracking_code):
widget.set_sensitive(active)
if not self.model.deliver_date:
self.deliver_date.update(localtoday().date())
if self._configuring_proxies:
# Do not change status above
return
if active:
self.model.set_sent()
else:
self.model.set_initial()
def on_was_received_check__toggled(self, button):
active = button.get_active()
self.receive_date.set_sensitive(active)
# If it was received, don't let the user unmark was_delivered_check
self.was_delivered_check.set_sensitive(not active)
if not self.was_delivered_check.get_active():
self.was_delivered_check.set_active(True)
if not self.model.receive_date:
self.receive_date.update(localtoday().date())
if self._configuring_proxies:
# Do not change status above
return
if active:
self.model.set_received()
else:
self.model.set_sent()
| tiagocardosos/stoq | stoqlib/gui/editors/deliveryeditor.py | Python | gpl-2.0 | 12,866 | [
"VisIt"
] | 7da9991e2ed83316cf641b946fa22f4c065bd38c38f3cf2f41023d267f340712 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""Tests for MDAnalysis.core.topologyattrs objects.
"""
import numpy as np
from numpy.testing import (
assert_equal,
assert_almost_equal,
)
import pytest
from MDAnalysisTests.datafiles import PSF, DCD, PDB_CHECK_RIGHTHAND_PA, MMTF
from MDAnalysisTests import make_Universe, no_deprecated_call
import MDAnalysis as mda
import MDAnalysis.core.topologyattrs as tpattrs
from MDAnalysis.core import groups
from MDAnalysis.core.topology import Topology
from MDAnalysis.exceptions import NoDataError
class DummyGroup(object):
"""Designed to mock an Group
initiate with indices, these are then available as ._ix
"""
def __init__(self, vals):
self._ix = vals
def __len__(self):
return len(self._ix)
@property
def ix(self):
return self._ix
class TopologyAttrMixin(object):
"""Mixin to test the common elements to all TopologyAttrs.
10 atoms
4 residues
2 segments
"""
# Reference data
@pytest.fixture()
def top(self):
Ridx = np.array([0, 0, 2, 2, 1, 1, 3, 3, 1, 2])
Sidx = np.array([0, 1, 1, 0])
return Topology(10, 4, 2,
attrs=[self.attrclass(self.values.copy())],
atom_resindex=Ridx,
residue_segindex=Sidx)
@pytest.fixture()
def attr(self, top):
return getattr(top, self.attrclass.attrname)
@pytest.fixture()
def universe(self, top):
return mda.Universe(top)
def test_len(self, attr):
assert len(attr) == len(attr.values)
class TestAtomAttr(TopologyAttrMixin):
"""Test atom-level TopologyAttrs.
"""
values = np.array([7, 3, 69, 9993, 84, 194, 263, 501, 109, 5873])
single_value = 567
attrclass = tpattrs.AtomAttr
def test_set_atom_VE(self):
u = make_Universe(('names',))
at = u.atoms[0]
with pytest.raises(ValueError):
setattr(at, 'name', ['oopsy', 'daisies'])
def test_get_atoms(self, attr):
result = attr.get_atoms(DummyGroup([2, 1]))
assert len(result) == 2
assert_equal(result,
self.values[[2, 1]])
def test_set_atoms_singular(self, attr):
# set len 2 Group to len 1 value
dg = DummyGroup([3, 7])
attr.set_atoms(dg, self.single_value)
assert_equal(attr.get_atoms(dg),
np.array([self.single_value, self.single_value]))
def test_set_atoms_plural(self, attr):
# set len 2 Group to len 2 values
dg = DummyGroup([3, 7])
attr.set_atoms(dg, np.array([23, 504]))
assert_equal(attr.get_atoms(dg), np.array([23, 504]))
def test_set_atoms_VE(self, attr):
# set len 2 Group to wrong length values
dg = DummyGroup([3, 7])
with pytest.raises(ValueError):
attr.set_atoms(dg, np.array([6, 7, 8, 9]))
def test_get_residues(self, attr):
"""Unless overriden by child class, this should yield values for all
atoms in residues.
"""
result = attr.get_residues(DummyGroup([2, 1]))
assert len(result) == 2
assert_equal(result,
[self.values[[2, 3, 9]], self.values[[4, 5, 8]]])
def test_get_segments(self, attr):
"""Unless overriden by child class, this should yield values for all
atoms in segments.
"""
result = attr.get_segments(DummyGroup([1]))
assert len(result) == 1
assert_equal(result,
[self.values[[4, 5, 8, 2, 3, 9]]])
class TestAtomids(TestAtomAttr):
attrclass = tpattrs.Atomids
class TestIndicesClasses(object):
@pytest.fixture()
def u(self):
return mda.Universe(PSF, DCD)
def test_cant_set_atom_indices(self, u):
with pytest.raises(AttributeError):
u.atoms.indices = 1
def test_cant_set_residue_indices(self, u):
with pytest.raises(AttributeError):
u.atoms.residues.resindices = 1
def test_cant_set_segment_indices(self, u):
with pytest.raises(AttributeError):
u.atoms.segments.segindices = 1
class TestAtomnames(TestAtomAttr):
values = np.array(['O', 'C', 'CA', 'N', 'CB', 'CG', 'CD', 'NA', 'CL', 'OW'],
dtype=object)
single_value = 'Ca2'
attrclass = tpattrs.Atomnames
@pytest.fixture()
def u(self):
return mda.Universe(PSF, DCD)
def test_prev_emptyresidue(self, u):
assert_equal(u.residues[[]]._get_prev_residues_by_resid(),
u.residues[[]])
def test_next_emptyresidue(self, u):
assert_equal(u.residues[[]]._get_next_residues_by_resid(),
u.residues[[]])
class AggregationMixin(TestAtomAttr):
def test_get_residues(self, attr):
assert_equal(attr.get_residues(DummyGroup([2, 1])),
np.array([self.values[[2, 3, 9]].sum(),
self.values[[4, 5, 8]].sum()]))
def test_get_segments(self, attr):
assert_equal(attr.get_segments(DummyGroup([1])),
np.array([self.values[[4, 5, 8, 2, 3, 9]].sum()]))
def test_get_segment(self, attr):
assert_equal(attr.get_segments(DummyGroup(1)),
np.sum(self.values[[4, 5, 8, 2, 3, 9]]))
class TestMasses(AggregationMixin):
attrclass = tpattrs.Masses
class TestCharges(AggregationMixin):
values = np.array([+2, -1, 0, -1, +1, +2, 0, 0, 0, -1])
attrclass = tpattrs.Charges
class TestResidueAttr(TopologyAttrMixin):
"""Test residue-level TopologyAttrs.
"""
single_value = 2
values = np.array([15.2, 395.6, 0.1, 9.8])
attrclass = tpattrs.ResidueAttr
def test_set_residue_VE(self, universe):
# setting e.g. resname to 2 values should fail with VE
res = universe.residues[0]
with pytest.raises(ValueError):
setattr(res, self.attrclass.singular, self.values[:2])
def test_get_atoms(self, attr):
assert_equal(attr.get_atoms(DummyGroup([7, 3, 9])),
self.values[[3, 2, 2]])
def test_get_atom(self, universe):
attr = getattr(universe.atoms[0], self.attrclass.singular)
assert_equal(attr, self.values[0])
def test_get_residues(self, attr):
assert_equal(attr.get_residues(DummyGroup([1, 2, 1, 3])),
self.values[[1, 2, 1, 3]])
def test_set_residues_singular(self, attr):
dg = DummyGroup([3, 0, 1])
attr.set_residues(dg, self.single_value)
assert_equal(attr.get_residues(dg),
np.array([self.single_value]*3, dtype=self.values.dtype))
def test_set_residues_plural(self, attr):
attr.set_residues(DummyGroup([3, 0, 1]),
np.array([23, 504, 2]))
assert_almost_equal(attr.get_residues(DummyGroup([3, 0, 1])),
np.array([23, 504, 2]))
def test_set_residues_VE(self, attr):
dg = DummyGroup([3, 0, 1])
with pytest.raises(ValueError):
attr.set_residues(dg, np.array([4.5, 5.2]))
def test_get_segments(self, attr):
"""Unless overriden by child class, this should yield values for all
atoms in segments.
"""
assert_equal(attr.get_segments(DummyGroup([0, 1, 1])),
[self.values[[0, 3]], self.values[[1, 2]], self.values[[1, 2]]])
class TestResnames(TestResidueAttr):
attrclass = tpattrs.Resnames
single_value = 'xyz'
values = np.array(['a', 'b', '', 'd'], dtype=object)
class TestICodes(TestResnames):
attrclass = tpattrs.ICodes
class TestResids(TestResidueAttr):
values = np.array([10, 11, 18, 20])
attrclass = tpattrs.Resids
@pytest.mark.xfail
def test_set_atoms(self, attr):
"""Setting the resids of atoms changes their residue membership.
"""
# moving resids doesn't currently work!
assert 1 == 2
# set with array
attr.set_atoms(DummyGroup([3, 7]), np.array([11, 20]))
assert_equal(attr.get_atoms(DummyGroup([3, 7])), np.array([11, 20]))
# set to resid that no residue has (should raise exception)
with pytest.raises(NoDataError):
attr.set_atoms(DummyGroup([3, 7]), np.array([11, 21]))
def test_set_residues(self, attr):
attr.set_residues(DummyGroup([3, 0, 1]),
np.array([23, 504, 27]))
assert_almost_equal(attr.get_residues(DummyGroup([3, 0, 1])),
np.array([23, 504, 27]))
class TestSegmentAttr(TopologyAttrMixin):
"""Test segment-level TopologyAttrs.
"""
values = np.array([-0.19, 500])
attrclass = tpattrs.SegmentAttr
def test_set_segment_VE(self):
u = make_Universe(('segids',))
seg = u.segments[0]
with pytest.raises(ValueError):
setattr(seg, 'segid', [1, 2, 3])
def test_get_atoms(self, attr):
assert_equal(attr.get_atoms(DummyGroup([2, 4, 1])),
self.values[[1, 1, 0]])
def test_get_residues(self, attr):
assert_equal(attr.get_residues(DummyGroup([1, 2, 1, 3])),
self.values[[1, 1, 1, 0]])
def test_get_segments(self, attr):
"""Unless overriden by child class, this should yield values for all
atoms in segments.
"""
assert_equal(attr.get_segments(DummyGroup([1, 0, 0])),
self.values[[1, 0, 0]])
def test_set_segments_singular(self, attr):
dg = DummyGroup([0, 1])
attr.set_segments(dg, 0.45)
assert_equal(attr.get_segments(dg), np.array([0.45, 0.45]))
def test_set_segments_plural(self, attr):
dg = DummyGroup([0, 1])
attr.set_segments(dg, np.array([23, -0.0002]))
assert_equal(attr.get_segments(dg), np.array([23, -0.0002]))
def test_set_segments_VE(self, attr):
dg = DummyGroup([0, 1])
with pytest.raises(ValueError):
attr.set_segments(dg, np.array([4, 5, 6, 7]))
class TestAttr(object):
@pytest.fixture()
def ag(self):
universe = mda.Universe(PSF, DCD)
return universe.atoms # prototypical AtomGroup
def test_principal_axes(self, ag):
assert_almost_equal(
ag.principal_axes(),
np.array([
[1.53389276e-03, 4.41386224e-02, 9.99024239e-01],
[1.20986911e-02, 9.98951474e-01, -4.41539838e-02],
[-9.99925632e-01, 1.21546132e-02, 9.98264877e-04]]))
@pytest.fixture()
def universe_pa(self):
return mda.Universe(PDB_CHECK_RIGHTHAND_PA)
def test_principal_axes_handedness(self, universe_pa):
e_vec = universe_pa.atoms.principal_axes()
assert_almost_equal(np.dot(np.cross(e_vec[0], e_vec[1]), e_vec[2]), 1.0)
def test_align_principal_axes_with_self(self, ag):
pa = ag.principal_axes()
ag.align_principal_axis(0, pa[0])
assert_almost_equal(ag.principal_axes(), pa)
def test_align_principal_axes_with_x(self, ag):
ag.align_principal_axis(0, [1, 0, 0])
        # This is a very loose check that the difference is not more than 0.5.
# This is OK here because the rounding error in the calculation really
# is that big.
assert_almost_equal(np.abs(ag.principal_axes()), np.eye(3), decimal=1)
class TestCrossLevelAttributeSetting(object):
"""
Can only get attributes belonging to higher level objects
Atom.resid works!
ResidueGroup.names = ['a', 'b', 'c'] doesn't work, Atom is below Residue
Setting any attribute we can get should only work if they are the same level.
Atom.resid = 4 should fail because resid belongs to Residue not Atom
"""
u = make_Universe(('names', 'resids', 'segids'))
# component and group in each level
atomlevel = (u.atoms[0], u.atoms[:10])
residuelevel = (u.residues[0], u.residues[:5])
segmentlevel = (u.segments[0], u.segments[:2])
levels = {0: atomlevel, 1: residuelevel, 2: segmentlevel}
atomattr = 'names'
residueattr = 'resids'
segmentattr = 'segids'
attrs = {0: atomattr, 1: residueattr, 2: segmentattr}
@pytest.mark.parametrize('level_idx, level', levels.items())
@pytest.mark.parametrize('attr_idx, attr', attrs.items())
def test_set_crosslevel(self, level_idx, level, attr_idx, attr):
if level_idx == attr_idx:
# if we're on the same level, then this should work
# ie Atom.mass = 12.0 is OK!
return
component, group = level
# eg 'name', 'names' = 'names', 'names'[:-1]
singular_attr, plural_attr = attr[:-1], attr
# eg check ResidueGroup.names = 'newvalue' raises NIE
# or ResidueGroup.segids = 'newvalue' raises NIE
self._check_crosslevel_fail(group, plural_attr)
if attr_idx < level_idx:
# Segment.resid doesn't even exist as an attribute
# so we don't have to check that setting fails
# Atom.segid does exist as attribute,
# but will fail to be set
return
self._check_crosslevel_fail(component, singular_attr)
@staticmethod
def _check_crosslevel_fail(item, attr):
with pytest.raises(NotImplementedError):
setattr(item, attr, 1.0)
class TestRecordTypes(object):
def test_record_types_default(self):
u = make_Universe()
u.add_TopologyAttr('record_type')
assert u.atoms[0].record_type == 'ATOM'
assert_equal(u.atoms[:10].record_types, 'ATOM')
@pytest.fixture()
def rectype_uni(self):
# standard 125/25/5 universe
u = make_Universe()
u.add_TopologyAttr('record_type')
# first 25 atoms are ATOM (first 5 residues, first segment)
# 25 to 50th are HETATM (res 5:10, second segment)
# all after are ATOM
u.atoms[:25].record_types = 'ATOM'
u.atoms[25:50].record_types = 'HETATM'
u.atoms[50:].record_types = 'ATOM'
return u
def test_encoding(self, rectype_uni):
ag = rectype_uni.atoms[:10]
ag[0].record_type = 'ATOM'
ag[1:4].record_types = 'HETATM'
assert ag[0].record_type == 'ATOM'
assert ag[1].record_type == 'HETATM'
def test_residue_record_types(self, rectype_uni):
rt = rectype_uni.residues.record_types
assert isinstance(rt, list)
assert len(rt) == 25
# check return type explicitly
# some versions of numpy allow bool to str comparison
assert not rt[0].dtype == bool
assert (rt[0] == 'ATOM').all()
assert (rt[5] == 'HETATM').all()
def test_segment_record_types(self, rectype_uni):
rt = rectype_uni.segments.record_types
assert isinstance(rt, list)
assert len(rt) == 5
assert not rt[0].dtype == bool
assert (rt[0] == 'ATOM').all()
assert (rt[1] == 'HETATM').all()
def test_static_typing():
ta = tpattrs.Charges(['1.0', '2.0', '3.0'])
assert isinstance(ta.values, np.ndarray)
assert ta.values.dtype == float
def test_static_typing_from_empty():
u = mda.Universe.empty(3)
u.add_TopologyAttr('masses', values=['1.0', '2.0', '3.0'])
assert isinstance(u._topology.masses.values, np.ndarray)
assert isinstance(u.atoms[0].mass, float)
@pytest.mark.parametrize('level, transplant_name', (
('atoms', 'center_of_mass'),
('atoms', 'total_charge'),
('residues', 'total_charge'),
))
def test_stub_transplant_methods(level, transplant_name):
u = mda.Universe.empty(n_atoms=2)
group = getattr(u, level)
with pytest.raises(NoDataError):
getattr(group, transplant_name)()
@pytest.mark.parametrize('level, transplant_name', (
('universe', 'models'),
('atoms', 'n_fragments'),
))
def test_stub_transplant_property(level, transplant_name):
u = mda.Universe.empty(n_atoms=2)
group = getattr(u, level)
with pytest.raises(NoDataError):
getattr(group, transplant_name)
def test_warn_selection_for_strange_dtype():
err = "A selection keyword could not be automatically generated"
with pytest.warns(UserWarning, match=err):
class Star(tpattrs.TopologyAttr):
singular = "star" # turns out test_imports doesn't like emoji
attrname = "stars" # :(
per_object = "atom"
dtype = dict
class TestDeprecateBFactor:
MATCH = "use the tempfactor attribute"
@pytest.fixture()
def universe(self):
return mda.Universe(MMTF)
def test_deprecate_bfactors_get_group(self, universe):
with pytest.warns(DeprecationWarning, match=self.MATCH):
universe.atoms.bfactors
def test_deprecate_bfactors_get_atom(self, universe):
with pytest.warns(DeprecationWarning, match=self.MATCH):
assert universe.atoms[0].bfactor == universe.atoms[0].tempfactor
def test_deprecate_bfactors_set_group(self, universe):
with pytest.warns(DeprecationWarning, match=self.MATCH):
universe.atoms[:2].bfactors = [3.14, 10]
assert universe.atoms.tempfactors[0] == 3.14
assert universe.atoms.tempfactors[1] == 10
with pytest.warns(DeprecationWarning, match=self.MATCH):
assert universe.atoms.bfactors[0] == 3.14
assert universe.atoms.bfactors[1] == 10
def test_deprecate_bfactors_set_atom(self, universe):
with pytest.warns(DeprecationWarning, match=self.MATCH):
universe.atoms[0].bfactor = 3.14
assert universe.atoms[0].tempfactor == 3.14
with pytest.warns(DeprecationWarning, match=self.MATCH):
assert universe.atoms[0].bfactor == 3.14
def test_deprecate_bfactor_sel(self, universe):
with pytest.warns(DeprecationWarning, match=self.MATCH):
universe.select_atoms("bfactor 3")
class TestStringInterning:
# try and trip up the string interning we use for string attributes
@pytest.fixture
def universe(self):
u = mda.Universe.empty(n_atoms=10, n_residues=2,
atom_resindex=[0]*5 + [1] * 5)
u.add_TopologyAttr('names', values=['A'] * 10)
u.add_TopologyAttr('resnames', values=['ResA', 'ResB'])
u.add_TopologyAttr('segids', values=['SegA'])
return u
@pytest.mark.parametrize('newname', ['ResA', 'ResB'])
def test_add_residue(self, universe, newname):
newres = universe.add_Residue(resname=newname)
assert newres.resname == newname
ag = universe.atoms[2]
ag.residue = newres
assert ag.resname == newname
@pytest.mark.parametrize('newname', ['SegA', 'SegB'])
def test_add_segment(self, universe, newname):
newseg = universe.add_Segment(segid=newname)
assert newseg.segid == newname
rg = universe.residues[0]
rg.segment = newseg
assert rg.atoms[0].segid == newname
def test_issue3437(self, universe):
newseg = universe.add_Segment(segid='B')
ag = universe.residues[0].atoms
ag.residues.segments = newseg
assert 'B' in universe.segments.segids
ag2 = universe.select_atoms('segid B')
assert len(ag2) == 5
assert (ag2.ix == ag.ix).all()
| MDAnalysis/mdanalysis | testsuite/MDAnalysisTests/core/test_topologyattrs.py | Python | gpl-2.0 | 20,471 | [
"MDAnalysis"
] | 46cb9054b257e006b3f17218cc4701aafb92fd0386bfac3eecbc7dbc27c4d23c |
#!/usr/bin/python
import HTSeq
import sys
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna
from Bio.Blast import NCBIXML
from Bio.Blast.Applications import NcbiblastpCommandline
import os
import os.path
import string
import argparse
from CommonFastaFunctions import runBlastParser
import time
import glob
import drmaa
import pickle
import shutil
def reverseComplement(strDNA):
basecomplement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'R':'R', 'N':'N', 'K':'K'}
strDNArevC = ''
for l in strDNA:
strDNArevC += basecomplement[l]
return strDNArevC[::-1]
def translateSeq(DNASeq):
seq=DNASeq
try:
myseq= Seq(seq)
#print myseq
protseq=Seq.translate(myseq, table=11,cds=True)
except:
try:
seq=reverseComplement(seq)
myseq= Seq(seq)
#print myseq
protseq=Seq.translate(myseq, table=11,cds=True)
except:
try:
seq=seq[::-1]
myseq= Seq(seq)
#print myseq
protseq=Seq.translate(myseq, table=11,cds=True)
except:
try:
seq=seq[::-1]
seq=reverseComplement(seq)
myseq= Seq(seq)
#print myseq
protseq=Seq.translate(myseq, table=11,cds=True)
except:
raise
return protseq
# ================================================ MAIN ================================================ #
def main():
print(os.path.dirname(os.path.abspath(__file__)))
parser = argparse.ArgumentParser(description="This program screens a set of genes in a fasta file.")
parser.add_argument('-i', nargs='?', type=str, help='List of genome files (list of fasta files)', required=True)
parser.add_argument('-g', nargs='?', type=str, help='List of genes (fasta)', required=True)
parser.add_argument('-o', nargs='?', type=str, help="Name of the output files", required=True)
parser.add_argument('-p', nargs='?', type=str, help="True to give a phyloviz output file type, false is predefined", required=False)
args = parser.parse_args()
genomeFiles = args.i
genes = args.g
phylovizinput=True
if(args.p):
phylovizinput=args.p
print ("Starting Script at : "+time.strftime("%H:%M:%S-%d/%m/%Y"))
listOfCDSDicts = []
listOfGenomes = []
listOfGenomesDict = []
fp = open(genomeFiles, 'r')
for genomeFile in fp:
genomeFile = genomeFile.rstrip('\n')
genomeFile = genomeFile.rstrip('\r')
listOfGenomes.append( genomeFile )
genomeDict = {}
fp.close()
gene_fp = open( genes, 'r')
genepath=''
basepath=''
lGenesFiles = []
argumentsList = []
for gene in gene_fp:
gene = gene.rstrip('\n')
multiple=True
gene_fp2 = HTSeq.FastaReader(gene)
for allele in gene_fp2: #new db for each allele to blast it against himself
if (len(allele.seq) % 3 != 0):
multiple=False
print str(gene)+" this gene is to be removed ffs"
break
if multiple:
lGenesFiles.append( gene )
genepath=os.path.dirname(gene)
basepath=os.path.join(genepath, "temp")
if not os.path.exists(basepath):
os.makedirs(basepath)
filepath=os.path.join(basepath,str(os.path.basename(gene))+"_argList.txt")
with open(filepath, 'wb') as f:
var = [gene, listOfGenomes]
pickle.dump(var, f)
argumentsList.append(filepath)
# callAlleles([gene, listOfGenomes, listOfCDSDicts, listOfGenomesDict])
gene_fp.close()
# ------------------------------------------------- #
# RUN PRODIGAL OVER ALL GENOMES #
# ------------------------------------------------- #
print ("Starting Prodigal at : "+time.strftime("%H:%M:%S-%d/%m/%Y"))
#poolJobs = Pool()
totgenomes= len(listOfGenomes)
"""for genome in listOfGenomes:
#print genome
#listOfCDSDicts.append(runProdigal(genome))
filepath=os.path.join(basepath,str(os.path.basename(genome))+"_ORF.txt")
with open(filepath, 'wb') as f:
var = runProdigal(genome)
pickle.dump(var, f)"""
joblist =[]
with drmaa.Session() as s:
for genome in listOfGenomes:
#print('Creating job template')
jt = s.createJobTemplate()
#print os.path.join(os.getcwd(), 'callAlleles.py')
jt.remoteCommand = os.path.join(os.getcwd(), 'runProdigal.py')
#print argList
jt.args = [str(genome),basepath]
jt.joinFiles=True
jt.nativeSpecification='-V'
jobid = s.runJob(jt)
joblist.append(jobid)
with open("jobsid.txt","a") as f:
f.write(str(genome)+"\n"+str(jobid))
print('Your job has been submitted with ID %s' % jobid)
#print('Cleaning up')
s.deleteJobTemplate(jt)
s.synchronize(joblist, drmaa.Session.TIMEOUT_WAIT_FOREVER, True)
#for curjob in joblist:
# print 'Collecting job ' + curjob
# retval = s.wait(curjob, drmaa.Session.TIMEOUT_WAIT_FOREVER)
# print 'Job: ' + str(retval.jobId) + ' finished with status ' + str(retval.hasExited)
print ("Finishing Prodigal at : "+time.strftime("%H:%M:%S-%d/%m/%Y"))
# ----------------------------- #
# Each gene has a different job #
# ----------------------------- #
#---CDS to protein---#
listOFProt=[]
listAllCDS=[]
i = 0
j=0
for genomeFile in listOfGenomes:
listOfCDS={}
genomeProts=""
currentCDSDict = {}
#print currentCDSDict
currentGenomeDict = {}
filepath=os.path.join(basepath,str(os.path.basename(genomeFile))+"_ORF.txt")
with open(filepath,'rb') as f:
currentCDSDict = pickle.load(f)
g_fp = HTSeq.FastaReader( genomeFile )
for contig in g_fp:
sequence=str(contig.seq)
genomeDict[ contig.name ] = sequence
currentGenomeDict = genomeDict
i+=1
for contigTag,value in currentCDSDict.iteritems():
#print contigTag,value
for protein in value:
try:
seq= currentGenomeDict[ contigTag ][ protein[0]:protein[1] ].upper()
protseq=translateSeq(seq)
idstr=">"+contigTag+"&protein"+str(j)+"&"+str(protein[0])+"-"+str(protein[1])
genomeProts+=idstr+"\n"
listOfCDS[idstr]=seq
genomeProts+=str(protseq)+"\n"
except Exception as e:
print str(e)+" "+str(genomeFile)
pass
j+=1
filepath=os.path.join(basepath,str(os.path.basename(genomeFile))+"_ORF_Protein.txt")
with open(filepath, 'wb') as f:
var = listOfCDS
pickle.dump(var, f)
#listAllCDS.append(filepath)
filepath=os.path.join(basepath,str(os.path.basename(genomeFile))+"_Protein.fasta")
with open(filepath, 'wb') as f:
f.write(genomeProts)
#listOFProt.append(filepath)
#os.makedirs(os.path.join(basepath,str(os.path.basename(genomeFile)) ))
#Create_Blastdb2( filepath,os.path.join(basepath,str(os.path.basename(genomeFile)) ),str(os.path.basename(genomeFile)) )
print ("Starting Genome Blast Db creation at : "+time.strftime("%H:%M:%S-%d/%m/%Y"))
with drmaa.Session() as s:
for genomeFile in listOfGenomes:
filepath=os.path.join(basepath,str(os.path.basename(genomeFile))+"_Protein.fasta")
os.makedirs(os.path.join(basepath,str(os.path.basename(genomeFile)) ))
#Create_Blastdb2( filepath,os.path.join(basepath,str(os.path.basename(genomeFile)) ),str(os.path.basename(genomeFile)) )
#print('Creating job template')
jt = s.createJobTemplate()
#print os.path.join(os.getcwd(), 'callAlleles.py')
jt.remoteCommand = os.path.join(os.getcwd(), 'Create_Genome_Blastdb.py')
#print argList
jt.args = [filepath,os.path.join(basepath,str(os.path.basename(genomeFile)) ),str(os.path.basename(genomeFile)) ]
jt.joinFiles=True
jt.nativeSpecification='-V'
jobid = s.runJob(jt)
joblist.append(jobid)
with open("jobsid.txt","a") as f:
f.write(str(genome)+"\n"+str(jobid))
print('Your job has been submitted with ID %s' % jobid)
#print('Cleaning up')
s.deleteJobTemplate(jt)
s.synchronize(joblist, drmaa.Session.TIMEOUT_WAIT_FOREVER, True)
print
print ("Starting Allele Calling at : "+time.strftime("%H:%M:%S-%d/%m/%Y"))
#output=callAlleles([gene, listOfGenomes, listOfCDSDicts, listOfGenomesDict])
#print output
# raise SystemExit
totloci= len(argumentsList)
joblist =[]
with drmaa.Session() as s:
for argList in argumentsList:
#print('Creating job template')
jt = s.createJobTemplate()
#print os.path.join(os.getcwd(), 'callAlleles.py')
jt.remoteCommand = os.path.join(os.getcwd(), 'callAlleles_protein2.py')
#print argList
jt.args = [str(argList),basepath]
jt.joinFiles=True
jt.nativeSpecification='-V'
jobid = s.runJob(jt)
joblist.append(jobid)
with open("jobsid.txt","a") as f:
f.write(str(argList)+"\n"+str(jobid))
print('Your job has been submitted with ID %s' % jobid)
#print('Cleaning up')
s.deleteJobTemplate(jt)
s.synchronize(joblist, drmaa.Session.TIMEOUT_WAIT_FOREVER, True)
#for curjob in joblist:
#print 'Collecting job ' + curjob
#retval = s.wait(curjob, drmaa.Session.TIMEOUT_WAIT_FOREVER)
#print 'Job: ' + str(retval.jobId) + ' finished with status ' + str(retval.hasExited)
output=[]
for gene in lGenesFiles:
filepath=os.path.join(basepath, os.path.basename(gene)+"_result.txt")
with open(filepath,'rb') as f:
var = pickle.load(f)
output.append(var)
output2=[]
for gene in lGenesFiles:
filepath2=os.path.join(basepath, os.path.basename(gene)+"_result2.txt")
with open(filepath2,'rb') as f:
var = pickle.load(f)
output2.append(var)
print ("Finished Allele Calling at : "+time.strftime("%H:%M:%S-%d/%m/%Y"))
#delete all temp files
shutil.rmtree(basepath)
print "##################################################\n %s genomes used for %s loci" % (len(output[0][0]),len(output) )
numberexactmatches=0
for gene in output:
for gAllele in gene[0]:
if("EXC:" in gAllele):
numberexactmatches+=1
print "\n %s exact matches found out of %s" % (numberexactmatches,(len(output[0][0])*len(output)) )
print "\n %s percent of exact matches \n##################################################" % (float((numberexactmatches*100)/(len(output[0][0])*len(output))) )
print "\nWriting output files\n"
args.o = '/' + args.o
if(phylovizinput is False):
genesI=0
for geneOut in output:
i=0
for gAllele in geneOut[0]:
currentGenome = listOfGenomes[i]
currentGenome=currentGenome.split("/")
currentGenome=currentGenome[len(currentGenome)-1].split(".")
gOutFile = os.path.dirname( "./" )
finalname=(args.o).split("/")
gOutFile += "/"+str(currentGenome[0])+finalname[1]
if not os.path.isfile( gOutFile )or (i==0 and genesI==0):
aux = 'w'
else:
aux = 'a'
gAllele = '\n' + gAllele
f = open(gOutFile, aux)
f.write(gAllele + ':' + lGenesFiles[genesI])
f.close()
i+=1
genesI+=1
else:
try:
phylovout=[]
phylovout2=[]
genesnames=[]
statistics=[]
print str(output2)
for gene in lGenesFiles:
genename=gene.split("/")
#genename=genename[len(genename)-1].split(".")
genename=genename[len(genename)-1]
genesnames.append(genename)
for geneOut in output:
print str(geneOut)
gene=0
alleleschema=[]
while gene<len(output[0][0]):
genename=(geneOut[1][gene]).split("_")
if(len(genename)!=1):
alleleschema.append(genename[1])
else:
alleleschema.append(genename[0])
gene+=1
phylovout.append(alleleschema)
for geneOut in output2:
#print str(geneOut)
gene=0
alleleschema=[]
while gene<len(output2[0]):
genename=(geneOut[gene])
#print genename
#if(len(genename)!=1):
# alleleschema.append(genename[1])
#else:
alleleschema.append(genename)
gene+=1
phylovout2.append(alleleschema)
genome=0
finalphylovinput= "FILE"+ "\t"
finalphylovinput2= "FILE"+ "\t"
for geneid in genesnames:
finalphylovinput+= str(geneid)+ "\t"
finalphylovinput2+= str(geneid)+ "\t"
print finalphylovinput
print finalphylovinput2
while genome<len(listOfGenomes):
currentGenome = os.path.basename(listOfGenomes[genome])
statsaux=[0]*6 # EXC INF LNF LOT incomplete SAC
finalphylovinput+= "\n" + currentGenome + "\t"
for gene in phylovout:
val= str(gene[genome])
finalphylovinput+= val + "\t"
if "INF" in val:
statsaux[1]+=1
elif "LNF" in val:
statsaux[2]+=1
elif "LOT" in val:
statsaux[3]+=1
elif "incomplete" in val:
statsaux[4]+=1
elif "small" in val:
statsaux[5]+=1
else:
statsaux[0]+=1
genome+=1
statistics.append(statsaux)
genome=0
while genome<len(listOfGenomes):
currentGenome = os.path.basename(listOfGenomes[genome])
finalphylovinput2+= "\n" + currentGenome + "\t"
for gene in phylovout2:
val= str(gene[genome])
finalphylovinput2+= val + "\t"
genome+=1
gOutFile = os.path.dirname( "./")
gOutFile2 = os.path.dirname( "./")
gOutFile += args.o
gOutFile2 += "contigsInfo.txt"
statswrite='Stats:\tEXC\tINF\tLNF\tLOT\tincomplete\tsmall'
i=0
genome=0
while genome<len(listOfGenomes):
currentGenome = os.path.basename(listOfGenomes[genome])
statsaux=[0]*6 # EXC NA INF LNF LOT incomplete SAC
statswrite+= "\n" + currentGenome + "\t"
for k in statistics[i]:
statswrite+= str(k) + "\t"
i+=1
genome+=1
print statswrite
with open(gOutFile, 'a') as f:
f.write(finalphylovinput)
statoutfile=os.path.dirname( "./")
with open("stastics.txt", 'a') as f:
f.write(str(statswrite))
with open(gOutFile2, 'a') as f:
f.write(str(finalphylovinput2))
except Exception as e:
print e
exc_type, exc_obj, tb = sys.exc_info()
f = tb.tb_frame
lineno = tb.tb_lineno
print lineno
print ("Finished Script at : "+time.strftime("%H:%M:%S-%d/%m/%Y"))
if __name__ == "__main__":
main()
| mickaelsilva/pythonscripts | AlleleCalling/cluster_versions/alleleCalling_ORFbased_protein_main2.py | Python | gpl-2.0 | 13,650 | [
"BLAST",
"HTSeq"
] | 5e962d3370f11977a046ba3b2d50cb0f119c821e2867c311e8536ac2a9828b0c |
# Copyright 2007 by Tiago Antao <[email protected]>. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
from Bio.PopGen.GenePop import FileParser
import Bio.PopGen.FDist
# Quite a few utility functions could be added (e.g. remove population,
# add locus). The recommended strategy is to convert back and forth
# between GenePop and FDist and to use the GenePop utilities.
def convert_genepop_to_fdist(gp_rec, report_pops=None):
"""Converts a GenePop record to a FDist one.
Parameters:
gp_rec - Genepop Record (either standard or big)
Returns:
FDist record.
"""
if hasattr(gp_rec, "populations"):
return _convert_genepop_to_fdist(gp_rec)
else:
return _convert_genepop_to_fdist_big(gp_rec, report_pops)
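# Example usage (a sketch only; the file name is hypothetical and the parsing
# call assumes the classic Bio.PopGen.GenePop API of the same Biopython
# generation as this module):
#
#   from Bio.PopGen import GenePop
#   with open("example.gen") as handle:
#       gp_rec = GenePop.read(handle)
#   fd_rec = convert_genepop_to_fdist(gp_rec)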
def _convert_genepop_to_fdist(gp_rec):
"""Converts a standard GenePop record to a FDist one.
Parameters:
gp_rec - Genepop Record (Standard)
Returns:
FDist record.
"""
fd_rec = Bio.PopGen.FDist.Record()
fd_rec.data_org = 0
fd_rec.num_loci = len(gp_rec.loci_list)
fd_rec.num_pops = len(gp_rec.populations)
for lc_i in range(len(gp_rec.loci_list)):
alleles = []
pop_data = []
for pop_i in range(len(gp_rec.populations)):
for indiv in gp_rec.populations[pop_i]:
for al in indiv[1][lc_i]:
if al is not None and al not in alleles:
alleles.append(al)
alleles.sort() # Dominance requires this
# here we go again (necessary...)
for pop_i in range(len(gp_rec.populations)):
allele_counts = {}
for indiv in gp_rec.populations[pop_i]:
for al in indiv[1][lc_i]:
if al is not None:
count = allele_counts.get(al, 0)
allele_counts[al] = count + 1
allele_array = [] # We need the same order as in alleles
for allele in alleles:
allele_array.append(allele_counts.get(allele, 0))
pop_data.append(allele_array)
fd_rec.loci_data.append((len(alleles), pop_data))
return fd_rec
def _convert_genepop_to_fdist_big(gp_rec, report_pops=None):
"""Converts a big GenePop record to a FDist one.
Parameters:
gp_rec - Genepop Record (Big)
Returns:
FDist record.
"""
fd_rec = Bio.PopGen.FDist.Record()
fd_rec.data_org = 1
fd_rec.num_loci = len(gp_rec.loci_list)
num_loci = len(gp_rec.loci_list)
loci = []
for i in range(num_loci):
loci.append(set())
pops = []
work_rec = FileParser.read(gp_rec.fname)
lParser = work_rec.get_individual()
def init_pop():
my_pop = []
for i in range(num_loci):
my_pop.append({})
return my_pop
curr_pop = init_pop()
num_pops = 1
if report_pops:
report_pops(num_pops)
while lParser:
if lParser is not True:
for loci_pos in range(num_loci):
for al in lParser[1][loci_pos]:
if al is not None:
loci[loci_pos].add(al)
curr_pop[loci_pos][al] = curr_pop[loci_pos].get(al, 0) + 1
else:
pops.append(curr_pop)
num_pops += 1
if report_pops:
report_pops(num_pops)
curr_pop = init_pop()
lParser = work_rec.get_individual()
work_rec._handle.close() # TODO - Needs a proper fix
pops.append(curr_pop)
fd_rec.num_pops = num_pops
for loci_pos in range(num_loci):
alleles = sorted(loci[loci_pos])
loci_rec = [len(alleles), []]
for pop in pops:
pop_rec = []
for allele in alleles:
pop_rec.append(pop[loci_pos].get(allele, 0))
loci_rec[1].append(pop_rec)
fd_rec.loci_data.append(tuple(loci_rec))
return fd_rec
def _convert_genepop_to_fdist_big_old(gp_rec, report_loci=None):
"""Converts a big GenePop record to a FDist one.
Parameters:
gp_rec - Genepop Record (Big)
Returns:
FDist record.
"""
fd_rec = Bio.PopGen.FDist.Record()
def countPops(rec):
f2 = FileParser.read(rec.fname)
popCnt = 1
while f2.skip_population():
popCnt += 1
return popCnt
fd_rec.data_org = 0
fd_rec.num_loci = len(gp_rec.loci_list)
work_rec0 = FileParser.read(gp_rec.fname)
fd_rec.num_pops = countPops(work_rec0)
num_loci = len(gp_rec.loci_list)
for lc_i in range(num_loci):
if report_loci:
report_loci(lc_i, num_loci)
work_rec = FileParser.read(gp_rec.fname)
work_rec2 = FileParser.read(gp_rec.fname)
alleles = []
pop_data = []
lParser = work_rec.get_individual()
while lParser:
if lParser is not True:
for al in lParser[1][lc_i]:
if al is not None and al not in alleles:
alleles.append(al)
lParser = work_rec.get_individual()
# here we go again (necessary...)
alleles.sort()
def process_pop(pop_data, alleles, allele_counts):
allele_array = [] # We need the same order as in alleles
for allele in alleles:
allele_array.append(allele_counts.get(allele, 0))
pop_data.append(allele_array)
lParser = work_rec2.get_individual()
allele_counts = {}
for allele in alleles:
allele_counts[allele] = 0
allele_counts[None] = 0
while lParser:
if lParser is True:
process_pop(pop_data, alleles, allele_counts)
allele_counts = {}
for allele in alleles:
allele_counts[allele] = 0
allele_counts[None] = 0
else:
for al in lParser[1][lc_i]:
allele_counts[al] += 1
lParser = work_rec2.get_individual()
process_pop(pop_data, alleles, allele_counts)
fd_rec.loci_data.append((len(alleles), pop_data))
return fd_rec
def approximate_fst(desired_fst, simulated_fst, parameter_fst,
max_run_fst=1, min_run_fst=0, limit=0.005):
"""Calculates the next Fst attempt in order to approximate a
desired Fst.
"""
if abs(simulated_fst - desired_fst) < limit:
return parameter_fst, max_run_fst, min_run_fst
if simulated_fst > desired_fst:
max_run_fst = parameter_fst
next_parameter_fst = (min_run_fst + parameter_fst) / 2
else:
min_run_fst = parameter_fst
next_parameter_fst = (max_run_fst + parameter_fst) / 2
return next_parameter_fst, max_run_fst, min_run_fst
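# A possible driver loop around approximate_fst (illustrative sketch;
# run_fdist is a hypothetical stand-in for whatever produces a simulated Fst
# for a given parameter value):
#
#   desired = 0.1
#   param, max_fst, min_fst = 0.1, 1, 0
#   for _ in range(20):
#       simulated = run_fdist(param)
#       param, max_fst, min_fst = approximate_fst(desired, simulated, param,
#                                                 max_fst, min_fst)
#       if abs(simulated - desired) < 0.005:
#           break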
| poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/Bio/PopGen/FDist/Utils.py | Python | apache-2.0 | 6,919 | [
"Biopython"
] | f2613e032a27db62f7ecb30196f54acfe8d477112ee5d4aa702600c26acc1bf9 |
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urlparse
import kodi
import dom_parser2
import log_utils # @UnusedImport
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import VIDEO_TYPES
import scraper
BASE_URL = 'http://allrls.me'
class Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.MOVIE, VIDEO_TYPES.EPISODE])
@classmethod
def get_name(cls):
return 'RLSSource.net'
def get_sources(self, video):
hosters = []
source_url = self.get_url(video)
if not source_url or source_url == FORCE_NO_MATCH: return hosters
url = scraper_utils.urljoin(self.base_url, source_url)
html = self._http_get(url, require_debrid=True, cache_limit=.5)
q_str = ''
match = re.search('class="entry-title">([^<]+)', html)
if match:
q_str = match.group(1)
pattern = 'href="?([^" ]+)(?:[^>]+>){2}\s+\|'
for match in re.finditer(pattern, html, re.DOTALL):
url = match.group(1)
if 'adf.ly' in url:
continue
hoster = {'multi-part': False, 'class': self, 'views': None, 'url': url, 'rating': None, 'quality': None, 'direct': False}
hoster['host'] = urlparse.urlsplit(url).hostname
hoster['quality'] = scraper_utils.blog_get_quality(video, q_str, hoster['host'])
hosters.append(hoster)
return hosters
def get_url(self, video):
return self._blog_get_url(video, delim=' ')
@classmethod
def get_settings(cls):
settings = super(cls, cls).get_settings()
settings = scraper_utils.disable_sub_check(settings)
name = cls.get_name()
settings.append(' <setting id="%s-filter" type="slider" range="0,180" option="int" label=" Filter results older than (0=No Filter) (days)" default="30" visible="eq(-3,true)"/>' % (name))
settings.append(' <setting id="%s-select" type="enum" label=" Automatically Select" values="Most Recent|Highest Quality" default="0" visible="eq(-4,true)"/>' % (name))
return settings
def search(self, video_type, title, year, season=''): # @UnusedVariable
html = self._http_get(self.base_url, params={'s': title, 'go': 'Search'}, require_debrid=True, cache_limit=1)
posts = []
for post in dom_parser2.parse_dom(html, 'div', {'id': re.compile('post-\d+')}):
match = dom_parser2.parse_dom(post.content, 'a', req='href')
if not match: continue
match_url = match[0].attrs['href']
match_title = match[0].content
match_date = dom_parser2.parse_dom(post, 'span', {'class': 'entry-date'})
posts.append('<url>%s</url><title>%s</title><date>%s</date>' % (match_url, match_title, match_date))
pattern = '<url>(?P<url>.*?)</url><title>(?P<post_title>.*?)</title><date>(?P<post_date>.*?)</date>'
date_format = '%B %d, %Y'
return self._blog_proc_results('\n'.join(posts), pattern, date_format, video_type, title, year)
| TheWardoctor/Wardoctors-repo | plugin.video.salts/scrapers/rlssource_scraper.py | Python | apache-2.0 | 4,032 | [
"ADF"
] | 79a8238d1b743a7fa956af37f288b65f882882c36a38ec0d75cef61af7f61d26 |
# Copyright (c) 2013, the GPy Authors (see AUTHORS.txt)
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from ..core import GP
from .. import likelihoods
from .. import kern
from ..inference.latent_function_inference.expectation_propagation import EP
class GPClassification(GP):
"""
Gaussian Process classification
This is a thin wrapper around the models.GP class, with a set of sensible defaults
:param X: input observations
:param Y: observed values, can be None if likelihood is not None
:param kernel: a GPy kernel, defaults to rbf
.. Note:: Multiple independent outputs are allowed using columns of Y
"""
def __init__(self, X, Y, kernel=None,Y_metadata=None):
if kernel is None:
kernel = kern.RBF(X.shape[1])
likelihood = likelihoods.Bernoulli()
GP.__init__(self, X=X, Y=Y, kernel=kernel, likelihood=likelihood, inference_method=EP(), name='gp_classification')
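# A minimal usage sketch (assumes the public GPy API; the data are synthetic
# and purely illustrative):
#
#   import numpy as np
#   import GPy
#   X = np.random.rand(50, 1)
#   Y = (X > 0.5).astype(float)            # binary labels in {0, 1}
#   m = GPy.models.GPClassification(X, Y)  # RBF kernel + Bernoulli + EP
#   m.optimize()                           # fit the kernel hyperparameters
#   p, _ = m.predict(np.linspace(0, 1, 5)[:, None])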
| Dapid/GPy | GPy/models/gp_classification.py | Python | bsd-3-clause | 960 | [
"Gaussian"
] | 91b2722c99554e80d46bf8008d92dae42de2b9cc461a06624d2d0495770849c5 |
from pylab import *
import sys
sys.path.insert(0, '..')
from brian2 import *
stand_alone = False
if stand_alone: set_device('cpp_standalone')
else: prefs.codegen.target = 'weave'
from neurtheor import IntegralPops as IP
recurrent_conn = False#True
SRM0 = True # if False, use SRM neurons
# quasi-renewal approx for below SRM neurons
# is ~5% lower at 20Hz A0.
if __name__ == '__main__':
# neuronal constants
R = 1.0e8*ohm
tausyn = 100.0e-3*second
tau0 = 20.0e-3*second
noise = 5.0e-3*volt
uth = 20.0e-3*volt
# network constants
N = 10000
connprob = 0.1
I0 = 0.0e-3*mV/R # V/Ohm = A
#totalw_pernrn = 15.0e-3 # V, recurrent weight
totalw_pernrn = 0.0e-3*volt # V, recurrent weight
# if I0 = 10mV/R, and noise = 5 mV,
# then totalw_pernrn = 15mV/R is ~ limit
# before activity blow up at 20mV/R
w0 = totalw_pernrn/connprob/N
win = 1.0e-3*volt # V, input weight for rate below
# stimulus constants
rate0 = 100*Hz
ratemod = 50*Hz
stimfreq = 5*Hz
fullt = 1.0*second
tstep = defaultclock.dt
print "time step for simulation is",tstep
################# BRIAN2 simulation #####################
# reset eta acts as a threshold increase
model_eqns = """
u = (I + K) * R : volt
I = I0 : amp
exph = 1.0/tau0*exp((u-eta)/noise) : Hz
deta/dt = -eta/tau0 : volt
dK/dt = -K/tausyn : amp
"""
threshold_eqns = "rand()<=exph*tstep"
if SRM0: # SRM0 (exact renewal process)
reset_eqns = "eta=uth"
else: # usual SRM (approx as quasi-renewal process)
reset_eqns = "eta+=uth"
seed(100)
np.random.seed(100)
# the hazard function rho is the firing rate,
# in time dt the probability to fire is rho*dt.
# noise below is only the output noise,
# input spiking noise comes from spiking during the simulation
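    # (Sketch of the escape-noise maths behind 'exph': the hazard is
    # rho(t) = (1/tau0) * exp((u - eta)/noise), so the exact probability of a
    # spike in a step dt is 1 - exp(-rho*dt); for rho*dt << 1 this reduces to
    # the rho*dt used in the threshold condition below.)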
Nrns = NeuronGroup(N, model_eqns, \
threshold=threshold_eqns,\
reset=reset_eqns)
#Nrns.I = np.random.uniform((uth-noise)/R/amp,\
# (uth+noise)/R/amp,N)*amp
# # uniform does not retain units, hence explicit
#Nrns.I = I0
#NrnsIn = PoissonGroup(N,rates='rate0+ratemod*sin(2*pi*stimfreq*t)')
# using ratemod*sin(2*pi*stimfreq*t) doesn't work!
# hence using TimedArray()
tarray = arange(0,fullt/second,tstep/second)
ratearray = rate0/Hz + ratemod/Hz*sin(2*pi*stimfreq/Hz*tarray)
#ratearrayN = repeat([ratearray],N,axis=0) # neuron-number is row, time is col
ratestim = TimedArray(ratearray*Hz,dt=tstep)
#NrnsIn = PoissonGroup(N,rates='ratestim(t)')
# nopes, somehow it cannot take time dependent values!
# tried pure (t) and (t,i), nothing works!
# just takes the value at t=0.
NrnsIn = NeuronGroup(N, 'rho = ratestim(t) : Hz', \
threshold='rand()<rho*tstep')
# PoissonGroup doesn't seem to work with TimedArray,
# just use NeuronGroup.
conns = Synapses(NrnsIn,Nrns,pre='K += win/R')
conns.connect('i==j')
if recurrent_conn:
Syns = Synapses(Nrns, Nrns, 'w : amp', pre='K += w')
Syns.connect(True, p=connprob)
Syns.w = w0/R
spikes = SpikeMonitor(Nrns)
rates = PopulationRateMonitor(Nrns)
ratesIn = PopulationRateMonitor(NrnsIn)
run(fullt,report='text')
if stand_alone:
device.build(directory='output', compile=True,\
run=True, debug=False)
#plot(spikes.t/second, spikes.i, '.')
#A0sim = sum(len(spikes.i[where(spikes.t<settletime)[0]]))/float(N)/settletime
#print "Average rate per neuron at baseline =", A0sim
#################### Integral approach and plotting ####################
intPop = IP.IntegralPop(N,I0*R/volt,tau0/second,\
uth/volt,0.0,noise/volt,\
w0/volt,tausyn/second,connprob,win/volt,rate0/Hz)
dt = intPop.integrate_dt
mpoints = int(intPop.tupper/dt)
tarray = arange(0,fullt/second,dt)
dratearray = ratemod/Hz*sin(2*pi*stimfreq/Hz*tarray)
intPop.get_background_rate()
print "The background rate is",intPop.A0
print "The h is",intPop.h
print "synint",intPop.kernelsyntildeIntegral
print 'Evolving rate input'
Avec = intPop.evolve(tarray,dratearray,mpoints)
print 'Convolving linear rate input'
Aveclin = intPop.lin_response_rate(dratearray,dt)
figure()
plot(tarray,intPop.harray,color='blue',label='h (V)')
ylabel('h (V)',color='blue')
twinx()
binunits = 10
bindt = tstep*binunits
bins = range(int(fullt/bindt))
Nbins = len(bins)
plot([rates.t[i*binunits]/second+bindt/2.0/second\
for i in bins],\
[sum(rates.rate[i*binunits:(i+1)*binunits]/Hz)/float(binunits)\
for i in bins],\
',-g',label='sim')
plot([ratesIn.t[i*binunits]/second+bindt/2.0/second\
for i in bins],\
[sum(ratesIn.rate[i*binunits:(i+1)*binunits]/Hz)/float(binunits)\
for i in bins],\
',-c',label='inp')
plot(tarray,Avec,color='red',label='integ evolve')
plot(tarray, Aveclin,color='magenta',label='lin evolve')
ylabel('rate (Hz)',color='red')
legend()
figure()
tarray = arange(0.0,intPop.kernelInf,intPop.integrate_dt)
plot(tarray,intPop.kernelsyntilde(tarray))
show()
| adityagilra/2015_spiking_population_response | linresprate_briancompare.py | Python | gpl-3.0 | 5,906 | [
"NEURON"
] | 33746102c6835b975f82e4b8ba8437ed095243eb3bfb135ae40c5c54f2613751 |
#!/usr/bin/env python
########################################################################
# $HeadURL$
# File :    dirac-myproxy-upload.py
# Author : Adrian Casajus
########################################################################
__RCSID__ = "$Id$"
import sys
import getpass
import DIRAC
from DIRAC.Core.Base import Script
class Params:
proxyLoc = False
dnAsUsername = False
def setProxyLocation( self, arg ):
self.proxyLoc = arg
return DIRAC.S_OK()
def setDNAsUsername( self, arg ):
self.dnAsUsername = True
return DIRAC.S_OK()
def showVersion( self, arg ):
print "Version:"
print " ", __RCSID__
sys.exit( 0 )
return DIRAC.S_OK()
params = Params()
Script.registerSwitch( "f:", "file=", "File to use as proxy", params.setProxyLocation )
Script.registerSwitch( "D", "DN", "Use DN as myproxy username", params.setDNAsUsername )
Script.registerSwitch( "i", "version", "Print version", params.showVersion )
Script.addDefaultOptionValue( "LogLevel", "always" )
Script.parseCommandLine()
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
from DIRAC.Core.Security.MyProxy import MyProxy
from DIRAC.Core.Security.X509Chain import X509Chain
from DIRAC.Core.Security import Locations, CS
if not params.proxyLoc:
params.proxyLoc = Locations.getProxyLocation()
if not params.proxyLoc:
print "Can't find any valid proxy"
sys.exit( 1 )
print "Uploading proxy file %s" % params.proxyLoc
mp = MyProxy()
retVal = mp.uploadProxy( params.proxyLoc, params.dnAsUsername )
if not retVal[ 'OK' ]:
print "Can't upload proxy:"
print " ", retVal[ 'Message' ]
sys.exit( 1 )
print "Proxy uploaded"
sys.exit( 0 )
| avedaee/DIRAC | FrameworkSystem/scripts/dirac-myproxy-upload.py | Python | gpl-3.0 | 1,686 | [
"DIRAC"
] | 4377a3a6fba1cf22160fdcd8c405c70a0f6eafaab4d3c61e0ded3b1005def469 |
# Copyright (c) 2010 Howard Hughes Medical Institute.
# All rights reserved.
# Use is subject to Janelia Farm Research Campus Software Copyright 1.1 license terms.
# http://license.janelia.org/license/jfrc_copyright_1_1.html
"""
This script demonstrates using external data (in this case a tabbed-text file) to aid in visualizing a network.
"""
import os.path, sys
from math import pi
# Load the base network.
if not any(network.muscles()):
execfile('Muscles.py')
updateProgress('Clustering the neurons...')
# Return the position of the soma using a default value if the attribute is not defined.
def somaPosition(neuron):
somaPosition = neuron.getAttribute('Soma A-P Position')
if somaPosition is not None:
somaPosition = float(somaPosition.value())
else:
if neuron.name[0] == 'R':
somaPosition = 0.9
else:
somaPosition = 0.5
return somaPosition
def sortCluster(neuron1, neuron2):
if neuron1.hasFunction(Neuron.Function.MOTOR) and neuron1.hasFunction(Neuron.Function.SENSORY):
key1 = '1' + neuron1.name
elif neuron1.hasFunction(Neuron.Function.MOTOR):
key1 = '0' + neuron1.name
elif neuron1.hasFunction(Neuron.Function.SENSORY):
key1 = '3' + neuron1.name
else:
key1 = '2' + neuron1.name
if neuron2.hasFunction(Neuron.Function.MOTOR) and neuron2.hasFunction(Neuron.Function.SENSORY):
key2 = '1' + neuron2.name
elif neuron2.hasFunction(Neuron.Function.MOTOR):
key2 = '0' + neuron2.name
elif neuron2.hasFunction(Neuron.Function.SENSORY):
key2 = '3' + neuron2.name
else:
key2 = '2' + neuron2.name
if key1 < key2:
return -1
elif key1 > key2:
return 1
else:
return 0
# Cluster the soma based on their A-P position and side.
clusters = {}
for neuron in network.neurons():
clusterKey = str(somaPosition(neuron)) + neuron.getAttribute('Side').value()
if clusterKey in clusters:
clusters[clusterKey] += [neuron]
else:
clusters[clusterKey] = [neuron]
for cluster in clusters.itervalues():
cluster.sort(sortCluster)
# Set up the visualization
display.setViewDimensions(2)
display.setShowNeuronNames(True)
display.setUseGhosts(True)
display.setDefaultFlowSpread(0.5)
updateProgress('Laying out the neurons...')
for neuron in network.neurons():
# Color the soma based on function.
red = green = blue = 0.5
if neuron.hasFunction(Neuron.Function.SENSORY):
red = 1.0
if neuron.hasFunction(Neuron.Function.INTERNEURON):
blue = 1.0
if neuron.hasFunction(Neuron.Function.MOTOR):
green = 1.0
display.setVisibleColor(neuron, (red, green, blue))
display.setLabelColor(neuron, (0.0 if red == 0.5 else red * 0.125, 0.0 if green == 0.5 else green * 0.125, 0.0 if blue == 0.5 else blue * 0.125))
# Position the soma according to their linear distance between head and tail and their left/center/right position.
somaX = somaPosition(neuron) * 4.0 - 2.0
somaY = 0.0
somaSign = 1.0
somaSide = neuron.getAttribute('Side')
if somaSide is not None:
if somaSide.value() == 'L':
somaY = -0.15
somaSign = -1.0
elif somaSide.value() == 'R':
somaY = 0.15
# Many soma have the exact same X/Y coordinates so we distribute them evenly around the common point.
clusterKey = str(somaPosition(neuron)) + neuron.getAttribute('Side').value()
cluster = clusters[clusterKey]
somaY += (len(cluster) - 1) / 2.0 * 0.015 * somaSign - cluster.index(neuron) * 0.015 * somaSign
display.setVisiblePosition(neuron, (somaX, somaY, -1.0), fixed = True)
updateProgress('Coloring the synapses...')
# Color the synapses according to excitation/inhibition and weight them according to their connection count.
ACh = library.neurotransmitter('ACh')
GABA = library.neurotransmitter('GABA')
for synapse in network.synapses():
if ACh in synapse.preSynapticNeurite.neuron().neurotransmitters:
display.setVisibleColor(synapse, (0.5, 0.5, 0.75))
elif GABA in synapse.preSynapticNeurite.neuron().neurotransmitters:
display.setVisibleColor(synapse, (0.75, 0.5, 0.5))
else:
display.setVisibleColor(synapse, (0.5, 0.5, 0.5))
display.setVisibleWeight(synapse, 0.5 if synapse.getAttribute('Count').value() < 5 else 2.0)
updateProgress('Coloring the gap junctions...')
# Color all gap junctions green and weight them according to their connection count.
for gapJunction in network.gapJunctions():
display.setVisibleColor(gapJunction, (0.0, 0.75, 0.0))
display.setVisibleWeight(gapJunction, 0.5 if gapJunction.getAttribute('Count').value() < 5 else 2.0)
updateProgress('Laying out the muscles...')
for muscle in network.muscles():
if not any(display.visiblesForObject(muscle)):
display.visualizeObject(muscle)
if muscle.getAttribute('A-P Position'):
muscleX = muscle.getAttribute('A-P Position').value() * 4.0 - 2.0
if muscle.name in ['MANAL', 'MVULVA']:
muscleX += 0.02 # Shift the muscles slightly so they don't obscure the neurons at the same position.
muscleY = 0.0
muscleSide = muscle.getAttribute('Side')
if muscleSide is not None:
if muscleSide.value() == 'L':
muscleY = -0.3
elif muscleSide.value() == 'R':
muscleY = 0.3
muscleFace = muscle.getAttribute('Face')
if muscleFace is not None:
if muscleFace.value() == 'D':
muscleY -= 0.025
elif muscleFace.value() == 'V':
muscleY += 0.025
display.setVisiblePosition(muscle, (muscleX, muscleY, -1.0), fixed = True)
display.setVisibleSize(muscle, (0.01, 0.02, .01))
updateProgress('Coloring the innervations...')
for innervation in network.innervations():
if not any(display.visiblesForObject(innervation)):
display.visualizeObject(innervation)
display.setVisibleWeight(innervation, 0.5 if innervation.getAttribute('Count').value() < 5.0 else 2.0)
display.zoomToFit()
| JaneliaSciComp/Neuroptikon | Source/Scripts/C. elegans/Physical Layout.py | Python | bsd-3-clause | 6,138 | [
"NEURON"
] | 622a7b7c868514b156f5a993005f7856386b890581ae5b9abf105a7dd5d4f8b7 |
#!/usr/bin/env python
#coding: utf-8
#### CLASSES ####
class fasta():
"""
"""
def __init__(self):
"""
"""
self.fastaDict = {}
#### FUNCTIONS ####
def fasta_reader(self, fastaFile):
"""
"""
fastaDict = {}
subHeader("Fasta reader")
fh = open(fastaFile)
# ditch the boolean (x[0]) and just keep the header or sequence since
# we know they alternate.
faiter = (x[1] for x in itertools.groupby(fh, lambda line: line[0] == ">"))
for header in faiter:
# drop the ">"
header = header.next()[1:].strip()
# drop the info
header = header.split(" ")[0]
info("Reading " + header + "...")
# join all sequence lines to one.
seq = "".join(s.strip() for s in faiter.next())
fastaDict[header] = seq
self.fastaDict = fastaDict
def write_fasta(self, outFilePath):
"""
"""
outFile = open(outFilePath, "w" )
for header, seq in self.fastaDict.iteritems():
header = ">" + header
outFile.write("%s\n" % header)
outFile.write("%s\n" % seq)
# Close output fasta file
outFile.close()
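# Hedged usage sketch for the fasta class above (illustrative only; the file
# names are hypothetical):
#   fastaObj = fasta()
#   fastaObj.fasta_reader('reads.fa')       # fills fastaObj.fastaDict as {header: sequence}
#   fastaObj.write_fasta('reads_copy.fa')   # writes the dictionary back to disk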
class cluster():
"""
"""
def __init__(self, alignmentObj, clippedSide):
"""
"""
self.chrom = alignmentObj.reference_name
self.clippedSide = clippedSide
self.bkpPos = alignmentObj.reference_start if clippedSide == "beg" else alignmentObj.reference_end
self.clippedReadDict = {}
self.consensusSeq = ""
def addClippedRead(self, alignmentObj):
"""
"""
mate = '/1' if alignmentObj.is_read1 else '/2'
readId = alignmentObj.query_name + mate
self.bkpPos = alignmentObj.reference_start if self.clippedSide == "beg" else alignmentObj.reference_end
operation = alignmentObj.cigartuples[0][0] if self.clippedSide == "beg" else alignmentObj.cigartuples[-1][0]
clipType = "soft" if operation == 4 else "hard"
self.clippedReadDict[readId] = {}
self.clippedReadDict[readId]["alignmentObj"] = alignmentObj
self.clippedReadDict[readId]["clipType"] = clipType
def nbReads(self):
"""
"""
return len(self.clippedReadDict)
def readIdList(self):
"""
"""
return list(self.clippedReadDict.keys())
def addReadSeqs(self, fastaObj):
"""
"""
for readId in self.clippedReadDict.keys():
alignmentObj = self.clippedReadDict[readId]["alignmentObj"]
## Make the reverse complementary of reads aligned on the reverse strand
if (alignmentObj.is_reverse == True):
readSeq = rev_complement(fastaObj.fastaDict[readId])
else:
readSeq = fastaObj.fastaDict[readId]
self.clippedReadDict[readId]["seq"]= readSeq
def makeConsensusSeq(self, outDir):
"""
multiple sequence alignment based
"""
## A) Single sequence
if len(self.clippedReadDict.keys()) == 1:
consensusSeq = list(self.clippedReadDict.values())[0]["seq"].upper()
## B) Multiple sequence
else:
command = 'mkdir -p ' + outDir
os.system(command) # returns the exit status
### 1. Create fasta file containing cluster supporting reads
fastaObj = fasta()
fastaDict = {}
for readId in self.clippedReadDict.keys():
fastaDict[readId] = self.clippedReadDict[readId]["seq"]
fastaObj.fastaDict = fastaDict
fastaPath = outDir + '/supportingReads.fa'
fastaObj.write_fasta(fastaPath)
### 2. Make multiple sequence alignment
msfPath = outDir + '/supportingReads.msf'
command = 'muscle -in ' + fastaPath + ' -out ' + msfPath + ' -msf'
print command
os.system(command) # returns the exit status
            ### 3. Generate consensus sequence (cons tool from the EMBOSS package)
consensusPath = outDir + '/consensus.fa'
command = 'cons -sequence ' + msfPath + ' -outseq ' + consensusPath + ' -identity 0 -plurality 0'
print command
os.system(command) # returns the exit status
### Read consensus sequence
fastaObj = fasta()
fastaObj.fasta_reader(consensusPath)
consensusSeq = fastaObj.fastaDict["EMBOSS_001"].upper()
### Do cleanup
command = 'rm ' + fastaPath + ' ' + msfPath + ' ' + consensusPath
os.system(command) # returns the exit status
## Replace '-' by 'N' for ambiguous bases:
consensusSeq = consensusSeq.replace('-', 'N')
## Convert consensus sequence into upper case:
consensusSeq = consensusSeq.upper()
return consensusSeq
#### FUNCTIONS ####
def log(label, string):
"""
Display labelled information
"""
print "[" + label + "]", string
def subHeader(string):
"""
Display subheader
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print timeInfo, "**", string, "**"
def info(string):
"""
Display basic information
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print timeInfo, string
def rev_complement(seq):
"""
Make the reverse complementary of a dna sequence
Input:
1) seq. DNA sequence
Output:
1) revComplementSeq. Reverse complementary of input DNA sequence
"""
baseComplementDict = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N'}
seq = seq.upper()
revSeq = seq[::-1] # Make reverse sequence
letters = list(revSeq)
letters = [baseComplementDict[base] for base in letters]
revComplementSeq = ''.join(letters) # Make complement of reverse sequence
return revComplementSeq
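# Worked example for rev_complement (illustrative, not part of the original
# script): for seq = 'AACG' the reverse is 'GCAA' and its complement is 'CGTT',
# so rev_complement('AACG') returns 'CGTT'.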
def overlap(begA, endA, begB, endB):
"""
Check if both ranges overlap. 2 criteria for defining overlap:
## A) Begin of the range A within the range B
# *beg* <---------range_A---------->
# <---------range_B---------->
# *beg* <-------range_A----->
# <-------------range_B------------------>
## B) Begin of the range B within the range A
# <---------range_A---------->
# *beg* <---------range_B---------->
# <-------------range_A----------------->
# *beg* <-------range_B------>
"""
# a) Begin of the range A within the range B
if ((begA >= begB) and (begA <= endB)):
overlap = True
# b) Begin of the range B within the range A
elif ((begB >= begA) and (begB <= endA)):
overlap = True
# c) Ranges do not overlapping
else:
overlap = False
return overlap
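# Worked examples for overlap (illustrative, not part of the original script):
#   overlap(1, 5, 4, 8) -> True  (range B starts inside range A)
#   overlap(1, 3, 5, 9) -> False (the ranges are disjoint)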
def getClippedPairedClusters(chrPlus, begPlus, endPlus, chrMinus, begMinus, endMinus, rgType, bamFile, windowSize):
"""
"""
## 1. Extract clipped reads for positive cluster
chrom = chrPlus
if (rgType == "DUP"):
beg = int(begPlus) - windowSize
end = int(begPlus) + windowSize
else:
beg = int(endPlus) - windowSize
end = int(endPlus) + windowSize
print "range_+: ", chrom, beg, end
clippedBegPlusList, clippedEndPlusList = getClippedInterval(chrom, beg, end, bamFile)
## 2. Extract clipped reads for negative cluster
chrom = chrMinus
if (rgType == "DUP"):
beg = int(endMinus) - windowSize
end = int(endMinus) + windowSize
else:
beg = int(begMinus) - windowSize
end = int(begMinus) + windowSize
clippedBegMinusList, clippedEndMinusList = getClippedInterval(chrom, beg, end, bamFile)
## 3. Merge clipped read lists:
clippedBegList = list(set(clippedBegPlusList + clippedBegMinusList))
clippedEndList = list(set(clippedEndPlusList + clippedEndMinusList))
return clippedBegList, clippedEndList
def getClippedUnpairedCluster(chrPlus, begPlus, endPlus, bamFile, windowSize):
"""
"""
## 1. Extract clipped reads for cluster beginning
chrom = chrPlus
beg = int(begPlus) - windowSize
end = int(begPlus) + windowSize
clippedBegClusterBegList, clippedEndClusterBegList = getClippedInterval(chrom, beg, end, bamFile)
## 2. Extract clipped reads for cluster ending
chrom = chrPlus
beg = int(endPlus) - windowSize
end = int(endPlus) + windowSize
clippedBegClusterEndList, clippedEndClusterEndList = getClippedInterval(chrom, beg, end, bamFile)
## 3. Merge clipped read lists:
clippedBegList = list(set(clippedBegClusterBegList + clippedBegClusterEndList))
clippedEndList = list(set(clippedEndClusterBegList + clippedEndClusterEndList))
return clippedBegList, clippedEndList
def getClippedInterval(chrom, beg, end, bamFile):
    '''Return the alignments in chrom:beg-end that are clipped only at the read start (clippedBegList) or only at the read end (clippedEndList).'''
#print "** pickClipped function **"
clippedBegList = []
clippedEndList = []
# Extract alignments in the interval
iterator = bamFile.fetch(chrom, beg, end)
# Iterate over the alignments
for alignmentObj in iterator:
### Discard unmapped reads and PCR duplicates
if (alignmentObj.is_unmapped == False) and (alignmentObj.is_duplicate == False):
firstOperation = alignmentObj.cigartuples[0][0]
lastOperation = alignmentObj.cigartuples[-1][0]
            #### Check if soft-clipped read
# Note: soft (Operation=4) or hard clipped (Operation=5)
# Discard reads clipped both in the beginning and ending
## a) Clipping at the beginning of the read while not clipping at all at the end
# *******--------- (clipped bases: *)
if ((firstOperation == 4) or (firstOperation == 5)) and ((lastOperation != 4) and (lastOperation != 5)):
clippedBegList.append(alignmentObj)
## b) Clipping at the end of the read while not clipping at all at the beginning
# ---------******* (clipped bases: *)
elif ((lastOperation == 4) or (lastOperation == 5)) and ((firstOperation != 4) and (firstOperation != 5)):
clippedEndList.append(alignmentObj)
return clippedBegList, clippedEndList
def clusterCLipped(clippedList, clippedSide, minNbReads, maxNbReads):
    '''Sort clipped reads by breakpoint position, merge breakpoints lying within 3 bp of the current cluster, and keep clusters supported by minNbReads to maxNbReads reads.'''
#print "** clusterCLipped function **"
### 1. Sort the list of clipped reads in increasing coordinates order
if (clippedSide == "beg"):
clippedSortedList = sorted(clippedList, key=lambda alignmentObj: alignmentObj.reference_start, reverse=False)
else:
clippedSortedList = sorted(clippedList, key=lambda alignmentObj: alignmentObj.reference_end, reverse=False)
### 2. Make clipped read clusters:
clusterList = []
## For each clipped read alignment
for alignmentObj in clippedSortedList:
# A) No cluster in the list -> Create first cluster
if not clusterList:
clusterObj = cluster(alignmentObj, clippedSide)
clusterObj.addClippedRead(alignmentObj)
clusterList.append(clusterObj)
# B) There is already at least one cluster in the list -> Check if current clipped read within the latest cluster
else:
## Define bkp position:
bkpPos = alignmentObj.reference_start if clippedSide == "beg" else alignmentObj.reference_end
## Define cluster range for searching for overlap
lastClusterObj = clusterList[-1]
begClusterRange = lastClusterObj.bkpPos
endClusterRange = lastClusterObj.bkpPos + 3
#### Check if clipped read within cluster range
overlapping = overlap(bkpPos, bkpPos, begClusterRange, endClusterRange)
## a) Overlapping ranges, so clipped read within previous cluster interval -> add read to the cluster
if overlapping:
lastClusterObj.addClippedRead(alignmentObj)
## b) Clipped read outside previous cluster interval -> create new cluster and add it into the list
else:
clusterObj = cluster(alignmentObj, clippedSide)
clusterObj.addClippedRead(alignmentObj)
clusterList.append(clusterObj)
### 3. Filter the clusters according to the number of reads supporting them (min and max cut-offs)
filteredClusterList = []
for clusterObj in clusterList:
if (clusterObj.nbReads() >= minNbReads) and (clusterObj.nbReads() <= maxNbReads):
filteredClusterList.append(clusterObj)
return filteredClusterList
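# Worked example for clusterCLipped (illustrative, not part of the original
# script): reads clipped at positions 100, 101, 103 and 120 on the same side
# yield two clusters, {100, 101, 103} and {120}, because a read is merged into
# the last cluster only when its breakpoint falls within 3 bp of that cluster's
# current breakpoint (assuming minNbReads=1 and a large maxNbReads).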
def filterNbClusters(clusterBegList, clusterEndList, maxNbClusters):
    '''Discard all clipped-read clusters when their combined number exceeds maxNbClusters, as such regions are most likely alignment artefacts.'''
totalNbClusters = len(clusterBegList) + len(clusterEndList)
    ## A) Number of clipped clusters higher than the threshold -> discard the clusters, as they are most likely the consequence of
# alignment artefacts. In a perfect scenario we would expect two clusters, a single one per breakpoint
if (totalNbClusters > maxNbClusters):
filteredClusterBegList = []
filteredClusterEndList = []
## B) Pass the filter
else:
filteredClusterBegList = clusterBegList
filteredClusterEndList = clusterEndList
return filteredClusterBegList, filteredClusterEndList
def clusterInMatchedNormal(clusterTumorList, clusterNormalList):
    '''Remove tumour clipped-read clusters whose +/-5 bp breakpoint window overlaps that of a cluster in the matched normal.'''
clusterTumorFilteredList = []
## For each clipped cluster in the tumour
for clusterTumorObj in clusterTumorList:
filtered = False
begTumor = clusterTumorObj.bkpPos - 5
endTumor = clusterTumorObj.bkpPos + 5
## For each clipped cluster in the normal
for clusterNormalObj in clusterNormalList:
begNormal = clusterNormalObj.bkpPos - 5
endNormal = clusterNormalObj.bkpPos + 5
## Filter those clusters matching with the normal
if overlap(begTumor, endTumor, begNormal, endNormal):
filtered = True
## Cluster not matching cluster in the normal
if not filtered:
clusterTumorFilteredList.append(clusterTumorObj)
return clusterTumorFilteredList
def filterDiscordantCluster(chrom, beg, end, readPairList, bamFile):
    '''Flag a discordant read-pair cluster as filtered when more than 50% of its supporting reads are clipped on both sides.'''
nbDiscordant = len(readPairList)
nbClippedBothSides = 0
readPairFilteredList = []
## Extract alignments in the interval
iterator = bamFile.fetch(chrom, beg, end)
## Iterate over the alignments
for alignmentObj in iterator:
## Supporting discordant paired-end read and cigar available
if (alignmentObj.query_name in readPairList) and (alignmentObj.cigartuples is not None):
firstOperation = alignmentObj.cigartuples[0][0]
lastOperation = alignmentObj.cigartuples[-1][0]
### A) Read clipped both in the beginning and ending
if ((firstOperation == 4) or (firstOperation == 5)) and ((lastOperation == 4) or (lastOperation == 5)):
nbClippedBothSides += 1
### B) Read not clipped in both sides
else:
readPairFilteredList.append(alignmentObj.query_name)
## Percentage of supporting paired ends that are clipped on both sides
percClippedBothSides = float(nbClippedBothSides) / nbDiscordant * 100
## Recompute the number of supporting paired ends after removing problematic reads
readPairFilteredList = list(set(readPairFilteredList))
nbFilteredDiscordant = len(readPairFilteredList)
## Discard cluster if more than 50% supporting paired-ends clipped on both sides:
if (percClippedBothSides > 50):
print "FILTER-CLUSTER: ", nbClippedBothSides, nbDiscordant, percClippedBothSides, nbFilteredDiscordant, readPairFilteredList
readPairFilteredList = []
nbFilteredDiscordant = 0
filtered = True
else:
filtered = False
return filtered
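# Worked example for filterDiscordantCluster (illustrative, not part of the
# original script): if 6 both-side-clipped alignments are found in a cluster
# supported by 10 read pairs, percClippedBothSides = 6/10 * 100 = 60 > 50,
# so the function returns filtered = True and the cluster is discarded.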
#### MAIN ####
## Import modules ##
import argparse
import sys
import os
import time
from operator import itemgetter, attrgetter, methodcaller
import pysam
import itertools
import subprocess
# Global variables:
global debugBool ## debug logging mode. Boolean.
# Environmental variables:
PICARD = os.environ['PICARD']
## Get user's input ##
parser = argparse.ArgumentParser(description= "")
parser.add_argument('insertions', help='tab-separated file with candidate insertions (31 columns per data line)')
parser.add_argument('tumourBam', help='Tumour bam file')
parser.add_argument('normalBam', help='Matched normal bam file')
parser.add_argument('--windowSize', default=50, dest='windowSize', type=int, help='Window size to search for clipped read clusters from discordant read-pair clusters ends. Default=50bp' )
parser.add_argument('--minNbReads', default=1, dest='minNbReads', type=int, help='Minimum number of clipped reads composing the cluster. Default: 1' )
parser.add_argument('--maxNbReads', default=500, dest='maxNbReads', type=int, help='Maximum number of clipped reads composing the cluster. Default: 500' )
parser.add_argument('--maxNbClusters', default=10, dest='maxNbClusters', type=int, help='Maximum number of clipped read clusters in the insertion region. Default: 10' )
parser.add_argument('-o', '--outDir', default=os.getcwd(), dest='outDir', help='output directory. Default: current working directory.' )
args = parser.parse_args()
insertionsPath = args.insertions
tumourBam = args.tumourBam
normalBam = args.normalBam
windowSize = args.windowSize
minNbReads = args.minNbReads
maxNbReads = args.maxNbReads
maxNbClusters = args.maxNbClusters
outDir = args.outDir
tmpDir = outDir + '/tmp'
scriptName = os.path.basename(sys.argv[0])
## Display configuration to standard output ##
print
print "***** ", scriptName, " configuration *****"
print "insertionsPath: ", insertionsPath
print "tumourBam: ", tumourBam
print "normalBam: ", normalBam
print "windowSize: ", windowSize
print "minNbReads: ", minNbReads
print "maxNbReads: ", maxNbReads
print "maxNbClusters: ", maxNbClusters
print "outDir: ", outDir
print
print "***** Executing ", scriptName, ".... *****"
print
## Start ##
## Open input files
insertions = open(insertionsPath, 'r')
## Open donor's BAM files for reading
tumourBamFile = pysam.AlignmentFile(tumourBam, "rb")
normalBamFile = pysam.AlignmentFile(normalBam, "rb")
clustersDict = {}
discordantReadPairList = []
## Read insertions file line by line
for line in insertions:
## Ignore comment lines (e.g. header)
if line.startswith('#'):
continue
line = line.rstrip('\n')
fieldsList = line.split("\t")
## Insertion line with the expected number of columns
if (int(len(fieldsList)) == 31):
chrPlus = fieldsList[0]
begPlus = fieldsList[1]
endPlus = fieldsList[2]
nbReadsPlus = fieldsList[3]
familyPlus = fieldsList[4]
readPairListPlus = fieldsList[5].split(",")
chrMinus = fieldsList[6]
begMinus = fieldsList[7]
endMinus = fieldsList[8]
nbReadsMinus = fieldsList[9]
familyMinus = fieldsList[10]
readPairListMinus = fieldsList[11].split(",")
insertionType = fieldsList[12]
rgType = fieldsList[30]
print "###### INSERTION: ", chrPlus, begPlus, endPlus, chrMinus, begMinus, endMinus, rgType
## Add discordant read pairs to the list:
discordantReadPairList = discordantReadPairList + readPairListPlus + readPairListMinus
## Define an insertion id (insertion coordinates defined by the end
# of + cluster and beg of - cluster)
if familyPlus == 'Other': # temporary fix
familyPlus = 'SVA'
insertionId = familyPlus + ":" + insertionType + ":" + chrPlus + "_" + endPlus + "_" + begMinus
### 0. Refine discordant paired end clusters:
## A) Paired clusters
if (begMinus != "NA") and (begMinus != "UNK"):
filteredPlus = filterDiscordantCluster(chrPlus, int(begPlus), int(endPlus), readPairListPlus, tumourBamFile)
filteredMinus = filterDiscordantCluster(chrMinus, int(begMinus), int(endMinus), readPairListMinus, tumourBamFile)
## B) Unpaired cluster
else:
filteredPlus = filterDiscordantCluster(chrPlus, int(begPlus), int(endPlus), readPairListPlus, tumourBamFile)
filteredMinus = False
## Discard those insertions with a high percentage of both-sides clipped reads supporting at least one of the clusters:
if (filteredPlus == True) or (filteredMinus == True):
clusterBegFilteredList = []
clusterEndFilteredList = []
else:
### 1. Search for clipped reads
## A) Paired clusters
if (begMinus != "NA") and (begMinus != "UNK"):
clippedBegList, clippedEndList = getClippedPairedClusters(chrPlus, begPlus, endPlus, chrMinus, begMinus, endMinus, rgType, tumourBamFile, windowSize)
clippedBegNormalList, clippedEndNormalList = getClippedPairedClusters(chrPlus, begPlus, endPlus, chrMinus, begMinus, endMinus, rgType, normalBamFile, windowSize)
## B) Unpaired cluster
else:
clippedBegList, clippedEndList = getClippedUnpairedCluster(chrPlus, begPlus, endPlus, tumourBamFile, windowSize)
clippedBegNormalList, clippedEndNormalList = getClippedUnpairedCluster(chrPlus, begPlus, endPlus, normalBamFile, windowSize)
### 2. Cluster clipped reads:
### 2.1 Tumour
clusterBegList = clusterCLipped(clippedBegList, "beg", minNbReads, maxNbReads)
clusterEndList = clusterCLipped(clippedEndList, "end", minNbReads, maxNbReads)
### 2.2 Matched normal
clusterBegNormalList = clusterCLipped(clippedBegNormalList, "beg", 3, maxNbReads)
clusterEndNormalList = clusterCLipped(clippedEndNormalList, "end", 3, maxNbReads)
### 3. Filter clusters of clipped reads:
## 3.1 Filter by the number of clipped-read clusters
clusterBegList, clusterEndList = filterNbClusters(clusterBegList, clusterEndList, maxNbClusters)
## 3.2 Filter those clusters that are also in the matched normal
## Clipping at the begin
clusterBegFilteredList = clusterInMatchedNormal(clusterBegList, clusterBegNormalList)
## Clipping at the end
clusterEndFilteredList = clusterInMatchedNormal(clusterEndList, clusterEndNormalList)
### 4. Add the 2 cluster lists to the dictionary:
clustersDict[insertionId] = {}
clustersDict[insertionId]["beg"] = clusterBegFilteredList
clustersDict[insertionId]["end"] = clusterEndFilteredList
tumourBamFile.close()
normalBamFile.close()
## 2) Make fasta containing the discordant paired-end reads +
##############################################################
# the reads supporting the clusters of clipped reads
####################################################
## 1. Make list containing the discordant paired-end reads
allReadPairIdList = discordantReadPairList
## 2. Add to the list the reads supporting the clusters of clipped reads
for insertionId in clustersDict:
clusterBegList = clustersDict[insertionId]["beg"]
clusterEndList = clustersDict[insertionId]["end"]
for clusterObj in clusterBegList:
readPairIdList = [readId.split("/")[0] for readId in clusterObj.readIdList()]
allReadPairIdList = allReadPairIdList + readPairIdList
for clusterObj in clusterEndList:
readPairIdList = [readId.split("/")[0] for readId in clusterObj.readIdList()]
allReadPairIdList = allReadPairIdList + readPairIdList
allReadPairIdList = list(set(allReadPairIdList))
## 3. Make file containing the supporting read ids
readPairsPath = outDir +'/allReadPairs.txt'
readPairsFile = open(readPairsPath, 'w')
for readPairId in allReadPairIdList:
row = readPairId + "\n"
readPairsFile.write(row)
## Important to close! Otherwise the next step won't work properly...
readPairsFile.close()
## 4. Extract read sequences with picard and generate fasta
readPairsFasta = outDir + '/allReadPairs.fa'
command = PICARD + ' FilterSamReads I=' + tumourBam + ' O=/dev/stdout READ_LIST_FILE=' + readPairsPath + ' FILTER=includeReadList WRITE_READS_FILES=false VALIDATION_STRINGENCY=SILENT QUIET=true | samtools fasta - > ' + readPairsFasta
print command
os.system(command)
## 3) Add to the reads supporting the clusters its complete sequence from fasta and
####################################################################################
# generate consensus sequence
##############################
fastaObj = fasta()
fastaObj.fasta_reader(readPairsFasta)
for insertionId in clustersDict:
print "********** ", insertionId, " *************"
clusterBegList = clustersDict[insertionId]["beg"]
clusterEndList = clustersDict[insertionId]["end"]
print "--- clusterBeg ---"
for clusterObj in clusterBegList:
clusterId = clusterObj.chrom + "_" + str(clusterObj.bkpPos) + "_" + clusterObj.clippedSide + "_" + str(clusterObj.nbReads())
consensusDir = tmpDir + '/' + clusterId
clusterObj.addReadSeqs(fastaObj)
clusterObj.consensusSeq = clusterObj.makeConsensusSeq(consensusDir)
#print "--- clusterEnd ---"
for clusterObj in clusterEndList:
clusterId = clusterObj.chrom + "_" + str(clusterObj.bkpPos) + "_" + clusterObj.clippedSide + "_" + str(clusterObj.nbReads())
consensusDir = tmpDir + '/' + clusterId
clusterObj.addReadSeqs(fastaObj)
clusterObj.consensusSeq = clusterObj.makeConsensusSeq(consensusDir)
## 4) For each insertion generate a fasta containing the consensus sequences for each cluster
##############################################################################################
for insertionId in clustersDict:
print "********** ", insertionId, " *************"
fastaDict = {}
clusterList = clustersDict[insertionId]["beg"] + clustersDict[insertionId]["end"]
## For each cluster
for clusterObj in clusterList:
## Include into the header the clipped read ids..
header = "cluster" + "_" + clusterObj.chrom + "_" + str(clusterObj.bkpPos) + "_" + clusterObj.clippedSide + "_" + str(clusterObj.nbReads()) + "\t" + ",".join(clusterObj.readIdList())
fastaDict[header] = clusterObj.consensusSeq
fastaObj = fasta()
fastaObj.fastaDict = fastaDict
## Write into the output file
fileName = insertionId + ".fa"
outFilePath = outDir + "/" + fileName
fastaObj.write_fasta(outFilePath)
### Make cleanup and finish
command = 'rm -r ' + readPairsPath + ' ' + tmpDir
os.system(command) # returns the exit status
print "***** Finished! *****"
print
| brguez/TEIBA | src/python/clusterClippedReads.paired.py | Python | gpl-3.0 | 27,306 | [
"pysam"
] | 380d803f1ad9abeabb73134bdebbe79ea815ca155b742a9a719c84a47c41b097 |
# -*- coding: utf-8 -*-
import argparse
import os
import sys
import nanoscope
import yaml
import logging
from random import random
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import colormaps as cmps
from matplotlib.ticker import FormatStrFormatter
from matplotlib.patches import Circle
from scipy import ndimage as ndi
from skimage import exposure
from skimage import filters
from skimage import measure
from skimage.feature import peak_local_max
from skimage.morphology import watershed, disk
log = logging.getLogger()
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
log.addHandler(ch)
matplotlib.style.use('ggplot')
AXES_NAMES = {
"avg_axis": {"en": "Average axis (nm)", "ru": u"Средняя ось (нм)"},
"frequency": {"en": "Frequency", 'ru': u"Частота"}
}
def _get_colors(num_colors):
colors = [(1, 1, 1)] + [(random(), random(), random()) \
for i in xrange(255)]
new_cmap = matplotlib.colors.LinearSegmentedColormap.from_list('new_cmap', \
colors, N=num_colors)
return new_cmap
def next_length_pow2(x):
return 2 ** np.ceil(np.log2(abs(x)))
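# Worked example (illustrative): next_length_pow2(300) returns 512.0, the
# smallest power of two that is >= 300.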
def band_pass_filter(fft_data, filter_large_dia=15, filter_small_dia=5):
fft_data_shape = fft_data.shape
side_len = fft_data_shape[0]
filter_large = 2.0 * filter_large_dia / side_len
filter_small = 2.0 * filter_small_dia / side_len
filter_data = np.ones_like(fft_data, dtype=np.float32)
#calculate factor in exponent of Gaussian from filterLarge / filterSmall
scale_large = filter_large ** 2
scale_small = filter_small ** 2
fft_data = fft_data.flatten()
filter_data = filter_data.flatten()
for j in np.arange(1, side_len/2):
row = j * side_len
backrow = (side_len - j) * side_len
row_fact_large = np.exp(-(j*j) * scale_large)
row_fact_small = np.exp(-(j*j) * scale_small)
for col in np.arange(1, side_len/2):
backcol = side_len - col
col_fact_large = np.exp(-(col*col) * scale_large)
col_fact_small = np.exp(-(col*col) * scale_small)
factor = (1 - row_fact_large*col_fact_large) * row_fact_small*col_fact_small
fft_data[col+row] = fft_data[col+row] * factor
fft_data[col+backrow] = fft_data[col+backrow] * factor
fft_data[backcol+row] = fft_data[backcol+row] * factor
fft_data[backcol+backrow] = fft_data[backcol+backrow] * factor
filter_data[col+row] = filter_data[col+row] * factor
filter_data[col+backrow] = filter_data[col+backrow] * factor
filter_data[backcol+row] = filter_data[backcol+row] * factor
filter_data[backcol+backrow] = filter_data[backcol+backrow] * factor
#process meeting points (maxN/2,0) , (0,maxN/2), and (maxN/2,maxN/2)
rowmid = side_len * (side_len/2)
row_fact_large = np.exp(- (side_len/2)*(side_len/2) * scale_large)
row_fact_small = np.exp(- (side_len/2)*(side_len/2) * scale_small)
fft_data[side_len/2] = fft_data[side_len/2] * (1 - row_fact_large) * row_fact_small
fft_data[rowmid] = fft_data[rowmid] * (1 - row_fact_large) * row_fact_small
fft_data[side_len/2 + rowmid] = fft_data[side_len/2 + rowmid] * \
(1 - row_fact_large * row_fact_large) * \
row_fact_small * row_fact_small
filter_data[side_len/2] = filter_data[side_len/2] * \
(1 - row_fact_large) * row_fact_small
filter_data[rowmid] = filter_data[rowmid] * \
(1 - row_fact_large) * row_fact_small
filter_data[side_len/2 + rowmid] = filter_data[side_len/2 + rowmid] * \
(1 - row_fact_large * row_fact_large) * \
row_fact_small * row_fact_small
#loop along row 0 and side_len/2
row_fact_large = np.exp(- (side_len/2)*(side_len/2) * scale_large)
row_fact_small = np.exp(- (side_len/2)*(side_len/2) * scale_small)
for col in np.arange(1, side_len/2):
backcol = side_len - col
col_fact_large = np.exp(- (col*col) * scale_large)
col_fact_small = np.exp(- (col*col) * scale_small)
fft_data[col] = fft_data[col] * (1 - col_fact_large) * col_fact_small
fft_data[backcol] = fft_data[backcol] * (1 - col_fact_large) * col_fact_small
fft_data[col+rowmid] = fft_data[col+rowmid] * \
(1 - col_fact_large*row_fact_large) * \
col_fact_small*row_fact_small
fft_data[backcol+rowmid] = fft_data[backcol+rowmid] * \
(1 - col_fact_large*row_fact_large) * \
col_fact_small*row_fact_small
filter_data[col] = filter_data[col] * (1 - col_fact_large) * col_fact_small
filter_data[backcol] = filter_data[backcol] * (1 - col_fact_large) * col_fact_small
filter_data[col+rowmid] = filter_data[col+rowmid] * \
(1 - col_fact_large*row_fact_large) * \
col_fact_small*row_fact_small
filter_data[backcol+rowmid] = filter_data[backcol+rowmid] * \
(1 - col_fact_large*row_fact_large) * \
col_fact_small*row_fact_small
#loop along column 0 and side_len/2
col_fact_large = np.exp(- (side_len/2)*(side_len/2) * scale_large)
col_fact_small = np.exp(- (side_len/2)*(side_len/2) * scale_small)
for j in np.arange(1, side_len/2):
row = j * side_len
backrow = (side_len - j) * side_len
row_fact_large = np.exp(- (j*j) * scale_large)
row_fact_small = np.exp(- (j*j) * scale_small)
fft_data[row] = fft_data[row] * (1 - row_fact_large) * row_fact_small
fft_data[backrow] = fft_data[backrow] * (1 - row_fact_large) * row_fact_small
fft_data[row+side_len/2] = fft_data[row+side_len/2] * \
(1 - row_fact_large*col_fact_large) * \
row_fact_small*col_fact_small
fft_data[backrow+side_len/2] = fft_data[backrow+side_len/2] * \
(1 - row_fact_large*col_fact_large) * \
row_fact_small*col_fact_small
filter_data[row] = filter_data[row] * \
(1 - row_fact_large) * row_fact_small
filter_data[backrow] = filter_data[backrow] * \
(1 - row_fact_large) * row_fact_small
filter_data[row+side_len/2] = filter_data[row+side_len/2] * \
(1 - row_fact_large*col_fact_large) * \
row_fact_small*col_fact_small
filter_data[backrow+side_len/2] = filter_data[backrow+side_len/2] * \
(1 - row_fact_large*col_fact_large) * \
row_fact_small*col_fact_small
fft_data = np.reshape(fft_data, fft_data_shape)
filter_data = np.reshape(filter_data, fft_data_shape)
return fft_data, filter_data
def particles_stats(segmented_data, properties, min_particle_size=5):
u_labeled_data = np.unique(segmented_data)
labeled_data = np.searchsorted(u_labeled_data, segmented_data)
stats = pd.DataFrame(columns=properties)
for region in measure.regionprops(labeled_data):
stats = stats.append({_property: region[_property] for _property in properties}, \
ignore_index=True)
return stats
def process_stats(particles_stats, pixel_scale_factor=0.512):
if 'major_axis_length' in particles_stats and 'minor_axis_length' in particles_stats:
particles_stats['avg_axis'] = (particles_stats['major_axis_length'] + \
particles_stats['minor_axis_length']) / 2.0
stats_columns = list(particles_stats.columns.values)
if 'label' in stats_columns:
stats_columns.remove('label')
def scale_values(item):
if isinstance(item, tuple):
return tuple(x / pixel_scale_factor for x in item)
else:
return item / pixel_scale_factor
particles_stats_scaled = particles_stats.copy()
particles_stats_scaled[stats_columns] = \
particles_stats_scaled[stats_columns].applymap(scale_values)
return particles_stats_scaled, particles_stats_scaled.columns.values
def segment_data(data, min_distance=5, footprint=disk(10), \
max_filt_footprint=disk(5), indices=False):
th_val = filters.threshold_otsu(data)
thresholded_particles = data > th_val
distance = ndi.distance_transform_edt(thresholded_particles)
distance = ndi.maximum_filter(distance, \
footprint=max_filt_footprint, \
mode='nearest')
local_maxi = peak_local_max(distance, min_distance=min_distance, \
indices=indices, footprint=footprint, \
labels=thresholded_particles)
labeled_data, num_features = ndi.measurements.label(local_maxi)
segmented_data = watershed(-distance, labeled_data, \
mask=thresholded_particles)
return segmented_data, local_maxi
def preprocess_data(data, small_particle=5, large_particle=15, \
min_exposure=5, max_exposure=95):
height, width = data.shape
pad_height, pad_width = next_length_pow2(height + 1), next_length_pow2(width + 1)
padded_data = np.zeros((pad_height, pad_width), dtype=np.int16)
pad_offset_y, pad_offset_x = pad_height/2 - height/2, pad_width/2 - width/2
crop_bbox = np.index_exp[pad_offset_y:pad_offset_y + height, \
pad_offset_x:pad_offset_x + width]
padded_data[crop_bbox] = data
fft_data = np.fft.fft2(padded_data)
filtered_fft_data, filter_data = band_pass_filter(fft_data, \
large_particle, \
small_particle)
ifft_data = np.fft.ifft2(filtered_fft_data)
filtered_data = ifft_data.real[crop_bbox].astype(np.float32)
p1, p2 = np.percentile(filtered_data, (min_exposure, max_exposure))
filtered_rescaled_data = exposure.rescale_intensity(filtered_data, \
in_range=(p1, p2))
return filtered_rescaled_data
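# Hedged usage sketch for preprocess_data (illustrative only; the synthetic
# array below stands in for a real AFM height channel):
#   noisy = np.random.rand(256, 256).astype(np.float32)
#   flat = preprocess_data(noisy, small_particle=5, large_particle=15)
# The call pads the image to a power-of-two size, band-pass filters its FFT
# and rescales the contrast between the 5th and 95th intensity percentiles.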
def create_histogram_figure(stats, output_path, column='avg_axis', range=[], \
color='r', figsize=(8,6), bins=20, language='en', \
verbose=False):
base_filename='histogram'
filename_suffix = '.png'
filtered_data = stats[column]
if len(range) and sum(range) != 0:
filtered_data = stats[column][(stats[column] >= np.min(range)) & \
(stats[column] <= np.max(range))]
fig, ax = plt.subplots()
fig.set_size_inches(figsize)
counts, bins, patches = ax.hist(filtered_data.values, \
bins=bins, \
color=color)
ax.set_xticks(bins)
ax.xaxis.set_major_formatter(FormatStrFormatter('%0.f'))
plt.xlabel(AXES_NAMES[column][language])
plt.ylabel(AXES_NAMES['frequency'][language])
plt.savefig(os.path.join(output_path, base_filename + '_' + \
column + filename_suffix), bbox_inches='tight')
if verbose:
plt.show()
def create_overlay_figure(data, data_mask, label_stats, filename, \
output_path, base_filename='label_overlay', \
filename_suffix='.png', figsize=(8,8), verbose=False):
if not len(label_stats.index):
log.critical('No data stats collected.')
sys.exit(1)
fig = plt.figure(figsize=figsize)
ax = fig.add_axes([.0, .0, 1.0, 1.0])
ax.imshow(data, cmap=cm.gray, interpolation='bicubic')
plt.axis('off')
fig.axes[0].get_xaxis().set_visible(False)
fig.axes[0].get_yaxis().set_visible(False)
ax.imshow(data_mask, alpha=0.3, cmap=_get_colors(len(label_stats.index)), \
interpolation='bicubic')
plt.savefig(os.path.join(output_path, '_'.join([filename, base_filename]) + \
filename_suffix), bbox_inches='tight', pad_inches=0)
if verbose:
plt.show()
def create_axis_figure(data, label_stats, filename, output_path, \
base_filename='axis', file_ext='.png', figsize=(8,8), \
verbose=False):
fig = plt.figure(figsize=figsize)
ax = fig.add_axes([.0, .0, 1.0, 1.0])
ax.imshow(data, cmap=cm.gray, interpolation='bicubic')
for index, row in label_stats.iterrows():
y0, x0 = row.centroid
orientation = row.orientation
x1 = x0 + np.cos(orientation) * 0.5 * row.major_axis_length
y1 = y0 - np.sin(orientation) * 0.5 * row.major_axis_length
x2 = x0 - np.sin(orientation) * 0.5 * row.minor_axis_length
y2 = y0 - np.cos(orientation) * 0.5 * row.minor_axis_length
ax.plot((x0, x1), (y0, y1), '-r', linewidth=1.5)
ax.plot((x0, x2), (y0, y2), '-r', linewidth=1.5)
ax.plot(x0, y0, '.g', markersize=5)
approx_particle = Circle((x0, y0), \
radius=row.avg_axis*0.5, \
edgecolor='b', \
linewidth=1, \
fill=False)
ax.add_patch(approx_particle)
plt.axis('off')
for axis in fig.axes:
axis.autoscale_view('tight')
axis.set_xlim(0, data.shape[1])
axis.set_ylim(0, data.shape[0])
axis.get_xaxis().set_visible(False)
axis.get_yaxis().set_visible(False)
plt.savefig(os.path.join(output_path, \
'_'.join([filename, base_filename]) + file_ext), \
bbox_inches='tight', pad_inches=0)
if verbose:
plt.show()
def process_sample(sample, verbose=False):
log.info("### %s is being processed... ###" % sample['name'])
afm_image = nanoscope.read(os.path.join(sample['input_path'], sample['filename']))
data = afm_image.height.data
properties=['label','area','centroid','equivalent_diameter', \
'major_axis_length','minor_axis_length','orientation','bbox']
if not os.path.exists(sample['output_path']):
os.makedirs(sample['output_path'])
log.info("----- Data preprocessing...")
processed_data = preprocess_data(data, \
small_particle=sample['bp_filter']['min'], \
large_particle=sample['bp_filter']['max'], \
min_exposure=sample['intensity_scale']['min'], \
max_exposure=sample['intensity_scale']['max'])
log.info("----- Segmenting...")
segmented_data, local_maxi = segment_data(processed_data, \
min_distance=sample['segmentation']['min_dist'], \
footprint=disk(sample['segmentation']['peak_footprint']), \
max_filt_footprint=disk(sample['segmentation']['filter_footprint']))
log.info("----- Collecting particle statistics...")
label_stats = particles_stats(segmented_data, properties)
log.info("----- Processing of particle statistics...")
processed_stats, columns = process_stats(label_stats, \
pixel_scale_factor=sample['pixel_scale'])
log.info("----- Overlay image creation...")
create_overlay_figure(data, segmented_data, label_stats, \
sample['name'], sample['output_path'], verbose=verbose)
log.info("----- Particle's axes image creation...")
create_axis_figure(data, label_stats, sample['name'], \
sample['output_path'], verbose=verbose)
log.info("----- Histogram plotting...")
create_histogram_figure(processed_stats, \
sample['output_path'], \
color=sample['histogram']['color'], \
range=sample['histogram']['range'], \
bins=sample['histogram']['bins'], \
language=sample['histogram']['lang'], \
figsize=tuple(sample['histogram']['figsize']),
verbose=verbose)
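# Hedged sketch of one entry of the param.yaml file read by main() below.
# The field names are exactly the keys accessed in process_sample above;
# every value (and the file names) is illustrative only.
#
# samples:
#   - name: sample01
#     input_path: ./data
#     filename: sample01.spm
#     output_path: ./results/sample01
#     pixel_scale: 0.512
#     bp_filter: {min: 5, max: 15}
#     intensity_scale: {min: 5, max: 95}
#     segmentation: {min_dist: 5, peak_footprint: 10, filter_footprint: 5}
#     histogram: {color: r, range: [0, 0], bins: 20, lang: en, figsize: [8, 6]}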
def main():
parser = argparse.ArgumentParser()
parser.add_argument("param", help="path to file of samples' parameters (e.g. param.yaml)")
parser.add_argument("-v", "--verbose", action="store_true", help="show images and charts")
args = parser.parse_args()
if args.param:
try:
with open(args.param, 'r') as f:
config = yaml.load(f)
try:
samples = config['samples']
for sample in samples:
process_sample(sample, verbose=args.verbose)
except KeyError:
print "No samples in configuration file."
sys.exit(1)
except IOError:
print "Can't open %s" % args.param
sys.exit(1)
if __name__ == "__main__":
sys.exit(main())
| rshkarin/afm-particle-analysis | run.py | Python | mit | 17,662 | [
"Gaussian"
] | 6f544c43399703f58a195aa88c3247050898f670ee7671f320c14cf5fcdb8e0d |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AST visiting and transformation patterns."""
from __future__ import absolute_import
from collections import deque
from copy import copy
import gast
from tangent import annotations as anno
from tangent import grammar
class TreeTransformer(gast.NodeTransformer):
"""A transformer that allows for non-local changes.
An extension of the standard `NodeTransformer` in Python's `ast` package.
This transformer can insert statements right before or after the current
statement, at the end or beginning of the current block, or at the top of the
function.
This class is meant to be subclassed in the same way as Python's
`NodeTransformer` class. The subclasses can then call the `append`,
`prepend`, etc. methods as appropriate to transform the AST.
Note that nodes that are appended or prepended using the `append` and
`prepend` methods will be visited by the transformer. This means that they
can recursively append or prepend statements of their own. This doesn't hold
for statements that are appended/prepended to the block or function body;
these inserted statements are not visited after being inserted.
To see which nodes classify as statements or which node fields classify as
blocks, please see `grammar.py`.
Attributes:
to_remove: After the initial pass, this contains a set of nodes that will
be removed. A second pass is automatically performed using the `Remove`
transformer to actually remove those nodes.
"""
def __init__(self):
self.to_insert = []
self.to_prepend = []
self.to_append = []
self.to_prepend_block = []
self.to_append_block = []
self.to_insert_top = deque()
self.to_remove = set()
self._top = True
def prepend(self, node):
"""Prepend a statement to the current statement.
Note that multiple calls to prepend will result in the last statement to be
prepended to end up at the top.
Args:
node: The statement to prepend.
Raises:
ValueError: If the given node is not a statement.
"""
if not isinstance(node, grammar.STATEMENTS):
raise ValueError
self.to_prepend[-1].appendleft(node)
def append(self, node):
"""Append a statement to the current statement.
Note that multiple calls to append will result in the last statement to be
appended to end up at the bottom.
Args:
node: The statement to append.
Raises:
ValueError: If the given node is not a statement.
"""
if not isinstance(node, grammar.STATEMENTS):
raise ValueError
self.to_append[-1].append(node)
def remove(self, node):
"""Remove the given node."""
self.to_remove.add(node)
def insert_top(self, node):
"""Insert statements at the top of the function body.
Note that multiple calls to `insert_top` will result in the statements
being prepended in that order; this is different behavior from `prepend`.
Args:
node: The statement to prepend.
Raises:
ValueError: If the given node is not a statement.
"""
if not isinstance(node, grammar.STATEMENTS):
raise ValueError
self.to_insert_top.append(node)
def prepend_block(self, node, reverse=False):
"""Prepend a statement to the current block.
Args:
node: The statement to prepend.
reverse: When called multiple times, this flag determines whether the
statement should be prepended or appended to the already inserted
statements.
Raises:
ValueError: If the given node is not a statement.
"""
if not isinstance(node, grammar.STATEMENTS):
raise ValueError
if reverse:
self.to_prepend_block[-1].appendleft(node)
else:
self.to_prepend_block[-1].append(node)
def append_block(self, node, reverse=False):
"""Append a statement to the current block.
Args:
node: The statement to prepend.
reverse: When called multiple times, this flag determines whether the
statement should be prepended or appended to the already inserted
statements.
Raises:
ValueError: If the given node is not a statement.
"""
if not isinstance(node, grammar.STATEMENTS):
raise ValueError
if reverse:
self.to_append_block[-1].appendleft(node)
else:
self.to_append_block[-1].append(node)
def visit_statements(self, nodes):
"""Visit a series of nodes in a node body.
This function is factored out so that it can be called recursively on
statements that are appended or prepended. This allows e.g. a nested
expression to prepend a statement, and that statement can prepend a
statement again, etc.
Args:
nodes: A list of statements.
Returns:
A list of transformed statements.
"""
for node in nodes:
if isinstance(node, gast.AST):
self.to_prepend.append(deque())
self.to_append.append(deque())
node = self.visit(node)
self.visit_statements(self.to_prepend.pop())
if isinstance(node, gast.AST):
self.to_insert[-1].append(node)
elif node:
self.to_insert[-1].extend(node)
self.visit_statements(self.to_append.pop())
else:
self.to_insert[-1].append(node)
return self.to_insert[-1]
def generic_visit(self, node):
is_top = False
if self._top:
is_top = True
self._top = False
for field, old_value in gast.iter_fields(node):
if isinstance(old_value, list):
if (type(node), field) in grammar.BLOCKS:
self.to_prepend_block.append(deque())
self.to_append_block.append(deque())
self.to_insert.append(deque())
new_values = copy(self.visit_statements(old_value))
self.to_insert.pop()
else:
new_values = []
for value in old_value:
if isinstance(value, gast.AST):
value = self.visit(value)
if value is None:
continue
elif not isinstance(value, gast.AST):
new_values.extend(value)
continue
new_values.append(value)
if isinstance(node, gast.FunctionDef) and field == 'body':
new_values.extendleft(self.to_insert_top)
self.to_insert_top = deque([])
if (type(node), field) in grammar.BLOCKS:
new_values.extendleft(self.to_prepend_block.pop())
return_ = None
if new_values and isinstance(new_values[-1], gast.Return):
return_ = new_values.pop()
new_values.extend(self.to_append_block.pop())
if return_:
new_values.append(return_)
old_value[:] = new_values
elif isinstance(old_value, gast.AST):
new_node = self.visit(old_value)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
if is_top and self.to_remove:
Remove(self.to_remove).visit(node)
return node
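# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative only; not part of the original module).
# Subclasses normally override visit_* methods and call the insertion helpers
# from there. The class name, the statement node and the source string below
# are made up, and gast node construction may need adjusting to the installed
# gast version, so the sketch is kept as a comment:
#
#   class AppendAfterAssign(TreeTransformer):
#     def visit_Assign(self, node):
#       self.append(some_statement_node)  # queued right after this assignment
#       return node
#
#   tree = gast.parse('def f(x):\n  y = x\n  return y')
#   AppendAfterAssign().visit(tree)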
class Remove(gast.NodeTransformer):
"""Remove statements containing given nodes.
If an entire block was deleted, it will delete the relevant conditional or
loop entirely. Note that deleting an entire function body will result in an
invalid AST.
Calls to user functions that were generated by Tangent will not be removed
because this might result in incorrect writing and reading from the tape.
Args:
to_remove: A set of nodes that need to be removed. Note that the entire
statement containing this node will be removed e.g. `y = f(x)` with `x`
being in `to_remove` will result in the entire statement being removed.
"""
def __init__(self, to_remove):
self.to_remove = to_remove
self.remove = False
self.is_call = False
def visit(self, node):
if node in self.to_remove:
self.remove = True
if anno.hasanno(node, 'pri_call') or anno.hasanno(node, 'adj_call'):
# We don't remove function calls for now; removing them also
# removes the push statements inside of them, but not the
# corresponding pop statements
self.is_call = True
new_node = super(Remove, self).visit(node)
if isinstance(node, grammar.STATEMENTS):
if self.remove and not self.is_call:
new_node = None
self.remove = self.is_call = False
if isinstance(node, gast.If) and not node.body:
# If we optimized away an entire if block, we need to handle that
if not node.orelse:
return
else:
node.test = gast.UnaryOp(op=gast.Not(), operand=node.test)
node.body, node.orelse = node.orelse, node.body
elif isinstance(node, (gast.While, gast.For)) and not node.body:
return node.orelse
return new_node
| google/tangent | tangent/transformers.py | Python | apache-2.0 | 9,307 | [
"VisIt"
] | c695a5024d562e5d4d878fe0edc9a625eb5cd6312752ce20bd9a0376586e2c38 |
from ase.lattice import bulk
import pyscfdump.scf as scf
from pyscfdump.helpers import build_cell
from pyscfdump.pbcfcidump import fcidump
A2B = 1.889725989 #angstrom to bohr conversion
a=3.567 #lattice parameter
ke=1000 #kinetic energy cutoff in units of rydberg
basis='sto-3g' #basis set choice
nmp = [2,1,1] #k point sampling
#prepare the cell object
ase_atom = bulk('C', 'diamond', a=a*A2B)
cell = build_cell(ase_atom, ke=ke, basis=basis, pseudo='gth-pade')
#run the HF calculation
kmf,scaled_kpts = scf.run_khf(cell, nmp=nmp, exxdiv='ewald', gamma=True)
#dump the integrals
fcidump('fcidumpfile',kmf,nmp,scaled_kpts,False)
| hande-qmc/hande | documentation/manual/tutorials/calcs/ccmc_solids/diamond_HF.py | Python | lgpl-2.1 | 636 | [
"ASE"
] | 13098d0c1718893e21d5852f623f6c5510ba443cd77bdce2932aacb0ac74a9c9 |
# -*- coding: utf-8 -*-
#!/usr/bin/env python
# using naming on http://www.gslib.com/gslib_help/programs.html
import subprocess
import copy
import pandas as pd
import pygslib
import numpy as np
import os
__varmap_par = \
""" Parameters for VARMAP
*********************
START OF PARAMETERS:
{datafl} -file with data
{nvar} {ivar_} - number of variables: column numbers
{tmin} {tmax} - trimming limits
{igrid} - 1=regular grid, 0=scattered values
{nx} {ny} {nz} - if igrid=1: nx, ny, nz
{xsiz} {ysiz} {zsiz} - xsiz, ysiz, zsiz
{icolx} {icoly} {icolz} - if igrid=0: columns for x, y, z coordinates
{outfl} -file for variogram grid output
{nxlag} {nylag} {nzlag} -nxlag, nylag, nzlag
{dxlag} {dylag} {dzlag} -dxlag, dylag, dzlag
{minpairs} -minimum number of pairs
{standardize} -standardize sill? (0=no, 1=yes)
{nvarg} -number of variograms
{ivpar_} -tail, head, variogram type (array with shape [nvarg,4])
vg type 1 = traditional semivariogram
2 = traditional cross semivariogram
3 = covariance
4 = correlogram
5 = general relative semivariogram
6 = pairwise relative semivariogram
7 = semivariogram of logarithms
8 = semimadogram
9 = indicator semivariogram - continuous
10= indicator semivariogram - categorical
cut[i] is only required if ivtype[i] == 9 or == 10
"""
def varmap(parameters, gslib_path = None, silent = False, xorg=0., yorg=0., zorg=0.):
"""varmap(parameters, gslib_path = None)
    Function to calculate variogram maps (grids) using the varmap.exe external
    gslib program.
Parameters
----------
parameters : dict
dictionary with parameters
gslib_path : string (default None)
absolute or relative path to gslib excecutable programs
    silent: boolean
        if True, the stdout text of the external GSLIB program is not printed
xorg, yorg, zorg: floats (default 0.)
        origin of coordinates of the variogram map in the vtkImageData output
    Returns
    -------
    (pandas.DataFrame, list of vtkImageData) with variogram map results
Example
--------
TODO:
Notes
------
The dictionary with parameters may be as follows::
parameters = {
            'datafl' : str, None, or numpy, # path to file, or None (to use '_xxx_.in') or numpy array (with columns [x,y])
'ivar' : 1D array of int, # variables column numbers to be used in ivtail and ivhead,
            'tmin' : float, # trimming limits min and max (rows out of this range will be ignored)
'tmax' : float,
'igrid' : int, # 1=regular grid, 0=scattered values
'nx':int,'ny':int,'nz':int, # if igrid=1: nx, ny, nz
'xsiz':float,'ysiz':float,'zsiz':float, # if igrid=1: xsiz, ysiz, zsiz
'icolx':int,'icoly':int,'icolz':int, # if igrid=0: columns for x, y, z coordinates
'outfl': str or None, # path to the output file or None (to use '_xxx_.out')
'nxlag':int,'nylag':int,'nzlag':int, # nxlag, nylag, nzlag
'dxlag':float,'dylag':float,'dzlag':float, # dxlag, dylag, dzlag
'minpairs': int, # minimum number of pairs
'standardize': int, # standardize sill? (0=no, 1=yes)
'ivpar': 2D array of int # tail, head, variogram type, and cut (with shape [nvarg,4])
}
ivtype 1 = traditional semivariogram
2 = traditional cross semivariogram
3 = covariance
4 = correlogram
5 = general relative semivariogram
6 = pairwise relative semivariogram
7 = semivariogram of logarithms
8 = semimadogram
9 = indicator semivariogram - continuous
10= indicator semivariogram - categorical
see http://www.gslib.com/gslib_help/varmap.html for more information
"""
if gslib_path is None:
if os.name == "posix":
gslib_path = '~/gslib/varmap'
else:
gslib_path = 'c:\\gslib\\varmap.exe'
mypar = copy.deepcopy(parameters)
    # handle the case where the input is an array and not a file
if isinstance(parameters['datafl'], np.ndarray):
mypar['datafl']='_xxx_.in'
if parameters['datafl'].ndim<2:
parameters['datafl']= parameters['datafl'].reshape([parameters['datafl'].shape[0],1])
if mypar['igrid']== 1:
mypar['ivar'] = np.arange(parameters['datafl'].shape[1])+1
else:
mypar['ivar'] = np.arange(parameters['datafl'].shape[1]-3)+1
mypar['icolx']=1
mypar['icoly']=2
mypar['icolz']=3
with open('_xxx_.in',"w") as f:
f.write('temp file '+'\n')
f.write('{}'.format(parameters['datafl'].shape[1])+'\n')
if mypar['igrid']==1:
for i in range(parameters['datafl'].shape[1]):
f.write('v{}\n'.format(i+1))
else:
f.write('x\ny\nz\n')
for i in range(3,parameters['datafl'].shape[1]):
f.write('v{}\n'.format(i-2))
np.savetxt(f,parameters['datafl'])
elif parameters['datafl'] is None:
mypar['datafl']='_xxx_.in'
        # some updates to ensure the parameter file is good
if mypar['igrid']==0:
mypar['nx']= 0
mypar['ny']= 0
mypar['nz']= 0
mypar['xsiz']= 0
mypar['ysiz']= 0
mypar['zsiz']= 0
if mypar['igrid']==1:
mypar['icolx']= 0
mypar['icoly']= 0
mypar['icolz']= 0
if mypar['outfl'] is None:
mypar['outfl'] = '_xxx_.out'
ivpar = np.array (mypar['ivpar'])
assert (ivpar.shape[1]==4)
    assert (set(ivpar[:,0]).issubset(set(mypar['ivar']))) # tail variable
    assert (set(ivpar[:,1]).issubset(set(mypar['ivar']))) # head variable
assert (set(ivpar[:,2]).issubset(set([1,2,3,4,5,6,7,8,9,10]))) # ivtype
for i in range(ivpar.shape[0]):
if ivpar[i,2]<9:
ivpar[i,3] = None
else:
if ivpar[i,3]==None:
raise NameError('gslib varmap Error inparameter file: cut[{}]=None'.format(i))
# prepare parameter file and save it
mypar['nvar'] = len(mypar['ivar'])
mypar['ivar_'] = ' '.join(map(str, mypar['ivar'])) # array to string
mypar['nvarg'] = ivpar.shape[0]
mypar['ivpar_'] = pd.DataFrame.to_string(pd.DataFrame(ivpar),index= False, header=False) # array to string
par = __varmap_par.format(**mypar)
print (par)
fpar ='_xxx_.par'
with open(fpar,"w") as f:
f.write(par)
# call pygslib
# this construction can be used in a loop for parallel execution
p=subprocess.Popen([gslib_path, fpar],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
result = p.returncode
p.wait()
if p.returncode!=0:
        raise NameError('gslib varmap error: ' + str(stderr.decode('utf-8')))
    if not silent:
try:
print (stdout.decode('utf-8'))
except:
print (stdout)
    # return results as a pandas DataFrame
nxg=mypar['nxlag']*2+1
nyg=mypar['nylag']*2+1
nzg=mypar['nzlag']*2+1
result = pygslib.gslib.read_gslib_file(mypar['outfl'])
result['ID'] = np.repeat(np.arange(ivpar.shape[0]), nxg*nyg*nzg) # add variogram id
    # prepare list of vtk objects, one per variogram map
vmaps = []
for i in range(mypar['nvarg']):
vmaps.append(pygslib.vtktools.grid2vtkImageData(nx=nxg, ny=nyg, nz=nzg,
xorg=xorg, yorg=yorg, zorg=zorg,
dx=mypar['dxlag'],
dy=mypar['dylag'],
dz=mypar['dzlag'],
cell_data=result[result['ID']==i].to_dict(orient='list')))
return result, vmaps
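# --- Illustrative usage (added for clarity; not part of the original ------
# module). A minimal sketch of the parameters dictionary described in the
# docstring, here for scattered data (igrid=0). The data array, lag counts
# and spacings are made-up values, and actually running the call requires
# the external GSLIB varmap executable to be installed.
if __name__ == '__main__':
    demo_data = np.random.rand(100, 4)         # columns: x, y, z, value
    demo_parameters = {
        'datafl': demo_data,                   # numpy input is written to _xxx_.in
        'ivar': [1],                           # value column (recomputed for numpy input)
        'tmin': -1.0e21, 'tmax': 1.0e21,       # keep all rows
        'igrid': 0,                            # scattered values
        'nx': 0, 'ny': 0, 'nz': 0,
        'xsiz': 0, 'ysiz': 0, 'zsiz': 0,
        'icolx': 1, 'icoly': 2, 'icolz': 3,
        'outfl': None,                         # default output file _xxx_.out
        'nxlag': 10, 'nylag': 10, 'nzlag': 0,
        'dxlag': 0.1, 'dylag': 0.1, 'dzlag': 0.1,
        'minpairs': 5,
        'standardize': 1,
        'ivpar': [[1, 1, 1, None]],            # tail 1, head 1, semivariogram, no cut
    }
    # result, vmaps = varmap(demo_parameters)  # uncomment if GSLIB's varmap is available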
| opengeostat/pygslib | sandbox/varmap.py | Python | mit | 8,397 | [
"VTK"
] | 54cd5c79b0be737be7aff246df6142c3d2a1700c39c3589b55e6c92e771a7388 |
#!/usr/bin/env python
import os
import numpy as np
import pandas as pd
import subprocess as sp
import nexus_addon as na
def locate_bundle_input(path):
  # locate the bundled QMCPACK input
fidx = 0
out_tokens = sp.check_output('ls %s/*.in' %path
,shell=True).split('\n')[:-1]
good_in = 0
if len(out_tokens) > 1:
for i in range(len(out_tokens)):
fname = out_tokens[i]
if not fname.endswith('.qsub.in'):
good_in += 1
fidx = i
# end if
# end for
# end if
if (len(out_tokens) != 1) and (good_in != 1) :
raise NotImplementedError(
'%d inputs found in %s, can only handle 1' % (len(out_tokens),path)
)
# end if
return out_tokens[fidx]
# end def
def collect_raw_data(paths,skip_failed=False,db_name='twists.json',verbose=True):
failed = False
# initialize analyzer
from qmca import QBase
options = {"equilibration":"auto"}
QBase.options.transfer_from(options)
for path in paths:
if verbose:
print "getting raw data from %s" % path
# end if
target_json = os.path.join(path,db_name)
if os.path.isfile(target_json):
continue
# end if
bundled_input = locate_bundle_input(path)
igroup_dict = {}
with open(bundled_input,'r') as f:
igroup = 0
for line in f:
infile = line.strip('\n')
igroup_dict[infile] = igroup
igroup += 1
# end for line
# end with open
# make a database of all the scalar files
data = []
for qmc_input in igroup_dict.keys():
entry = na.scalars_from_input(os.path.join(path,qmc_input)
,skip_failed=skip_failed,igroup=igroup_dict[qmc_input])
data.append(entry)
# end for
df = pd.DataFrame([entry for sublist in data for entry in sublist])
# save raw data in local directory
if len(df)!=0:
pd.concat([df,df['settings'].apply(pd.Series)],axis=1).to_json(target_json)
else:
failed=True
# end if
# end for path
return failed
# end def collect_raw_data
def average_twists(paths,src_db_name='twists.json',tar_db_name='scalars.json',manual_twists=None,verbose=True,skip_failed=False):
failed = False
for path in paths:
if verbose:
print "averaging twists in %s" % path
# end if
target_json = os.path.join(path,tar_db_name)
if os.path.isfile(target_json):
continue
# end if
source_json = os.path.join(path,src_db_name)
# load local data
if not os.path.exists(source_json):
msg = 'cannot locate %s, is it collected?' % source_json
if skip_failed:
print msg
continue
else:
raise IOError(msg)
# end if
# end if
df_all = pd.read_json(source_json)
df = df_all # may select twists
if manual_twists is not None:
sel = df_all['twistnum'].apply(lambda x:x in manual_twists)
df = df_all[sel]
if len(manual_twists) != len(df[df['iqmc']==0]):
raise NotImplementedError('wanted %d twists, found %d'%(
len(manual_twists),len(df[df['iqmc']==0])) )
# end if
# end if
# exclude columns that don't need to be averaged, add more as needed
special_colmns = ['iqmc','method','path','settings','vol_unit','volume']
columns_to_average = df.drop(special_colmns,axis=1).columns
mean_names = []
error_names= []
for col_name in columns_to_average:
if col_name.endswith('_mean'):
mean_names.append(col_name)
elif col_name.endswith('_error'):
error_names.append(col_name)
# end if
# end for col_name
col_names = []
for iname in range(len(mean_names)):
mname = mean_names[iname].replace('_mean','')
ename = error_names[iname].replace('_error','')
assert mname == ename
col_names.append(mname)
# end for i
# perform twist averaging
new_means = df.groupby('iqmc')[mean_names].apply(np.mean)
ntwists = len(df[df['iqmc']==0]) # better way to determine ntwists?
new_errors = df.groupby('iqmc')[error_names].apply(
lambda x:np.sqrt((x**2.).sum())/ntwists)
# make averaged database
dfev = pd.merge(new_means.reset_index(),new_errors.reset_index())
extras = df[special_colmns].groupby('iqmc').apply(lambda x:x.iloc[0])
newdf = pd.merge( extras.drop('iqmc',axis=1).reset_index(), dfev)
newdf.to_json(target_json)
# end for
return failed
# end def average_twists
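# quick numeric illustration (added for clarity; not part of the original
# script) of the twist-average error propagation used above: combining
# ntwists equal errors in quadrature and dividing by ntwists shrinks the
# error by 1/sqrt(ntwists), e.g.
#   >>> errs = np.array([0.01, 0.01, 0.01, 0.01])
#   >>> np.sqrt((errs**2.).sum())/len(errs)   # -> 0.005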
if __name__ == '__main__':
import argparse
# parse command line input for trace file name
parser = argparse.ArgumentParser(description='collect DMC data from a directory of QMCPACK runs')
parser.add_argument('src_dir', type=str, help='directory containing QMCPACK runs')
parser.add_argument('final_target_json', type=str, help='json file to save collected data')
parser.add_argument('-rid','--rundir_id', type=str,default='dmc_444',help='run directory identifier, default dmc_444')
parser.add_argument('-r','--only_real', action='store_true', help='only use real twists out of 64 twists')
parser.add_argument('-v','--verbose', action='store_true', help='report progress')
parser.add_argument('-skipf','--skip_failed', action='store_true', help='skip failed runs')
args = parser.parse_args()
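  # example invocation (illustrative paths and options; added for clarity):
  #   python gather_static_twists.py ./runs all_dmc.json -rid dmc_444 -r -v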
# parsed inputs
# specify source data directory and target database file
src_dir = args.src_dir
final_target_json = args.final_target_json
only_real = args.only_real
rundir_id = args.rundir_id
verbose = args.verbose
# hard-coded inputs
sfile_name = 'scalars.json'
if only_real:
sfile_name = 'new_real_' + sfile_name
# end if
# get twist run locations
proc = sp.Popen(['find',src_dir,'-name',rundir_id]
,stdout=sp.PIPE,stderr=sp.PIPE)
out,err = proc.communicate()
paths = out.split('\n')[:-1]
# collect raw data in local directories
failed = collect_raw_data(paths,skip_failed=args.skip_failed)
if failed and not args.skip_failed:
raise NotImplementedError('raw data collection failed')
# end if
# store twist-averaged data in local directories
mol_manual_twists = [0,2,8,10,32,34,40,42] # for 64 twists
i4_manual_twists = [0,4,32,36,256,260,288,292] # for 512 twists
if only_real:
failed = average_twists(paths,tar_db_name=sfile_name,manual_twists=mol_manual_twists,skip_failed=args.skip_failed)
else: # average over all twists
failed = average_twists(paths,tar_db_name=sfile_name,skip_failed=args.skip_failed)
# end if
if failed:
raise NotImplementedError('twist average failed')
# end if
# analyze data
import dmc_database_analyzer as dda
data = []
for path in paths:
print "analyzing %s" % path
jfile = os.path.join(path,sfile_name)
if not os.path.exists(jfile):
msg = 'failed to find %s' % jfile
if args.skip_failed:
print msg
continue
else:
raise IOError(msg)
# end if
# end if
local_scalars = pd.read_json(jfile)
extrap_scalars= dda.process_dmc_data_frame(local_scalars)
data.append(extrap_scalars)
# end for path
df = pd.concat(data).reset_index().drop('index',axis=1)
df.to_json(final_target_json)
# end __main__
| Paul-St-Young/solid_hydrogen | gather_static_twists.py | Python | mit | 7,912 | [
"QMCPACK"
] | 60462160f64ec56ff0c52608f53ec137cab88c07606e18925f4a394046554636 |
#!/usr/bin/env python
# encoding: utf-8
'''
Created by Brian Cherinka on 2016-04-26 09:20:35
Licensed under a 3-clause BSD license.
Revision History:
Initial Version: 2016-04-26 09:20:35 by Brian Cherinka
Last Modified On: 2016-04-26 09:20:35 by Brian
'''
from sqlalchemy import create_engine, MetaData
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.event import listen
from sqlalchemy.pool import Pool
from marvin.core import caching_query
from marvin.db.caching import regions
# from marvin import config
# from hashlib import md5
# from dogpile.cache.region import make_region
# import os
# # DOGPILE CACHING SETUP
# # dogpile cache regions. A home base for cache configurations.
# regions = {}
# dogpath = os.environ.get('MANGA_SCRATCH_DIR', None)
# if dogpath and os.path.isdir(dogpath):
# dogpath = dogpath
# else:
# dogpath = os.path.expanduser('~')
# dogroot = os.path.join(dogpath, 'dogpile_data')
# if not os.path.isdir(dogroot):
# os.makedirs(dogroot)
# # make an nsa region
# def make_nsa_region(name):
# ''' make a dogpile cache region for NSA sample queries
# Creates a file-based cache with expiration time of 1 hour
# Parameter:
# name (str):
# The name of the cache region
# '''
# reg = make_region(key_mangler=md5_key_mangler).configure(
# 'dogpile.cache.dbm',
# expiration_time=3600,
# arguments={'filename': os.path.join(dogroot, '{0}_cache.dbm'.format(name))}
# )
# return reg
# # db hash key
# def md5_key_mangler(key):
# """Receive cache keys as long concatenated strings;
# distill them into an md5 hash.
# """
# return md5(key.encode('ascii')).hexdigest()
# # configure the "default" cache region.
# regions['default'] = make_region(
# # the "dbm" backend needs string-encoded keys
# key_mangler=md5_key_mangler
# ).configure(
# # using type 'file' to illustrate
# # serialized persistence. Normally
# # memcached or similar is a better choice
# # for caching.
# # 'dogpile.cache.dbm', # file-based backend
# 'dogpile.cache.memcached', # memcached-based backend
# expiration_time=3600,
# arguments={
# 'url': "127.0.0.1:11211" # memcached option
# # "filename": os.path.join(dogroot, "cache.dbm") # file option
# }
# )
# # cache regions for NSA tables
# for mpl in config._allowed_releases.keys():
# nsacache = 'nsa_{0}'.format(mpl.lower().replace('-', ''))
# regions[nsacache] = make_nsa_region(nsacache)
def clearSearchPathCallback(dbapi_con, connection_record):
'''
When creating relationships across schema, SQLAlchemy
has problems when you explicitly declare the schema in
ModelClasses and it is found in search_path.
The solution is to set the search_path to "$user" for
the life of any connection to the database. Since there
is no (or shouldn't be!) schema with the same name
as the user, this effectively makes it blank.
This callback function is called for every database connection.
For the full details of this issue, see:
http://groups.google.com/group/sqlalchemy/browse_thread/thread/88b5cc5c12246220
dbapi_con - type: psycopg2._psycopg.connection
connection_record - type: sqlalchemy.pool._ConnectionRecord
'''
cursor = dbapi_con.cursor()
cursor.execute('SET search_path TO "$user",functions,public')
dbapi_con.commit()
listen(Pool, 'connect', clearSearchPathCallback)
class DatabaseConnection(object):
'''This class defines an object that makes a connection to a database.
The "DatabaseConnection" object takes as its parameter the SQLAlchemy
database connection string.
This class is best called from another class that contains the
actual connection information (so that it can be reused for different
connections).
This class implements the singleton design pattern. The first time the
object is created, it *requires* a valid database connection string.
Every time it is called via:
db = DatabaseConnection()
the same object is returned and contains the connection information.
'''
_singletons = dict()
def __new__(cls, database_connection_string=None, expire_on_commit=True):
"""This overrides the object's usual creation mechanism."""
if cls not in cls._singletons:
assert database_connection_string is not None, "A database connection string must be specified!"
cls._singletons[cls] = object.__new__(cls)
# ------------------------------------------------
# This is the custom initialization
# ------------------------------------------------
me = cls._singletons[cls] # just for convenience (think "self")
me.database_connection_string = database_connection_string
# change 'echo' to print each SQL query (for debugging/optimizing/the curious)
me.engine = create_engine(me.database_connection_string, echo=False, pool_size=10, pool_recycle=1800)
me.metadata = MetaData()
me.metadata.bind = me.engine
me.Base = declarative_base(bind=me.engine)
me.Session = scoped_session(sessionmaker(bind=me.engine, autocommit=True,
query_cls=caching_query.query_callable(regions),
expire_on_commit=expire_on_commit))
# ------------------------------------------------
return cls._singletons[cls]
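# Illustrative usage (added for clarity; not part of the original module).
# The connection string below is a made-up example; any SQLAlchemy URL for
# the MaNGA database would work:
#
#   db = DatabaseConnection('postgresql+psycopg2://user@localhost/manga')
#   same_db = DatabaseConnection()   # later calls return the same singleton
#   session = db.Session()           # scoped, cache-aware session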
| sdss/marvin | python/marvin/db/DatabaseConnection.py | Python | bsd-3-clause | 5,778 | [
"Brian"
] | 0d8d2822a173d6f02229563d92283f02ebb0d8223656ce8c3a4495b9f9ac5308 |
from __future__ import print_function
import sys
import random
import os
from builtins import range
import time
import json
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
from h2o.grid.grid_search import H2OGridSearch
class Test_glm_random_grid_search:
"""
This class is created to test the three stopping conditions for randomized gridsearch using
GLM Binomial family. The three stopping conditions are :
1. max_runtime_secs:
2. max_models:
    3. metrics.  We will be picking 2 stopping metrics to test this stopping condition with: one metric
    that should increase and one that should decrease as model performance improves.
I have written 4 tests:
1. test1_glm_random_grid_search_model_number: this test will not put any stopping conditions
on randomized search. The purpose here is to make sure that randomized search will give us all possible
hyper-parameter combinations.
    2. test2_glm_random_grid_search_max_model: this tests the stopping condition of setting max_models in
    search criteria;
    3. test3_glm_random_grid_search_max_runtime_secs: this tests the stopping condition max_runtime_secs
    in search criteria;
    4. test4_glm_random_grid_search_metric: this tests the stopping condition of using a metric which can be
increasing or decreasing.
"""
# parameters set by users, change with care
curr_time = str(round(time.time()))
    # parameters denoting filenames of interest that store training/validation/test data sets in csv format
training1_filename = "smalldata/gridsearch/binomial_training1_set.csv"
json_filename = "random_gridsearch_GLM_binomial_hyper_parameter_" + curr_time + ".json"
allowed_diff = 0.5 # error tolerance allowed
allowed_time_diff = 1e-1 # fraction of max_runtime_secs allowed for max run time stopping criteria
# System parameters, do not change. Dire consequences may follow if you do
current_dir = os.path.dirname(os.path.realpath(sys.argv[1])) # directory of this test file
train_row_count = 0 # training data row count, randomly generated later
train_col_count = 0 # training data column count, randomly generated later
max_int_val = 1000 # maximum size of random integer values
min_int_val = 0 # minimum size of random integer values
max_int_number = 3 # maximum number of integer random grid values to generate
max_real_val = 1 # maximum size of random float values
min_real_val = 0.0 # minimum size of random float values
max_real_number = 3 # maximum number of real grid values to generate
lambda_scale = 100 # scale lambda value to be from 0 to 100 instead of 0 to 1
max_runtime_scale = 3 # scale the max runtime to be different from 0 to 1
one_model_time = 0 # time taken to build one barebone model
possible_number_models = 0 # possible number of models built based on hyper-parameter specification
max_model_number = 0 # maximum number of models specified to test for stopping conditions, generated later
max_grid_runtime = 1 # maximum runtime value in seconds, 1 minute max
allowed_scaled_overtime = 1 # used to set max_allowed_runtime as allowed_scaled_overtime * total model run time
allowed_scaled_time = 1 # scale back time
allowed_scaled_model_number = 1.5 # used to set max_model_number as
# possible_number_models * allowed_scaled_model_number
max_stopping_rounds = 5 # maximum stopping rounds allowed to be used for early stopping metric
max_tolerance = 0.01 # maximum tolerance to be used for early stopping metric
    family = 'binomial'     # distribution family used in this test
test_name = "pyunit_glm_binomial_gridsearch_randomdiscrete_large.py" # name of this test
sandbox_dir = "" # sandbox directory where we are going to save our failed test data sets
# store information about training/test data sets
x_indices = [] # store predictor indices in the data set
y_index = 0 # store response index in the data set
training1_data = [] # store training data sets
total_test_number = 5 # number of tests carried out
test_failed = 0 # count total number of tests that have failed
test_failed_array = [0]*total_test_number # denote test results for all tests run. 1 error, 0 pass
test_num = 0 # index representing which test is being run
# give the user opportunity to pre-assign hyper parameters for fixed values
hyper_params = {}
# parameters to be excluded from hyper parameter list even though they may be gridable
exclude_parameter_lists = ['tweedie_link_power', 'tweedie_variance_power'] # do not need these
# these are supposed to be gridable but not really
exclude_parameter_lists.extend(['fold_column', 'weights_column', 'offset_column'])
# these are excluded for extracting parameters to manually build H2O GLM models
exclude_parameter_lists.extend(['model_id'])
gridable_parameters = [] # store griddable parameter names
gridable_types = [] # store the corresponding griddable parameter types
    gridable_defaults = []     # store the gridable parameter default values
correct_model_number = 0 # count number of models built with correct hyper-parameter specification
nfolds = 5 # enable cross validation to test fold_assignment
def __init__(self, family):
"""
Constructor.
:param family: distribution family for tests
:return: None
"""
self.setup_data() # setup_data training data
self.setup_grid_params() # setup_data grid hyper-parameters
def setup_data(self):
"""
This function performs all initializations necessary:
load the data sets and set the training set indices and response column index
"""
# clean out the sandbox directory first
self.sandbox_dir = pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, True)
# preload data sets
self.training1_data = h2o.import_file(path=pyunit_utils.locate(self.training1_filename))
# set data set indices for predictors and response
self.y_index = self.training1_data.ncol-1
self.x_indices = list(range(self.y_index))
self.training1_data[self.y_index] = self.training1_data[self.y_index].round().asfactor()
# save the training data files just in case the code crashed.
pyunit_utils.remove_csv_files(self.current_dir, ".csv", action='copy', new_dir_path=self.sandbox_dir)
def setup_grid_params(self):
"""
This function setup the randomized gridsearch parameters that will be used later on:
1. It will first try to grab all the parameters that are griddable and parameters used by GLM.
2. It will find the intersection of parameters that are both griddable and used by GLM.
        3. There are several extra parameters that are used by GLM that are denoted as gridable but actually are not.
        These parameters have to be discovered manually and are captured in self.exclude_parameter_lists.
4. We generate the gridsearch hyper-parameter. For numerical parameters, we will generate those randomly.
For enums, we will include all of them.
:return: None
"""
# build bare bone model to get all parameters
model = H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds)
model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)
self.one_model_time = pyunit_utils.find_grid_runtime([model]) # find model train time
print("Time taken to build a base barebone model is {0}".format(self.one_model_time))
        # grab all gridable parameters and their types
(self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
pyunit_utils.get_gridables(model._model_json["parameters"])
# give the user opportunity to pre-assign hyper parameters for fixed values
self.hyper_params = {}
self.hyper_params["fold_assignment"] = ['AUTO', 'Random', 'Modulo', "Stratified"]
self.hyper_params["missing_values_handling"] = ['MeanImputation', 'Skip']
# randomly generate griddable parameters
(self.hyper_params, self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
pyunit_utils.gen_grid_search(model.full_parameters.keys(), self.hyper_params, self.exclude_parameter_lists,
self.gridable_parameters, self.gridable_types, self.gridable_defaults,
random.randint(1, self.max_int_number), self.max_int_val, self.min_int_val,
random.randint(1, self.max_real_number), self.max_real_val, self.min_real_val)
# change the value of lambda parameters to be from 0 to self.lambda_scale instead of 0 to 1.
if "lambda" in list(self.hyper_params):
self.hyper_params["lambda"] = [self.lambda_scale * x for x in self.hyper_params["lambda"]]
time_scale = self.max_runtime_scale * self.one_model_time
        # scale the max_runtime_secs values from the 0 to 1 range up to 0 to time_scale.
if "max_runtime_secs" in list(self.hyper_params):
self.hyper_params["max_runtime_secs"] = [time_scale * x for x in
self.hyper_params["max_runtime_secs"]]
# number of possible models being built:
self.possible_number_models = pyunit_utils.count_models(self.hyper_params)
# save hyper-parameters in sandbox and current test directories.
pyunit_utils.write_hyper_parameters_json(self.current_dir, self.sandbox_dir, self.json_filename,
self.hyper_params)
def tear_down(self):
"""
This function performs teardown after the dynamic test is completed. If all tests
passed, it will delete all data sets generated since they can be quite large. It
will move the training/validation/test data sets into a Rsandbox directory so that
we can re-run the failed test.
"""
if self.test_failed: # some tests have failed. Need to save data sets for later re-runs
# create Rsandbox directory to keep data sets and weight information
self.sandbox_dir = pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, True)
# Do not want to save all data sets. Only save data sets that are needed for failed tests
pyunit_utils.move_files(self.sandbox_dir, self.training1_data_file, self.training1_filename)
# write out the jenkins job info into log files.
json_file = os.path.join(self.sandbox_dir, self.json_filename)
with open(json_file,'wb') as test_file:
json.dump(self.hyper_params, test_file)
else: # all tests have passed. Delete sandbox if if was not wiped before
pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, False)
def test1_glm_random_grid_search_model_number(self, metric_name):
"""
This test is used to make sure the randomized gridsearch will generate all models specified in the
hyperparameters if no stopping condition is given in the search criterion.
        :param metric_name: string to denote what the grid search models should be sorted by
:return: None
"""
print("*******************************************************************************************")
print("test1_glm_random_grid_search_model_number for GLM " + self.family)
h2o.cluster_info()
# setup_data our stopping condition here, random discrete and find all models
search_criteria = {'strategy': 'RandomDiscrete', "stopping_rounds": 0, "seed": int(round(time.time()))}
print("GLM Binomial grid search_criteria: {0}".format(search_criteria))
# fire off random grid-search
random_grid_model = \
H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds),
hyper_params=self.hyper_params, search_criteria=search_criteria)
random_grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)
# compare number of models built from both gridsearch
if not (len(random_grid_model) == self.possible_number_models):
self.test_failed += 1
self.test_failed_array[self.test_num] = 1
print("test1_glm_random_grid_search_model_number for GLM: failed, number of models generated"
"possible model number {0} and randomized gridsearch model number {1} are not "
"equal.".format(self.possible_number_models, len(random_grid_model)))
else:
self.max_grid_runtime = pyunit_utils.find_grid_runtime(random_grid_model) # time taken to build all models
if self.test_failed_array[self.test_num] == 0:
print("test1_glm_random_grid_search_model_number for GLM: passed!")
self.test_num += 1
sys.stdout.flush()
def test2_glm_random_grid_search_max_model(self):
"""
This test is used to test the stopping condition max_model_number in the randomized gridsearch. The
max_models parameter is randomly generated. If it is higher than the actual possible number of models
that can be generated with the current hyper-space parameters, randomized grid search should generate
        all the models.  Otherwise, grid search shall return a number of models equal to the max_models setting.
"""
print("*******************************************************************************************")
print("test2_glm_random_grid_search_max_model for GLM " + self.family)
h2o.cluster_info()
# setup_data our stopping condition here
self.max_model_number = random.randint(1, int(self.allowed_scaled_model_number * self.possible_number_models))
search_criteria = {'strategy': 'RandomDiscrete', 'max_models': self.max_model_number,
"seed": int(round(time.time()))}
print("GLM Binomial grid search_criteria: {0}".format(search_criteria))
print("Possible number of models built is {0}".format(self.possible_number_models))
# fire off random grid-search
grid_model = \
H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds),
hyper_params=self.hyper_params, search_criteria=search_criteria)
grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)
number_model_built = len(grid_model) # count actual number of models built
print("Maximum model limit is {0}. Number of models built is {1}".format(search_criteria["max_models"],
number_model_built))
if self.possible_number_models >= self.max_model_number: # stopping condition restricts model number
if not (number_model_built == self.max_model_number):
print("test2_glm_random_grid_search_max_model: failed. Number of model built {0} "
"does not match stopping condition number{1}.".format(number_model_built, self.max_model_number))
self.test_failed += 1
self.test_failed_array[self.test_num] = 1
else:
print("test2_glm_random_grid_search_max_model for GLM: passed.")
else: # stopping condition is too loose
if not (number_model_built == self.possible_number_models):
self.test_failed += 1
self.test_failed_array[self.test_num] = 1
print("test2_glm_random_grid_search_max_model: failed. Number of model built {0} does not equal "
"to possible model number {1}.".format(number_model_built, self.possible_number_models))
else:
print("test2_glm_random_grid_search_max_model for GLM: passed.")
self.test_num += 1
sys.stdout.flush()
def test3_glm_random_grid_search_max_runtime_secs(self):
"""
This function will test the stopping criteria max_runtime_secs. For each model built, the field
        run_time actually denotes the time in ms used to build the model.  We will add up the run_time from all
        models and check against the stopping criteria max_runtime_secs.  Since each model checks its run time
        differently, there are some inaccuracies in the actual run time.  For example, if we give a model 10 ms to
        build, GLM may only check whether it has used up all its time once every 10 epochs it has run, while on
the other hand, deeplearning may check the time it has spent after every epoch of training.
If we are able to restrict the runtime to not exceed the specified max_runtime_secs by a certain
percentage, we will consider the test a success.
:return: None
"""
print("*******************************************************************************************")
print("test3_glm_random_grid_search_max_runtime_secs for GLM " + self.family)
h2o.cluster_info()
if "max_runtime_secs" in list(self.hyper_params):
del self.hyper_params['max_runtime_secs']
# number of possible models being built:
self.possible_number_models = pyunit_utils.count_models(self.hyper_params)
# setup_data our stopping condition here
        max_run_time_secs = random.uniform(self.one_model_time, self.allowed_scaled_time*self.max_grid_runtime)
search_criteria = {'strategy': 'RandomDiscrete', 'max_runtime_secs': max_run_time_secs,
"seed": int(round(time.time()))}
# search_criteria = {'strategy': 'RandomDiscrete', 'max_runtime_secs': 1/1e8}
print("GLM Binomial grid search_criteria: {0}".format(search_criteria))
# fire off random grid-search
grid_model = \
H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds),
hyper_params=self.hyper_params, search_criteria=search_criteria)
grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)
actual_run_time_secs = pyunit_utils.find_grid_runtime(grid_model)
print("Maximum time limit is {0}. Time taken to build all model is "
"{1}".format(search_criteria["max_runtime_secs"], actual_run_time_secs))
print("Maximum model number is {0}. Actual number of models built is {1}".format(self.possible_number_models,
len(grid_model)))
if actual_run_time_secs <= search_criteria["max_runtime_secs"]*(1+self.allowed_diff):
print("test3_glm_random_grid_search_max_runtime_secs: passed!")
if len(grid_model) > self.possible_number_models: # generate too many models, something is wrong
self.test_failed += 1
self.test_failed_array[self.test_num] = 1
print("test3_glm_random_grid_search_max_runtime_secs: failed. Generated {0} models "
" which exceeds maximum possible model number {1}".format(len(grid_model),
self.possible_number_models))
elif len(grid_model) == 1: # will always generate 1 model
print("test3_glm_random_grid_search_max_runtime_secs: passed!")
else:
self.test_failed += 1
self.test_failed_array[self.test_num] = 1
print("test3_glm_random_grid_search_max_runtime_secs: failed. Model takes time {0}"
" seconds which exceeds allowed time {1}".format(actual_run_time_secs,
max_run_time_secs*(1+self.allowed_diff)))
self.test_num += 1
sys.stdout.flush()
def test4_glm_random_grid_search_metric(self, metric_name, bigger_is_better):
"""
This function will test the last stopping condition using metrics.
:param metric_name: metric we want to use to test the last stopping condition
:param bigger_is_better: higher metric value indicates better model performance
:return: None
"""
print("*******************************************************************************************")
print("test4_glm_random_grid_search_metric using " + metric_name + " for family " + self.family)
h2o.cluster_info()
search_criteria = {
"strategy": "RandomDiscrete",
"stopping_metric": metric_name,
"stopping_tolerance": random.uniform(1e-8, self.max_tolerance),
"stopping_rounds": random.randint(1, self.max_stopping_rounds),
"seed": int(round(time.time()))
}
print("GLM Binomial grid search_criteria: {0}".format(search_criteria))
# add max_runtime_secs back into hyper-parameters to limit model runtime.
self.hyper_params["max_runtime_secs"] = [0.3] # arbitrarily set
# fire off random grid-search
grid_model = \
H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds),
hyper_params=self.hyper_params, search_criteria=search_criteria)
grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)
# bool indicating if randomized grid search has calculated the early stopping condition correctly
stopped_correctly = \
pyunit_utils.evaluate_metrics_stopping(grid_model.models, metric_name, bigger_is_better, search_criteria,
self.possible_number_models)
if stopped_correctly:
print("test4_glm_random_grid_search_metric " + metric_name + ": passed. ")
else:
self.test_failed += 1
self.test_failed_array[self.test_num] = 1
print("test4_glm_random_grid_search_metric " + metric_name + ": failed. ")
self.test_num += 1
def test_random_grid_search_for_glm():
"""
    Create and instantiate classes, call test methods to test randomized grid search for GLM Gaussian
    or Binomial families.
:return: None
"""
# randomize grid search for Binomial
test_glm_binomial_random_grid = Test_glm_random_grid_search("binomial")
test_glm_binomial_random_grid.test1_glm_random_grid_search_model_number("logloss(xval=True)")
test_glm_binomial_random_grid.test2_glm_random_grid_search_max_model()
test_glm_binomial_random_grid.test3_glm_random_grid_search_max_runtime_secs()
test_glm_binomial_random_grid.test4_glm_random_grid_search_metric("logloss", False)
test_glm_binomial_random_grid.test4_glm_random_grid_search_metric("AUC", True)
# test_glm_binomial_random_grid.tear_down()
# exit with error if any tests have failed
if test_glm_binomial_random_grid.test_failed > 0:
sys.exit(1)
if __name__ == "__main__":
pyunit_utils.standalone_test(test_random_grid_search_for_glm)
else:
test_random_grid_search_for_glm()
| mathemage/h2o-3 | h2o-py/dynamic_tests/testdir_algos/glm/pyunit_glm_binomial_gridsearch_randomdiscrete_large.py | Python | apache-2.0 | 23,920 | [
"Gaussian"
] | 8b04d3873f3c84173f9562293e0a2ab9a09f126130ea8a3a7cdbee5aa0b2d7bc |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2003-2007 Donald N. Allingham
# Copyright (C) 2007-2012 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2012 Nick Hall
# Copyright (C) 2011-2016 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
""" GUI dialog for creating and managing books """
# Written by Alex Roitman,
# largely based on the BaseDoc classes by Don Allingham
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
#------------------------------------------------------------------------
#
# Set up logging
#
#------------------------------------------------------------------------
import logging
LOG = logging.getLogger(".Book")
#-------------------------------------------------------------------------
#
# GTK/Gnome modules
#
#-------------------------------------------------------------------------
from gi.repository import Gdk
from gi.repository import Gtk
from gi.repository import GObject
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from ...listmodel import ListModel
from gramps.gen.errors import FilterError, ReportError
from ...pluginmanager import GuiPluginManager
from ...dialog import WarningDialog, ErrorDialog, QuestionDialog2
from gramps.gen.plug.menu import PersonOption, FamilyOption
from gramps.gen.plug.docgen import StyleSheet
from ...managedwindow import ManagedWindow, set_titles
from ...glade import Glade
from ...utils import is_right_click, open_file_with_default_application
from ...user import User
from .. import make_gui_option
# Import from specific modules in ReportBase
from gramps.gen.plug.report import BookList, Book, BookItem, append_styles
from gramps.gen.plug.report import CATEGORY_BOOK, book_categories
from gramps.gen.plug.report._options import ReportOptions
from ._reportdialog import ReportDialog
from ._docreportdialog import DocReportDialog
#------------------------------------------------------------------------
#
# Private Constants
#
#------------------------------------------------------------------------
_UNSUPPORTED = _("Unsupported")
_RETURN = Gdk.keyval_from_name("Return")
_KP_ENTER = Gdk.keyval_from_name("KP_Enter")
#------------------------------------------------------------------------
#
# Private Functions
#
#------------------------------------------------------------------------
def _initialize_options(options, dbstate, uistate):
"""
Validates all options by making sure that their values are consistent with
the database.
    options: the option class whose values are validated
    dbstate: the database state holding the database the options are applied to
    uistate: the display state, used to look up the active person
"""
if not hasattr(options, "menu"):
return
dbase = dbstate.get_database()
if dbase.get_total() == 0:
return
menu = options.menu
for name in menu.get_all_option_names():
option = menu.get_option_by_name(name)
value = option.get_value()
if isinstance(option, PersonOption):
if not dbase.get_person_from_gramps_id(value):
person_handle = uistate.get_active('Person')
person = dbase.get_person_from_handle(person_handle)
option.set_value(person.get_gramps_id())
elif isinstance(option, FamilyOption):
if not dbase.get_family_from_gramps_id(value):
person_handle = uistate.get_active('Person')
person = dbase.get_person_from_handle(person_handle)
if person is None:
continue
family_list = person.get_family_handle_list()
if family_list:
family_handle = family_list[0]
else:
try:
family_handle = next(dbase.iter_family_handles())
except StopIteration:
family_handle = None
if family_handle:
family = dbase.get_family_from_handle(family_handle)
option.set_value(family.get_gramps_id())
else:
print("No family specified for ", name)
#------------------------------------------------------------------------
#
# BookListDisplay class
#
#------------------------------------------------------------------------
class BookListDisplay:
"""
Interface into a dialog with the list of available books.
Allows the user to select and/or delete a book from the list.
"""
def __init__(self, booklist, nodelete=False, dosave=False, parent=None):
"""
Create a BookListDisplay object that displays the books in BookList.
booklist: books that are displayed -- a :class:`.BookList` instance
nodelete: if True then the Delete button is hidden
dosave: if True then the book list is flagged to be saved if needed
"""
self.booklist = booklist
self.dosave = dosave
self.xml = Glade('book.glade')
self.top = self.xml.toplevel
self.unsaved_changes = False
set_titles(self.top, self.xml.get_object('title2'),
_('Available Books'))
if nodelete:
delete_button = self.xml.get_object("delete_button")
delete_button.hide()
self.xml.connect_signals({
"on_booklist_cancel_clicked" : self.on_booklist_cancel_clicked,
"on_booklist_ok_clicked" : self.on_booklist_ok_clicked,
"on_booklist_delete_clicked" : self.on_booklist_delete_clicked,
"on_book_ok_clicked" : self.do_nothing,
"destroy_passed_object" : self.do_nothing,
"on_setup_clicked" : self.do_nothing,
"on_down_clicked" : self.do_nothing,
"on_up_clicked" : self.do_nothing,
"on_remove_clicked" : self.do_nothing,
"on_add_clicked" : self.do_nothing,
"on_edit_clicked" : self.do_nothing,
"on_open_clicked" : self.do_nothing,
"on_save_clicked" : self.do_nothing,
"on_clear_clicked" : self.do_nothing
})
self.guilistbooks = self.xml.get_object('list')
self.guilistbooks.connect('button-press-event', self.on_button_press)
self.guilistbooks.connect('key-press-event', self.on_key_pressed)
self.blist = ListModel(self.guilistbooks, [('Name', -1, 10)],)
self.redraw()
self.selection = None
self.top.set_transient_for(parent)
self.top.run()
def redraw(self):
"""Redraws the list of currently available books"""
self.blist.model.clear()
names = self.booklist.get_book_names()
if not len(names):
return
for name in names:
the_iter = self.blist.add([name])
if the_iter:
self.blist.selection.select_iter(the_iter)
def on_booklist_ok_clicked(self, obj):
"""
Return selected book.
Also marks the current list to be saved into the xml file, if needed.
"""
store, the_iter = self.blist.get_selected()
if the_iter:
data = self.blist.get_data(the_iter, [0])
self.selection = self.booklist.get_book(str(data[0]))
if self.dosave and self.unsaved_changes:
self.booklist.set_needs_saving(True)
def on_booklist_delete_clicked(self, obj):
"""
Deletes selected book from the list.
This change is not final. OK button has to be clicked to save the list.
"""
store, the_iter = self.blist.get_selected()
if not the_iter:
return
data = self.blist.get_data(the_iter, [0])
self.booklist.delete_book(str(data[0]))
self.blist.remove(the_iter)
self.unsaved_changes = True
self.top.run()
def on_booklist_cancel_clicked(self, obj):
""" cancel the booklist dialog """
if self.unsaved_changes:
qqq = QuestionDialog2(
_('Discard Unsaved Changes'),
_('You have made changes which have not been saved.'),
_('Proceed'),
_('Cancel'),
parent=self.top)
if not qqq.run():
self.top.run()
def on_button_press(self, obj, event):
"""
Checks for a double click event. In the list, we want to
treat a double click as if it was OK button press.
"""
if event.type == Gdk.EventType._2BUTTON_PRESS and event.button == 1:
store, the_iter = self.blist.get_selected()
if not the_iter:
return False
self.on_booklist_ok_clicked(obj)
#emit OK response on dialog to close it automatically
self.top.response(-5)
return True
return False
def on_key_pressed(self, obj, event):
"""
Handles the return key being pressed on list. If the key is pressed,
        the OK button handler is called
"""
if event.type == Gdk.EventType.KEY_PRESS:
if event.keyval in (_RETURN, _KP_ENTER):
self.on_booklist_ok_clicked(obj)
#emit OK response on dialog to close it automatically
self.top.response(-5)
return True
return False
def do_nothing(self, obj):
""" do nothing """
pass
#------------------------------------------------------------------------
#
# Book Options
#
#------------------------------------------------------------------------
class BookOptions(ReportOptions):
"""
Defines options and provides handling interface.
"""
def __init__(self, name, dbase):
ReportOptions.__init__(self, name, dbase)
# Options specific for this report
self.options_dict = {
'bookname' : '',
}
# TODO since the CLI code for the "book" generates its own "help" now,
# the GUI code would be faster if it didn't list all the possible books
self.options_help = {
'bookname' : ("=name", _("Name of the book. MANDATORY"),
BookList('books.xml', dbase).get_book_names(),
False),
}
#-------------------------------------------------------------------------
#
# Book creation dialog
#
#-------------------------------------------------------------------------
class BookSelector(ManagedWindow):
"""
Interface into a dialog setting up the book.
Allows the user to add/remove/reorder/setup items for the current book
and to clear/load/save/edit whole books.
"""
def __init__(self, dbstate, uistate):
self._db = dbstate.db
self.dbstate = dbstate
self.uistate = uistate
self.title = _('Manage Books')
self.file = "books.xml"
ManagedWindow.__init__(self, uistate, [], self.__class__)
self.xml = Glade('book.glade', toplevel="top")
window = self.xml.toplevel
title_label = self.xml.get_object('title')
self.set_window(window, title_label, self.title)
self.setup_configs('interface.bookselector', 700, 600)
self.show()
self.xml.connect_signals({
"on_add_clicked" : self.on_add_clicked,
"on_remove_clicked" : self.on_remove_clicked,
"on_up_clicked" : self.on_up_clicked,
"on_down_clicked" : self.on_down_clicked,
"on_setup_clicked" : self.on_setup_clicked,
"on_clear_clicked" : self.on_clear_clicked,
"on_save_clicked" : self.on_save_clicked,
"on_open_clicked" : self.on_open_clicked,
"on_edit_clicked" : self.on_edit_clicked,
"on_book_ok_clicked" : self.on_book_ok_clicked,
"destroy_passed_object" : self.on_close_clicked,
# Insert dummy handlers for second top level in the glade file
"on_booklist_ok_clicked" : lambda _: None,
"on_booklist_delete_clicked" : lambda _: None,
"on_booklist_cancel_clicked" : lambda _: None,
"on_booklist_ok_clicked" : lambda _: None,
"on_booklist_ok_clicked" : lambda _: None,
})
self.avail_tree = self.xml.get_object("avail_tree")
self.book_tree = self.xml.get_object("book_tree")
self.avail_tree.connect('button-press-event', self.avail_button_press)
self.book_tree.connect('button-press-event', self.book_button_press)
self.name_entry = self.xml.get_object("name_entry")
self.name_entry.set_text(_('New Book'))
avail_label = self.xml.get_object('avail_label')
avail_label.set_text("<b>%s</b>" % _("_Available items"))
avail_label.set_use_markup(True)
avail_label.set_use_underline(True)
book_label = self.xml.get_object('book_label')
book_label.set_text("<b>%s</b>" % _("Current _book"))
book_label.set_use_underline(True)
book_label.set_use_markup(True)
avail_titles = [(_('Name'), 0, 230),
(_('Type'), 1, 80),
('', -1, 0)]
book_titles = [(_('Item name'), -1, 230),
(_('Type'), -1, 80),
('', -1, 0),
(_('Subject'), -1, 50)]
self.avail_nr_cols = len(avail_titles)
self.book_nr_cols = len(book_titles)
self.avail_model = ListModel(self.avail_tree, avail_titles)
self.book_model = ListModel(self.book_tree, book_titles)
self.draw_avail_list()
self.book = Book()
self.book_list = BookList(self.file, self._db)
self.book_list.set_needs_saving(False) # just read in: no need to save
def build_menu_names(self, obj):
return (_("Book selection list"), self.title)
def draw_avail_list(self):
"""
Draw the list with the selections available for the book.
The selections are read from the book item registry.
"""
pmgr = GuiPluginManager.get_instance()
regbi = pmgr.get_reg_bookitems()
if not regbi:
return
available_reports = []
for pdata in regbi:
category = _UNSUPPORTED
if pdata.supported and pdata.category in book_categories:
category = book_categories[pdata.category]
available_reports.append([pdata.name, category, pdata.id])
for data in sorted(available_reports):
new_iter = self.avail_model.add(data)
self.avail_model.connect_model()
if new_iter:
self.avail_model.selection.select_iter(new_iter)
path = self.avail_model.model.get_path(new_iter)
col = self.avail_tree.get_column(0)
self.avail_tree.scroll_to_cell(path, col, 1, 1, 0.0)
def open_book(self, book):
"""
Open the book: set the current set of selections to this book's items.
book: the book object to load.
"""
if book.get_paper_name():
self.book.set_paper_name(book.get_paper_name())
if book.get_orientation() is not None: # 0 is legal
self.book.set_orientation(book.get_orientation())
if book.get_paper_metric() is not None: # 0 is legal
self.book.set_paper_metric(book.get_paper_metric())
if book.get_custom_paper_size():
self.book.set_custom_paper_size(book.get_custom_paper_size())
if book.get_margins():
self.book.set_margins(book.get_margins())
if book.get_format_name():
self.book.set_format_name(book.get_format_name())
if book.get_output():
self.book.set_output(book.get_output())
if book.get_dbname() != self._db.get_save_path():
WarningDialog(
_('Different database'),
_('This book was created with the references to database '
'%s.\n\n This makes references to the central person '
'saved in the book invalid.\n\n'
'Therefore, the central person for each item is being set '
'to the active person of the currently opened database.'
) % book.get_dbname(),
parent=self.window)
self.book.clear()
self.book_model.clear()
for saved_item in book.get_item_list():
name = saved_item.get_name()
item = BookItem(self._db, name)
# The option values were loaded magically by the book parser.
# But they still need to be applied to the menu options.
opt_dict = item.option_class.handler.options_dict
orig_opt_dict = saved_item.option_class.handler.options_dict
menu = item.option_class.menu
for optname in opt_dict:
opt_dict[optname] = orig_opt_dict[optname]
menu_option = menu.get_option_by_name(optname)
if menu_option:
menu_option.set_value(opt_dict[optname])
_initialize_options(item.option_class, self.dbstate, self.uistate)
item.set_style_name(saved_item.get_style_name())
self.book.append_item(item)
data = [item.get_translated_name(),
item.get_category(), item.get_name()]
data[2] = item.option_class.get_subject()
self.book_model.add(data)
def on_add_clicked(self, obj):
"""
Add an item to the current selections.
Use the selected available item to get the item's name in the registry.
"""
store, the_iter = self.avail_model.get_selected()
if not the_iter:
return
data = self.avail_model.get_data(the_iter,
list(range(self.avail_nr_cols)))
item = BookItem(self._db, data[2])
_initialize_options(item.option_class, self.dbstate, self.uistate)
data[2] = item.option_class.get_subject()
self.book_model.add(data)
self.book.append_item(item)
def on_remove_clicked(self, obj):
"""
Remove the item from the current list of selections.
"""
store, the_iter = self.book_model.get_selected()
if not the_iter:
return
row = self.book_model.get_selected_row()
self.book.pop_item(row)
self.book_model.remove(the_iter)
def on_clear_clicked(self, obj):
"""
Clear the whole current book.
"""
self.book_model.clear()
self.book.clear()
def on_up_clicked(self, obj):
"""
Move the currently selected item one row up in the selection list.
"""
row = self.book_model.get_selected_row()
if not row or row == -1:
return
store, the_iter = self.book_model.get_selected()
data = self.book_model.get_data(the_iter,
list(range(self.book_nr_cols)))
self.book_model.remove(the_iter)
self.book_model.insert(row-1, data, None, 1)
item = self.book.pop_item(row)
self.book.insert_item(row-1, item)
def on_down_clicked(self, obj):
"""
Move the currently selected item one row down in the selection list.
"""
row = self.book_model.get_selected_row()
if row + 1 >= self.book_model.count or row == -1:
return
store, the_iter = self.book_model.get_selected()
data = self.book_model.get_data(the_iter,
list(range(self.book_nr_cols)))
self.book_model.remove(the_iter)
self.book_model.insert(row+1, data, None, 1)
item = self.book.pop_item(row)
self.book.insert_item(row+1, item)
def on_setup_clicked(self, obj):
"""
Configure currently selected item.
"""
store, the_iter = self.book_model.get_selected()
if not the_iter:
WarningDialog(_('No selected book item'),
_('Please select a book item to configure.'),
parent=self.window)
return
row = self.book_model.get_selected_row()
item = self.book.get_item(row)
option_class = item.option_class
option_class.handler.set_default_stylesheet_name(item.get_style_name())
item.is_from_saved_book = bool(self.book.get_name())
item_dialog = BookItemDialog(self.dbstate, self.uistate,
item, self.track)
while True:
response = item_dialog.window.run()
if response == Gtk.ResponseType.OK:
# dialog will be closed by connect, now continue work while
# rest of dialog is unresponsive, release when finished
style = option_class.handler.get_default_stylesheet_name()
item.set_style_name(style)
subject = option_class.get_subject()
self.book_model.model.set_value(the_iter, 2, subject)
self.book.set_item(row, item)
item_dialog.close()
break
elif response == Gtk.ResponseType.CANCEL:
item_dialog.close()
break
elif response == Gtk.ResponseType.DELETE_EVENT:
#just stop, in ManagedWindow, delete-event is already coupled to
#correct action.
break
opt_dict = option_class.handler.options_dict
for optname in opt_dict:
menu_option = option_class.menu.get_option_by_name(optname)
if menu_option:
menu_option.set_value(opt_dict[optname])
def book_button_press(self, obj, event):
"""
Double-click on the current book selection is the same as setup.
Right click evokes the context menu.
"""
if event.type == Gdk.EventType._2BUTTON_PRESS and event.button == 1:
self.on_setup_clicked(obj)
elif is_right_click(event):
self.build_book_context_menu(event)
def avail_button_press(self, obj, event):
"""
Double-click on the available selection is the same as add.
Right click evokes the context menu.
"""
if event.type == Gdk.EventType._2BUTTON_PRESS and event.button == 1:
self.on_add_clicked(obj)
elif is_right_click(event):
self.build_avail_context_menu(event)
def build_book_context_menu(self, event):
"""Builds the menu with item-centered and book-centered options."""
store, the_iter = self.book_model.get_selected()
if the_iter:
sensitivity = 1
else:
sensitivity = 0
entries = [
(_('_Up'), self.on_up_clicked, sensitivity),
(_('_Down'), self.on_down_clicked, sensitivity),
(_("Setup"), self.on_setup_clicked, sensitivity),
(_('_Remove'), self.on_remove_clicked, sensitivity),
('', None, 0),
(_('Clear the book'), self.on_clear_clicked, 1),
(_('_Save'), self.on_save_clicked, 1),
(_('_Open'), self.on_open_clicked, 1),
(_("_Edit"), self.on_edit_clicked, 1),
]
self.menu1 = Gtk.Menu() # TODO could this be just a local "menu ="?
self.menu1.set_reserve_toggle_size(False)
for title, callback, sensitivity in entries:
item = Gtk.MenuItem.new_with_mnemonic(title)
if callback:
item.connect("activate", callback)
else:
item = Gtk.SeparatorMenuItem()
item.set_sensitive(sensitivity)
item.show()
self.menu1.append(item)
self.menu1.popup(None, None, None, None, event.button, event.time)
def build_avail_context_menu(self, event):
"""Builds the menu with the single Add option."""
store, the_iter = self.avail_model.get_selected()
if the_iter:
sensitivity = 1
else:
sensitivity = 0
entries = [
(_('_Add'), self.on_add_clicked, sensitivity),
]
self.menu2 = Gtk.Menu() # TODO could this be just a local "menu ="?
self.menu2.set_reserve_toggle_size(False)
for title, callback, sensitivity in entries:
item = Gtk.MenuItem.new_with_mnemonic(title)
if callback:
item.connect("activate", callback)
item.set_sensitive(sensitivity)
item.show()
self.menu2.append(item)
self.menu2.popup(None, None, None, None, event.button, event.time)
def on_close_clicked(self, obj):
"""
close the BookSelector dialog, saving any changes if needed
"""
if self.book_list.get_needs_saving():
self.book_list.save()
ManagedWindow.close(self, *obj)
def on_book_ok_clicked(self, obj):
"""
Run final BookDialog with the current book.
"""
if self.book.get_item_list():
old_paper_name = self.book.get_paper_name() # from books.xml
old_orientation = self.book.get_orientation()
old_paper_metric = self.book.get_paper_metric()
old_custom_paper_size = self.book.get_custom_paper_size()
old_margins = self.book.get_margins()
old_format_name = self.book.get_format_name()
old_output = self.book.get_output()
BookDialog(self.dbstate, self.uistate, self.book, BookOptions)
new_paper_name = self.book.get_paper_name()
new_orientation = self.book.get_orientation()
new_paper_metric = self.book.get_paper_metric()
new_custom_paper_size = self.book.get_custom_paper_size()
new_margins = self.book.get_margins()
new_format_name = self.book.get_format_name()
new_output = self.book.get_output()
# only books in the booklist have a name (not "ad hoc" ones)
if (self.book.get_name() and
(old_paper_name != new_paper_name or
old_orientation != new_orientation or
old_paper_metric != new_paper_metric or
old_custom_paper_size != new_custom_paper_size or
old_margins != new_margins or
old_format_name != new_format_name or
old_output != new_output)):
self.book.set_dbname(self._db.get_save_path())
self.book_list.set_book(self.book.get_name(), self.book)
self.book_list.set_needs_saving(True)
if self.book_list.get_needs_saving():
self.book_list.save()
else:
WarningDialog(_('No items'),
_('This book has no items.'),
parent=self.window)
return
self.close()
def on_save_clicked(self, obj):
"""
Save the current book in the xml booklist file.
"""
if not self.book.get_item_list():
WarningDialog(_('No items'),
_('This book has no items.'),
parent=self.window)
return
name = str(self.name_entry.get_text())
if not name:
WarningDialog(
_('No book name'),
_('You are about to save away a book with no name.\n\n'
'Please give it a name before saving it away.'),
parent=self.window)
return
if name in self.book_list.get_book_names():
qqq = QuestionDialog2(
_('Book name already exists'),
_('You are about to save away a '
'book with a name which already exists.'),
_('Proceed'),
_('Cancel'),
parent=self.window)
if not qqq.run():
return
        # Previously, the same book could be added to the booklist under
        # multiple names; those only became different books once the booklist
        # was saved to a file, so everything was fine.  This became a problem
        # once the paper settings were added to the Book object in the
        # BookDialog, because those settings are retrieved from the Book
        # object in BookList.save, so multiple books (differentiated only by
        # their names) were assigned the same paper values.  The solution is
        # to make each Book unique in the booklist, so if multiple copies are
        # saved away only the last one will get the paper values assigned to
        # it (although when the earlier books are eventually run, they'll be
        # assigned paper values also).
self.book.set_name(name)
self.book.set_dbname(self._db.get_save_path())
self.book_list.set_book(name, self.book)
self.book_list.set_needs_saving(True) # user clicked on save
self.book = Book(self.book, exact_copy=False) # regenerate old items
self.book.set_name(name)
self.book.set_dbname(self._db.get_save_path())
def on_open_clicked(self, obj):
"""
Run the BookListDisplay dialog to present the choice of books to open.
"""
booklistdisplay = BookListDisplay(self.book_list, nodelete=True,
dosave=False, parent=self.window)
booklistdisplay.top.destroy()
book = booklistdisplay.selection
if book:
self.open_book(book)
self.name_entry.set_text(book.get_name())
self.book.set_name(book.get_name())
def on_edit_clicked(self, obj):
"""
Run the BookListDisplay dialog to present the choice of books to delete.
"""
booklistdisplay = BookListDisplay(self.book_list, nodelete=False,
dosave=True, parent=self.window)
booklistdisplay.top.destroy()
book = booklistdisplay.selection
if book:
self.open_book(book)
self.name_entry.set_text(book.get_name())
self.book.set_name(book.get_name())
#------------------------------------------------------------------------
#
# Book Item Options dialog
#
#------------------------------------------------------------------------
class BookItemDialog(ReportDialog):
"""
This class overrides the interface methods common for different reports
in a way specific for this report. This is a book item dialog.
"""
def __init__(self, dbstate, uistate, item, track=[]):
option_class = item.option_class
name = item.get_name()
translated_name = item.get_translated_name()
self.category = CATEGORY_BOOK
self.database = dbstate.db
self.option_class = option_class
self.is_from_saved_book = item.is_from_saved_book
ReportDialog.__init__(self, dbstate, uistate,
option_class, name, translated_name, track)
def on_ok_clicked(self, obj):
"""The user is satisfied with the dialog choices. Parse all options
and close the window."""
# Preparation
self.parse_style_frame()
self.parse_user_options()
self.options.handler.save_options()
def setup_target_frame(self):
"""Target frame is not used."""
pass
def parse_target_frame(self):
"""Target frame is not used."""
return 1
def init_options(self, option_class):
try:
if issubclass(option_class, object):
self.options = option_class(self.raw_name, self.database)
except TypeError:
self.options = option_class
if not self.is_from_saved_book:
self.options.load_previous_values()
def add_user_options(self):
"""
Generic method to add user options to the gui.
"""
if not hasattr(self.options, "menu"):
return
menu = self.options.menu
options_dict = self.options.options_dict
for category in menu.get_categories():
for name in menu.get_option_names(category):
option = menu.get_option(category, name)
# override option default with xml-saved value:
if name in options_dict:
option.set_value(options_dict[name])
widget, label = make_gui_option(option, self.dbstate,
self.uistate, self.track,
self.is_from_saved_book)
if widget is not None:
if label:
self.add_frame_option(category,
option.get_label(),
widget)
else:
self.add_frame_option(category, "", widget)
#-------------------------------------------------------------------------
#
# _BookFormatComboBox
#
#-------------------------------------------------------------------------
class _BookFormatComboBox(Gtk.ComboBox):
"""
Build a menu of report types that are appropriate for a book
"""
def __init__(self, active):
Gtk.ComboBox.__init__(self)
pmgr = GuiPluginManager.get_instance()
self.__bookdoc_plugins = []
for plugin in pmgr.get_docgen_plugins():
if plugin.get_text_support() and plugin.get_draw_support():
self.__bookdoc_plugins.append(plugin)
self.store = Gtk.ListStore(GObject.TYPE_STRING)
self.set_model(self.store)
cell = Gtk.CellRendererText()
self.pack_start(cell, True)
self.add_attribute(cell, 'text', 0)
index = 0
active_index = 0
for plugin in self.__bookdoc_plugins:
name = plugin.get_name()
self.store.append(row=[name])
if plugin.get_extension() == active:
active_index = index
index += 1
self.set_active(active_index)
def get_active_plugin(self):
"""
Get the plugin represented by the currently active selection.
"""
return self.__bookdoc_plugins[self.get_active()]
#------------------------------------------------------------------------
#
# The final dialog - paper, format, target, etc.
#
#------------------------------------------------------------------------
class BookDialog(DocReportDialog):
"""
A usual Report.Dialog subclass.
Create a dialog selecting target, format, and paper/HTML options.
"""
def __init__(self, dbstate, uistate, book, options):
self.format_menu = None
self.options = options
self.page_html_added = False
self.book = book
self.title = _('Generate Book')
self.database = dbstate.db
DocReportDialog.__init__(self, dbstate, uistate, options,
'book', self.title)
self.options.options_dict['bookname'] = self.book.get_name()
response = self.window.run()
if response == Gtk.ResponseType.OK:
handler = self.options.handler
if self.book.get_paper_name() != handler.get_paper_name():
self.book.set_paper_name(handler.get_paper_name())
if self.book.get_orientation() != handler.get_orientation():
self.book.set_orientation(handler.get_orientation())
if self.book.get_paper_metric() != handler.get_paper_metric():
self.book.set_paper_metric(handler.get_paper_metric())
if (self.book.get_custom_paper_size() !=
handler.get_custom_paper_size()):
self.book.set_custom_paper_size(handler.get_custom_paper_size())
if self.book.get_margins() != handler.get_margins():
self.book.set_margins(handler.get_margins())
if self.book.get_format_name() != handler.get_format_name():
self.book.set_format_name(handler.get_format_name())
if self.book.get_output() != self.options.get_output():
self.book.set_output(self.options.get_output())
try:
self.make_book()
except (IOError, OSError) as msg:
ErrorDialog(str(msg), parent=self.window)
self.close()
def setup_style_frame(self):
pass
def setup_other_frames(self):
pass
def parse_style_frame(self):
pass
def get_title(self):
""" get the title """
return self.title
def get_header(self, name):
""" get the header """
return _("Gramps Book")
def make_doc_menu(self, active=None):
"""Build a menu of document types that are appropriate for
this text report. This menu will be generated based upon
whether the document requires table support, etc."""
self.format_menu = _BookFormatComboBox(active)
def make_document(self):
"""Create a document of the type requested by the user."""
user = User(uistate=self.uistate)
self.rptlist = []
selected_style = StyleSheet()
pstyle = self.paper_frame.get_paper_style()
self.doc = self.format(None, pstyle)
for item in self.book.get_item_list():
item.option_class.set_document(self.doc)
report_class = item.get_write_item()
obj = (write_book_item(self.database, report_class,
item.option_class, user),
item.get_translated_name())
self.rptlist.append(obj)
append_styles(selected_style, item)
self.doc.set_style_sheet(selected_style)
self.doc.open(self.target_path)
def make_book(self):
"""
The actual book. Start it out, then go through the item list
and call each item's write_book_item method (which were loaded
by the previous make_document method).
"""
try:
self.doc.init()
newpage = 0
for (rpt, name) in self.rptlist:
if newpage:
self.doc.page_break()
newpage = 1
if rpt:
rpt.begin_report()
rpt.write_report()
self.doc.close()
except ReportError as msg:
(msg1, msg2) = msg.messages()
msg2 += ' (%s)' % name # which report has the error?
ErrorDialog(msg1, msg2, parent=self.uistate.window)
return
except FilterError as msg:
(msg1, msg2) = msg.messages()
ErrorDialog(msg1, msg2, parent=self.uistate.window)
return
if self.open_with_app.get_active():
open_file_with_default_application(self.target_path, self.uistate)
def init_options(self, option_class):
try:
if issubclass(option_class, object):
self.options = option_class(self.raw_name, self.database)
except TypeError:
self.options = option_class
self.options.load_previous_values()
handler = self.options.handler
if self.book.get_paper_name():
handler.set_paper_name(self.book.get_paper_name())
if self.book.get_orientation() is not None: # 0 is legal
handler.set_orientation(self.book.get_orientation())
if self.book.get_paper_metric() is not None: # 0 is legal
handler.set_paper_metric(self.book.get_paper_metric())
if self.book.get_custom_paper_size():
handler.set_custom_paper_size(self.book.get_custom_paper_size())
if self.book.get_margins():
handler.set_margins(self.book.get_margins())
if self.book.get_format_name():
handler.set_format_name(self.book.get_format_name())
if self.book.get_output():
self.options.set_output(self.book.get_output())
#------------------------------------------------------------------------
#
# Generic task function for book
#
#------------------------------------------------------------------------
def write_book_item(database, report_class, options, user):
"""
    Write the report using the options set.
    All user dialogs have already been handled and the output file opened.
"""
try:
return report_class(database, options, user)
except ReportError as msg:
(msg1, msg2) = msg.messages()
ErrorDialog(msg1, msg2, parent=user.uistate.window)
except FilterError as msg:
(msg1, msg2) = msg.messages()
ErrorDialog(msg1, msg2, parent=user.uistate.window)
except:
LOG.error("Failed to write book item.", exc_info=True)
return None
| beernarrd/gramps | gramps/gui/plug/report/_bookdialog.py | Python | gpl-2.0 | 42,024 | [
"Brian"
] | c63101f56f95b9f0ee00968974f2520dca55fd28fc8947b1662a44a91439a1b2 |
"""
A Mayavi example to show the different data sets. See
:ref:`data-structures-used-by-mayavi` for a discussion.
The following images are created:
.. hlist::
* **ImageData**
.. image:: ../image_data.jpg
:scale: 50
* **RectilinearGrid**
.. image:: ../rectilinear_grid.jpg
:scale: 50
* **StructuredGrid**
.. image:: ../structured_grid.jpg
:scale: 50
* **UnstructuredGrid**
.. image:: ../unstructured_grid.jpg
:scale: 50
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup.org>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD style.
from numpy import array, random, linspace, pi, ravel, cos, sin, empty
from tvtk.api import tvtk
from mayavi.sources.vtk_data_source import VTKDataSource
from mayavi import mlab
def image_data():
data = random.random((3, 3, 3))
i = tvtk.ImageData(spacing=(1, 1, 1), origin=(0, 0, 0))
i.point_data.scalars = data.ravel()
i.point_data.scalars.name = 'scalars'
i.dimensions = data.shape
return i
def rectilinear_grid():
data = random.random((3, 3, 3))
r = tvtk.RectilinearGrid()
r.point_data.scalars = data.ravel()
r.point_data.scalars.name = 'scalars'
r.dimensions = data.shape
r.x_coordinates = array((0, 0.7, 1.4))
r.y_coordinates = array((0, 1, 3))
r.z_coordinates = array((0, .5, 2))
return r
def generate_annulus(r, theta, z):
""" Generate points for structured grid for a cylindrical annular
    volume. This method is useful for generating an unstructured
cylindrical mesh for VTK (and perhaps other tools).
"""
# Find the x values and y values for each plane.
x_plane = (cos(theta)*r[:,None]).ravel()
y_plane = (sin(theta)*r[:,None]).ravel()
# Allocate an array for all the points. We'll have len(x_plane)
# points on each plane, and we have a plane for each z value, so
# we need len(x_plane)*len(z) points.
points = empty([len(x_plane)*len(z),3])
# Loop through the points for each plane and fill them with the
# correct x,y,z values.
start = 0
for z_plane in z:
end = start+len(x_plane)
# slice out a plane of the output points and fill it
# with the x,y, and z values for this plane. The x,y
# values are the same for every plane. The z value
# is set to the current z
plane_points = points[start:end]
plane_points[:,0] = x_plane
plane_points[:,1] = y_plane
plane_points[:,2] = z_plane
start = end
return points
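# Illustrative note (not part of the original example): for r of length 3,
# theta of length 4 and z of length 2, generate_annulus returns an array of
# shape (3*4*2, 3) = (24, 3) -- one (x, y, z) row per grid point, filled
# plane by plane in z, with theta varying fastest within each plane.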
def structured_grid():
# Make the data.
dims = (3, 4, 3)
r = linspace(5, 15, dims[0])
theta = linspace(0, 0.5*pi, dims[1])
z = linspace(0, 10, dims[2])
pts = generate_annulus(r, theta, z)
sgrid = tvtk.StructuredGrid(dimensions=(dims[1], dims[0], dims[2]))
sgrid.points = pts
s = random.random((dims[0]*dims[1]*dims[2]))
sgrid.point_data.scalars = ravel(s.copy())
sgrid.point_data.scalars.name = 'scalars'
return sgrid
def unstructured_grid():
points = array([[0,1.2,0.6], [1,0,0], [0,1,0], [1,1,1], # tetra
[1,0,-0.5], [2,0,0], [2,1.5,0], [0,1,0],
[1,0,0], [1.5,-0.2,1], [1.6,1,1.5], [1,1,1], # Hex
], 'f')
# The cells
cells = array([4, 0, 1, 2, 3, # tetra
8, 4, 5, 6, 7, 8, 9, 10, 11 # hex
])
# The offsets for the cells, i.e. the indices where the cells
# start.
offset = array([0, 5])
tetra_type = tvtk.Tetra().cell_type # VTK_TETRA == 10
hex_type = tvtk.Hexahedron().cell_type # VTK_HEXAHEDRON == 12
cell_types = array([tetra_type, hex_type])
# Create the array of cells unambiguously.
cell_array = tvtk.CellArray()
cell_array.set_cells(2, cells)
# Now create the UG.
ug = tvtk.UnstructuredGrid(points=points)
# Now just set the cell types and reuse the ug locations and cells.
ug.set_cells(cell_types, offset, cell_array)
scalars = random.random(points.shape[0])
ug.point_data.scalars = scalars
ug.point_data.scalars.name = 'scalars'
return ug
def polydata():
# The numpy array data.
points = array([[0,-0.5,0], [1.5,0,0], [0,1,0], [0,0,0.5],
[-1,-1.5,0.1], [0,-1, 0.5], [-1, -0.5, 0],
[1,0.8,0]], 'f')
triangles = array([[0,1,3], [1,2,3], [1,0,5],
[2,3,4], [3,0,4], [0,5,4], [2, 4, 6],
[2, 1, 7]])
scalars = random.random(points.shape)
# The TVTK dataset.
mesh = tvtk.PolyData(points=points, polys=triangles)
mesh.point_data.scalars = scalars
mesh.point_data.scalars.name = 'scalars'
return mesh
def view(dataset):
""" Open up a mayavi scene and display the dataset in it.
"""
fig = mlab.figure(bgcolor=(1, 1, 1), fgcolor=(0, 0, 0),
figure=dataset.class_name[3:])
surf = mlab.pipeline.surface(dataset, opacity=0.1)
mlab.pipeline.surface(mlab.pipeline.extract_edges(surf),
color=(0, 0, 0), )
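# Usage note (illustrative, not part of the original example): to inspect a
# single dataset interactively you could call, e.g., view(structured_grid())
# followed by mlab.show() instead of running main() below.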
@mlab.show
def main():
view(image_data())
view(rectilinear_grid())
view(structured_grid())
view(unstructured_grid())
view(polydata())
if __name__ == '__main__':
main()
| dmsurti/mayavi | examples/mayavi/advanced_visualization/datasets.py | Python | bsd-3-clause | 5,311 | [
"Mayavi",
"VTK"
] | 2f545d3d6129317b37b6856ba753ec9c68570bc1aa845221e8a4734b1f0b118e |
from __future__ import print_function, division
import glob
import itertools
import json
import os
import shlex
import shutil
import subprocess
import tempfile
import hmmlearn.hmm
import mdtraj as md
import numpy as np
from mdtraj.testing import eq
from mdtraj.testing import get_fn as get_mdtraj_fn
from msmbuilder.dataset import dataset
from msmbuilder.example_datasets import get_data_home
from msmbuilder.utils import load
DATADIR = HMM = None
def setup_module():
global DATADIR, HMM
DATADIR = tempfile.mkdtemp()
# 4 components and 3 features. Each feature is going to be the x, y, z
# coordinate of 1 atom
HMM = hmmlearn.hmm.GaussianHMM(n_components=4)
HMM.transmat_ = np.array([[0.9, 0.1, 0.0, 0.0],
[0.1, 0.7, 0.2, 0.0],
[0.0, 0.1, 0.8, 0.1],
[0.0, 0.1, 0.1, 0.8]])
HMM.means_ = np.array([[-10, -10, -10],
[-5, -5, -5],
[5, 5, 5],
[10, 10, 10]])
HMM.covars_ = np.array([[0.1, 0.1, 0.1],
[0.5, 0.5, 0.5],
[1, 1, 1],
[4, 4, 4]])
HMM.startprob_ = np.array([1, 1, 1, 1]) / 4.0
# get a 1 atom topology
topology = md.load(get_mdtraj_fn('native.pdb')).atom_slice([1]).topology
# generate the trajectories and save them to disk
for i in range(10):
d, s = HMM.sample(100)
t = md.Trajectory(xyz=d.reshape(len(d), 1, 3), topology=topology)
t.save(os.path.join(DATADIR, 'Trajectory%d.h5' % i))
assert os.path.exists("{}/alanine_dipeptide".format(get_data_home()))
def teardown_module():
shutil.rmtree(DATADIR)
class tempdir(object):
def __enter__(self):
self._curdir = os.path.abspath(os.curdir)
self._tempdir = tempfile.mkdtemp()
os.chdir(self._tempdir)
def __exit__(self, *exc_info):
os.chdir(self._curdir)
shutil.rmtree(self._tempdir)
def shell(str):
split = shlex.split(str, posix=False)
try:
subprocess.check_output(split, stderr=subprocess.STDOUT,
universal_newlines=True)
except subprocess.CalledProcessError as e:
print(e.cmd)
print(e.output)
raise
def test_atomindices_1():
fn = get_mdtraj_fn('2EQQ.pdb')
t = md.load(fn)
with tempdir():
shell('msmb AtomIndices -o all.txt --all -a -p %s' % fn)
shell('msmb AtomIndices -o all-pairs.txt --all -d -p %s' % fn)
atoms = np.loadtxt('all.txt', int)
pairs = np.loadtxt('all-pairs.txt', int)
eq(t.n_atoms, len(atoms))
eq(int(t.n_atoms * (t.n_atoms - 1) / 2), len(pairs))
def test_atomindices_2():
fn = get_mdtraj_fn('2EQQ.pdb')
t = md.load(fn)
with tempdir():
shell('msmb AtomIndices -o heavy.txt --heavy -a -p %s' % fn)
shell('msmb AtomIndices -o heavy-pairs.txt --heavy -d -p %s' % fn)
atoms = np.loadtxt('heavy.txt', int)
pairs = np.loadtxt('heavy-pairs.txt', int)
assert all(t.topology.atom(i).element.symbol != 'H' for i in atoms)
assert (sum(1 for a in t.topology.atoms if a.element.symbol != 'H') ==
len(atoms))
eq(np.array(list(itertools.combinations(atoms, 2))), pairs)
def test_atomindices_3():
fn = get_mdtraj_fn('2EQQ.pdb')
t = md.load(fn)
with tempdir():
shell('msmb AtomIndices -o alpha.txt --alpha -a -p %s' % fn)
shell('msmb AtomIndices -o alpha-pairs.txt --alpha -d -p %s' % fn)
atoms = np.loadtxt('alpha.txt', int)
pairs = np.loadtxt('alpha-pairs.txt', int)
assert all(t.topology.atom(i).name == 'CA' for i in atoms)
assert sum(1 for a in t.topology.atoms if a.name == 'CA') == len(atoms)
eq(np.array(list(itertools.combinations(atoms, 2))), pairs)
def test_atomindices_4():
fn = get_mdtraj_fn('2EQQ.pdb')
t = md.load(fn)
with tempdir():
shell('msmb AtomIndices -o minimal.txt --minimal -a -p %s' % fn)
shell('msmb AtomIndices -o minimal-pairs.txt --minimal -d -p %s' % fn)
atoms = np.loadtxt('minimal.txt', int)
pairs = np.loadtxt('minimal-pairs.txt', int)
assert all(t.topology.atom(i).name in ['CA', 'CB', 'C', 'N', 'O']
for i in atoms)
eq(np.array(list(itertools.combinations(atoms, 2))), pairs)
def test_superpose_featurizer():
with tempdir():
shell('msmb AtomIndices -o all.txt --all -a -p '
'%s/alanine_dipeptide/ala2.pdb' % get_data_home()),
shell("msmb SuperposeFeaturizer "
"--trjs '{data_home}/alanine_dipeptide/*.dcd'"
" --transformed distances --atom_indices all.txt"
" --reference_traj {data_home}/alanine_dipeptide/ala2.pdb"
" --top {data_home}/alanine_dipeptide/ala2.pdb"
.format(data_home=get_data_home()))
ds = dataset('distances')
assert len(ds) == 10
assert ds[0].shape[1] == len(np.loadtxt('all.txt'))
print(ds.provenance)
def test_superpose_featurizer_reftop():
# see issue #555
with tempdir():
shell('msmb AtomIndices -o all.txt --all -a -p '
'%s/alanine_dipeptide/ala2.pdb' % get_data_home()),
shell("msmb SuperposeFeaturizer "
"--trjs '{data_home}/alanine_dipeptide/*.dcd'"
" --transformed distances --atom_indices all.txt"
" --reference_traj {data_home}/alanine_dipeptide/trajectory-0.dcd"
" --top {data_home}/alanine_dipeptide/ala2.pdb"
.format(data_home=get_data_home()))
ds = dataset('distances')
assert len(ds) == 10
assert ds[0].shape[1] == len(np.loadtxt('all.txt'))
print(ds.provenance)
def test_atom_pairs_featurizer():
with tempdir():
shell('msmb AtomIndices -o all.txt --all -d -p '
'%s/alanine_dipeptide/ala2.pdb' % get_data_home()),
shell("msmb AtomPairsFeaturizer "
"--trjs '{data_home}/alanine_dipeptide/*.dcd'"
" --transformed pairs --pair_indices all.txt"
" --top {data_home}/alanine_dipeptide/ala2.pdb"
.format(data_home=get_data_home()))
ds = dataset('pairs')
assert len(ds) == 10
assert ds[0].shape[1] == len(np.loadtxt('all.txt') ** 2)
print(ds.provenance)
def test_transform_command_1():
with tempdir():
shell("msmb KCenters -i {data_home}/alanine_dipeptide/*.dcd "
"-o model.pkl --top {data_home}/alanine_dipeptide/ala2.pdb "
"--metric rmsd".format(data_home=get_data_home()))
shell("msmb TransformDataset -i {data_home}/alanine_dipeptide/*.dcd "
"-m model.pkl -t transformed.h5 --top "
"{data_home}/alanine_dipeptide/ala2.pdb"
.format(data_home=get_data_home()))
eq(dataset('transformed.h5')[0], load('model.pkl').labels_[0])
with tempdir():
shell("msmb KCenters -i {data_home}/alanine_dipeptide/trajectory-0.dcd "
"-o model.pkl --top {data_home}/alanine_dipeptide/ala2.pdb "
"--metric rmsd".format(data_home=get_data_home()))
def test_transform_command_2():
with tempdir():
shell("msmb KCenters -i {data_home}/alanine_dipeptide/trajectory-0.dcd "
"-o model.pkl --top {data_home}/alanine_dipeptide/ala2.pdb "
"--metric rmsd "
"--stride 2".format(data_home=get_data_home()))
def test_help():
shell('msmb -h')
def test_convert_chunked_project_1():
with tempdir():
root = os.path.join(get_data_home(), 'alanine_dipeptide')
assert os.path.exists(root)
cmd = ("msmb ConvertChunkedProject out {root} --pattern *.dcd "
"-t {root}/ala2.pdb".format(root=root))
shell(cmd)
assert set(os.listdir('out')) == {'traj-00000000.dcd',
'trajectories.jsonl'}
# check that out/traj-00000.dcd really has concatenated all of
# the input trajs
length = len(md.open('out/traj-00000000.dcd'))
assert length == sum(len(md.open(f))
for f in glob.glob('%s/*.dcd' % root))
with open('out/trajectories.jsonl') as f:
record = json.load(f)
assert set(record.keys()) == {'filename', 'chunks'}
assert record['filename'] == 'traj-00000000.dcd'
assert sorted(glob.glob('%s/*.dcd' % root)) == record['chunks']
| mpharrigan/mixtape | msmbuilder/tests/test_commands.py | Python | lgpl-2.1 | 8,581 | [
"MDTraj"
] | 28270b43a1bdac850d3aae7e1b06b22d300a1aa8eb672cf916a9a16a51cc0720 |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import unittest
import inspect
import warnings
from skbio.util._decorator import classproperty, overrides
from skbio.util._decorator import (stable, experimental, deprecated,
_state_decorator)
from skbio.util._exception import OverrideError
class TestOverrides(unittest.TestCase):
def test_raises_when_missing(self):
class A(object):
pass
with self.assertRaises(OverrideError):
class B(A):
@overrides(A)
def test(self):
pass
def test_doc_inherited(self):
class A(object):
def test(self):
"""Docstring"""
pass
class B(A):
@overrides(A)
def test(self):
pass
self.assertEqual(B.test.__doc__, "Docstring")
def test_doc_not_inherited(self):
class A(object):
def test(self):
"""Docstring"""
pass
class B(A):
@overrides(A)
def test(self):
"""Different"""
pass
self.assertEqual(B.test.__doc__, "Different")
class TestClassProperty(unittest.TestCase):
def test_getter_only(self):
class Foo(object):
_foo = 42
@classproperty
def foo(cls):
return cls._foo
# class-level getter
self.assertEqual(Foo.foo, 42)
# instance-level getter
f = Foo()
self.assertEqual(f.foo, 42)
with self.assertRaises(AttributeError):
f.foo = 4242
class TestStabilityState(unittest.TestCase):
    # The indentation spacing gets weird, so the input docstring is defined
    # explicitly here and assigned to each test function after it is defined.
_test_docstring = (" Add 42, or something else, to x.\n"
"\n"
" Parameters\n"
" ----------\n"
" x : int, x\n"
" y : int, optional\n")
class TestBase(TestStabilityState):
def test_get_indentation_level(self):
c = _state_decorator()
self.assertEqual(c._get_indentation_level([]), 0)
self.assertEqual(
c._get_indentation_level([], default_no_existing_docstring=3), 3)
self.assertEqual(c._get_indentation_level([""]), 4)
self.assertEqual(
c._get_indentation_level([""], default_existing_docstring=3), 3)
in_ = (["summary"])
self.assertEqual(c._get_indentation_level(in_), 4)
in_ = (["summary", "", "", " ", "", " ", ""])
self.assertEqual(c._get_indentation_level(in_), 4)
in_ = (["summary", " More indentation", " Less indentation"])
self.assertEqual(c._get_indentation_level(in_), 5)
def test_update_docstring(self):
c = _state_decorator()
in_ = None
exp = ("""State: Test!!""")
self.assertEqual(c._update_docstring(in_, "Test!!"), exp)
in_ = """"""
exp = ("""\n\n State: Test!!""")
self.assertEqual(c._update_docstring(in_, "Test!!"), exp)
in_ = ("""Short summary\n\n Parameters\n\n----------\n """
"""x : int\n""")
exp = ("""Short summary\n\n State: Test!!\n\n"""
""" Parameters\n\n----------\n x : int\n""")
self.assertEqual(c._update_docstring(in_, "Test!!"), exp)
in_ = ("""Short summary\n\n Parameters\n\n----------\n """
"""x : int\n""")
exp = ("""Short summary\n\n State: Test!!\n\n"""
""" Parameters\n\n----------\n x : int\n""")
self.assertEqual(c._update_docstring(in_, "Test!!"), exp)
in_ = ("""Short summary\n\n Parameters\n\n----------\n """
"""x : int\n""")
exp = ("""Short summary\n\n State: Test!!Test!!Test!!Test!!Test!!"""
"""Test!!Test!!Test!!Test!!Test!!Test!!Te\n st!!T"""
"""est!!Test!!Test!!Test!!Test!!Test!!Test!!Test!!\n\n"""
""" Parameters\n\n----------\n x : int\n""")
self.assertEqual(c._update_docstring(in_, "Test!!"*20), exp)
class TestStable(TestStabilityState):
def _get_f(self, as_of):
def f(x, y=42):
return x + y
f.__doc__ = self._test_docstring
f = stable(as_of=as_of)(f)
return f
def test_function_output(self):
f = self._get_f('0.1.0')
self.assertEqual(f(1), 43)
def test_function_docstring(self):
f = self._get_f('0.1.0')
e1 = (" Add 42, or something else, to x.\n\n"
" State: Stable as of 0.1.0.\n\n"
" Parameters")
self.assertTrue(f.__doc__.startswith(e1))
f = self._get_f('0.1.1')
e1 = (" Add 42, or something else, to x.\n\n"
" State: Stable as of 0.1.1.\n\n"
" Parameters")
self.assertTrue(f.__doc__.startswith(e1))
def test_function_signature(self):
f = self._get_f('0.1.0')
expected = inspect.ArgSpec(
args=['x', 'y'], varargs=None, keywords=None, defaults=(42,))
self.assertEqual(inspect.getargspec(f), expected)
self.assertEqual(f.__name__, 'f')
def test_missing_kwarg(self):
self.assertRaises(ValueError, stable)
self.assertRaises(ValueError, stable, '0.1.0')
class TestExperimental(TestStabilityState):
def _get_f(self, as_of):
def f(x, y=42):
return x + y
f.__doc__ = self._test_docstring
f = experimental(as_of=as_of)(f)
return f
def test_function_output(self):
f = self._get_f('0.1.0')
self.assertEqual(f(1), 43)
def test_function_docstring(self):
f = self._get_f('0.1.0')
e1 = (" Add 42, or something else, to x.\n\n"
" State: Experimental as of 0.1.0.\n\n"
" Parameters")
self.assertTrue(f.__doc__.startswith(e1))
f = self._get_f('0.1.1')
e1 = (" Add 42, or something else, to x.\n\n"
" State: Experimental as of 0.1.1.\n\n"
" Parameters")
self.assertTrue(f.__doc__.startswith(e1))
def test_function_signature(self):
f = self._get_f('0.1.0')
expected = inspect.ArgSpec(
args=['x', 'y'], varargs=None, keywords=None, defaults=(42,))
self.assertEqual(inspect.getargspec(f), expected)
self.assertEqual(f.__name__, 'f')
def test_missing_kwarg(self):
self.assertRaises(ValueError, experimental)
self.assertRaises(ValueError, experimental, '0.1.0')
class TestDeprecated(TestStabilityState):
def _get_f(self, as_of, until, reason):
def f(x, y=42):
return x + y
f.__doc__ = self._test_docstring
f = deprecated(as_of=as_of, until=until, reason=reason)(f)
return f
def test_function_output(self):
f = self._get_f('0.1.0', until='0.1.4',
reason='You should now use skbio.g().')
self.assertEqual(f(1), 43)
def test_deprecation_warning(self):
f = self._get_f('0.1.0', until='0.1.4',
reason='You should now use skbio.g().')
# adapted from SO example here: http://stackoverflow.com/a/3892301
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
f(1)
self.assertTrue(issubclass(w[0].category, DeprecationWarning))
expected_str = "is deprecated as of scikit-bio version 0.1.0"
self.assertTrue(expected_str in str(w[0].message))
def test_function_docstring(self):
f = self._get_f('0.1.0', until='0.1.4',
reason='You should now use skbio.g().')
e1 = (" Add 42, or something else, to x.\n\n"
" .. note:: Deprecated as of 0.1.0 for "
"removal in 0.1.4. You should now use\n"
" skbio.g().\n\n"
" Parameters")
self.assertTrue(f.__doc__.startswith(e1))
f = self._get_f('0.1.1', until='0.1.5',
reason='You should now use skbio.h().')
e1 = (" Add 42, or something else, to x.\n\n"
" .. note:: Deprecated as of 0.1.1 for "
"removal in 0.1.5. You should now use\n"
" skbio.h().\n\n"
" Parameters")
self.assertTrue(f.__doc__.startswith(e1))
def test_function_signature(self):
f = self._get_f('0.1.0', until='0.1.4',
reason='You should now use skbio.g().')
expected = inspect.ArgSpec(
args=['x', 'y'], varargs=None, keywords=None, defaults=(42,))
self.assertEqual(inspect.getargspec(f), expected)
self.assertEqual(f.__name__, 'f')
def test_missing_kwarg(self):
self.assertRaises(ValueError, deprecated)
self.assertRaises(ValueError, deprecated, '0.1.0')
self.assertRaises(ValueError, deprecated, as_of='0.1.0')
self.assertRaises(ValueError, deprecated, as_of='0.1.0', until='0.1.4')
if __name__ == '__main__':
unittest.main()
| demis001/scikit-bio | skbio/util/tests/test_decorator.py | Python | bsd-3-clause | 9,694 | [
"scikit-bio"
] | b9f9a1ae7003b7889cbf0ad1d497393b8abe5725d9a111b235b819f2e2075c16 |
# Copyright (c) 2012-2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
# Simple test script
#
# "m5 test.py"
import optparse
import sys
import m5
from m5.defines import buildEnv
from m5.objects import *
from m5.util import addToPath, fatal
addToPath('../common')
addToPath('../ruby')
addToPath('../topologies')
import Options
import Ruby
import Simulation
import CacheConfig
import MemConfig
from Caches import *
from cpu2000 import *
#Prodromou: Include SPEC2k6 files
import SPEC2k6_train
import SPEC2k6_ref
#Prodromou: For periodical stat dumping
from m5.internal.stats import periodicStatDump as statDump
def get_processes(options):
"""Interprets provided options and returns a list of processes"""
multiprocesses = []
inputs = []
outputs = []
errouts = []
pargs = []
workloads = options.cmd.split(';')
if options.input != "":
inputs = options.input.split(';')
if options.output != "":
outputs = options.output.split(';')
if options.errout != "":
errouts = options.errout.split(';')
if options.options != "":
pargs = options.options.split(';')
# Prodromou: Add required options here so I don't have to
# re-write them every time
#options.cpu_type = "detailed"
#options.caches = True
#Prodromou: Invoke the benchmarks
if options.benchmark:
if options.bench_size == 'train':
if options.benchmark == 'perlbench':
process = SPEC2k6_train.perlbench
elif options.benchmark == 'bzip2':
process = SPEC2k6_train.bzip2
elif options.benchmark == 'gcc':
process = SPEC2k6_train.gcc
elif options.benchmark == 'mcf':
process = SPEC2k6_train.mcf
elif options.benchmark == 'milc':
process = SPEC2k6_train.milc
elif options.benchmark == 'gobmk':
process = SPEC2k6_train.gobmk
elif options.benchmark == 'hmmer':
process = SPEC2k6_train.hmmer
elif options.benchmark == 'sjeng':
process = SPEC2k6_train.sjeng
elif options.benchmark == 'libquantum':
process = SPEC2k6_train.libquantum
elif options.benchmark == 'h264ref':
process = SPEC2k6_train.h264ref
elif options.benchmark == 'lbm':
process = SPEC2k6_train.lbm
elif options.benchmark == 'sphinx3':
process = SPEC2k6_train.sphinx3
elif options.benchmark == 'specrand':
process = SPEC2k6_train.specrand
else:
print "Error: Unknown Benchmark"
sys.exit(1)
elif options.bench_size == 'ref':
if options.benchmark == 'perlbench':
process = SPEC2k6_ref.perlbench
elif options.benchmark == 'bzip2':
process = SPEC2k6_ref.bzip2
elif options.benchmark == 'gcc':
process = SPEC2k6_ref.gcc
elif options.benchmark == 'mcf':
process = SPEC2k6_ref.mcf
elif options.benchmark == 'milc':
process = SPEC2k6_ref.milc
elif options.benchmark == 'gobmk':
process = SPEC2k6_ref.gobmk
elif options.benchmark == 'hmmer':
process = SPEC2k6_ref.hmmer
elif options.benchmark == 'sjeng':
process = SPEC2k6_ref.sjeng
elif options.benchmark == 'libquantum':
process = SPEC2k6_ref.libquantum
elif options.benchmark == 'h264ref':
process = SPEC2k6_ref.h264ref
elif options.benchmark == 'lbm':
process = SPEC2k6_ref.lbm
elif options.benchmark == 'sphinx3':
process = SPEC2k6_ref.sphinx3
elif options.benchmark == 'specrand':
process = SPEC2k6_ref.specrand
elif options.benchmark == 'bwaves':
process = SPEC2k6_ref.bwaves
elif options.benchmark == 'gamess':
process = SPEC2k6_ref.gamess
elif options.benchmark == 'zeusmp':
process = SPEC2k6_ref.zeusmp
elif options.benchmark == 'leslie3d':
process = SPEC2k6_ref.leslie3d
elif options.benchmark == 'GemsFDTD':
process = SPEC2k6_ref.GemsFDTD
elif options.benchmark == 'tonto':
process = SPEC2k6_ref.tonto
elif options.benchmark == 'namd':
process = SPEC2k6_ref.namd
elif options.benchmark == 'dealII':
process = SPEC2k6_ref.dealII
elif options.benchmark == 'soplex':
process = SPEC2k6_ref.soplex
elif options.benchmark == 'povray':
process = SPEC2k6_ref.povray
elif options.benchmark == 'omnetpp':
process = SPEC2k6_ref.omnetpp
elif options.benchmark == 'astar':
process = SPEC2k6_ref.astar
elif options.benchmark == 'xalancbmk':
process = SPEC2k6_ref.xalancbmk
elif options.benchmark == 'gromacs':
process = SPEC2k6_ref.gromacs
elif options.benchmark == 'cactusADM':
process = SPEC2k6_ref.cactusADM
elif options.benchmark == 'calculix':
process = SPEC2k6_ref.calculix
elif options.benchmark == 'wrf':
process = SPEC2k6_ref.wrf
elif options.benchmark == 'perlbench_x86':
process = SPEC2k6_ref.perlbench_x86
elif options.benchmark == 'bzip2_x86':
process = SPEC2k6_ref.bzip2_x86
elif options.benchmark == 'gcc_x86':
process = SPEC2k6_ref.gcc_x86
elif options.benchmark == 'mcf_x86':
process = SPEC2k6_ref.mcf_x86
elif options.benchmark == 'milc_x86':
process = SPEC2k6_ref.milc_x86
elif options.benchmark == 'gobmk_x86':
process = SPEC2k6_ref.gobmk_x86
elif options.benchmark == 'hmmer_x86':
process = SPEC2k6_ref.hmmer_x86
elif options.benchmark == 'sjeng_x86':
process = SPEC2k6_ref.sjeng_x86
elif options.benchmark == 'libquantum_x86':
process = SPEC2k6_ref.libquantum_x86
elif options.benchmark == 'h264ref_x86':
process = SPEC2k6_ref.h264ref_x86
elif options.benchmark == 'lbm_x86':
process = SPEC2k6_ref.lbm_x86
elif options.benchmark == 'sphinx3_x86':
process = SPEC2k6_ref.sphinx3_x86
elif options.benchmark == 'specrand_x86':
process = SPEC2k6_ref.specrand_x86
elif options.benchmark == 'bwaves_x86':
process = SPEC2k6_ref.bwaves_x86
elif options.benchmark == 'gamess_x86':
process = SPEC2k6_ref.gamess_x86
elif options.benchmark == 'zeusmp_x86':
process = SPEC2k6_ref.zeusmp_x86
elif options.benchmark == 'leslie3d_x86':
process = SPEC2k6_ref.leslie3d_x86
elif options.benchmark == 'GemsFDTD_x86':
process = SPEC2k6_ref.GemsFDTD_x86
elif options.benchmark == 'tonto_x86':
process = SPEC2k6_ref.tonto_x86
elif options.benchmark == 'namd_x86':
process = SPEC2k6_ref.namd_x86
elif options.benchmark == 'dealII_x86':
process = SPEC2k6_ref.dealII_x86
elif options.benchmark == 'soplex_x86':
process = SPEC2k6_ref.soplex_x86
elif options.benchmark == 'povray_x86':
process = SPEC2k6_ref.povray_x86
elif options.benchmark == 'omnetpp_x86':
process = SPEC2k6_ref.omnetpp_x86
elif options.benchmark == 'astar_x86':
process = SPEC2k6_ref.astar_x86
elif options.benchmark == 'xalancbmk_x86':
process = SPEC2k6_ref.xalancbmk_x86
elif options.benchmark == 'gromacs_x86':
process = SPEC2k6_ref.gromacs_x86
elif options.benchmark == 'cactusADM_x86':
process = SPEC2k6_ref.cactusADM_x86
elif options.benchmark == 'calculix_x86':
process = SPEC2k6_ref.calculix_x86
elif options.benchmark == 'wrf_x86':
process = SPEC2k6_ref.wrf_x86
else:
print "Error: Unknown Benchmark"
sys.exit(1)
else:
print "Error: Not supported benchmark size"
sys.exit(1)
multiprocesses.append(process)
return multiprocesses, 1
idx = 0
for wrkld in workloads:
process = LiveProcess()
process.executable = wrkld
if len(pargs) > idx:
process.cmd = [wrkld] + pargs[idx].split()
else:
process.cmd = [wrkld]
if len(inputs) > idx:
process.input = inputs[idx]
if len(outputs) > idx:
process.output = outputs[idx]
if len(errouts) > idx:
process.errout = errouts[idx]
multiprocesses.append(process)
idx += 1
if options.smt:
assert(options.cpu_type == "detailed" or options.cpu_type == "inorder")
return multiprocesses, idx
else:
return multiprocesses, 1
parser = optparse.OptionParser()
Options.addCommonOptions(parser)
Options.addSEOptions(parser)
if '--ruby' in sys.argv:
Ruby.define_options(parser)
(options, args) = parser.parse_args()
if args:
print "Error: script doesn't take any positional arguments"
sys.exit(1)
multiprocesses = []
numThreads = 1
#PRODROMOU
if options.total_insts:
# Some thread HAS to execute AT LEAST this many instructions
# Gives a very coarse grain breakpoint for the resume logic to kick in
options.maxinsts = options.total_insts / options.num_cpus
if options.checkpoint_restore and options.take_checkpoints:
print "Both restore and record checkpoint options enabled. "
cr_value = int(options.checkpoint_restore)
tc_value = int(options.take_checkpoints)
difference = tc_value - cr_value
options.take_checkpoints = str(difference)
print "Value stored is: " + options.take_checkpoints
#PRODROMOU
if options.bench:
apps = options.bench.split("-")
if len(apps) != options.num_cpus:
print "number of benchmarks not equal to set num_cpus!"
sys.exit(1)
for app in apps:
try:
if buildEnv['TARGET_ISA'] == 'alpha':
exec("workload = %s('alpha', 'tru64', 'ref')" % app)
else:
exec("workload = %s(buildEnv['TARGET_ISA'], 'linux', 'ref')" % app)
multiprocesses.append(workload.makeLiveProcess())
except:
print >>sys.stderr, "Unable to find workload for %s: %s" % (buildEnv['TARGET_ISA'], app)
sys.exit(1)
#Prodromou: Need to add this
elif options.benchmark:
multiprocesses, numThreads = get_processes(options)
elif options.cmd:
multiprocesses, numThreads = get_processes(options)
else:
print >> sys.stderr, "No workload specified. Exiting!\n"
sys.exit(1)
(CPUClass, test_mem_mode, FutureClass) = Simulation.setCPUClass(options)
CPUClass.numThreads = numThreads
MemClass = Simulation.setMemClass(options)
# Check -- do not allow SMT with multiple CPUs
if options.smt and options.num_cpus > 1:
fatal("You cannot use SMT with multiple CPUs!")
np = options.num_cpus
#PRODROMOU: Set the instruction window
system = System(cpu = [CPUClass(cpu_id=i) for i in xrange(np)],
mem_mode = test_mem_mode,
mem_ranges = [AddrRange(options.mem_size)],
cache_line_size = options.cacheline_size)
# Create a top-level voltage domain
system.voltage_domain = VoltageDomain(voltage = options.sys_voltage)
# Create a source clock for the system and set the clock period
system.clk_domain = SrcClockDomain(clock = options.sys_clock,
voltage_domain = system.voltage_domain)
# Create a CPU voltage domain
system.cpu_voltage_domain = VoltageDomain()
# Create a separate clock domain for the CPUs
system.cpu_clk_domain = SrcClockDomain(clock = options.cpu_clock,
voltage_domain =
system.cpu_voltage_domain)
# All cpus belong to a common cpu_clk_domain, therefore running at a common
# frequency.
for cpu in system.cpu:
cpu.clk_domain = system.cpu_clk_domain
# Sanity check
if options.fastmem:
if CPUClass != AtomicSimpleCPU:
fatal("Fastmem can only be used with atomic CPU!")
if (options.caches or options.l2cache):
fatal("You cannot use fastmem in combination with caches!")
if options.simpoint_profile:
if not options.fastmem:
# Atomic CPU checked with fastmem option already
fatal("SimPoint generation should be done with atomic cpu and fastmem")
if np > 1:
fatal("SimPoint generation not supported with more than one CPUs")
for i in xrange(np):
if options.smt:
system.cpu[i].workload = multiprocesses
elif len(multiprocesses) == 1:
system.cpu[i].workload = multiprocesses[0]
else:
system.cpu[i].workload = multiprocesses[i]
if options.fastmem:
system.cpu[i].fastmem = True
if options.simpoint_profile:
system.cpu[i].simpoint_profile = True
system.cpu[i].simpoint_interval = options.simpoint_interval
if options.checker:
system.cpu[i].addCheckerCpu()
system.cpu[i].createThreads()
if options.ruby:
if not (options.cpu_type == "detailed" or options.cpu_type == "timing"):
print >> sys.stderr, "Ruby requires TimingSimpleCPU or O3CPU!!"
sys.exit(1)
# Set the option for physmem so that it is not allocated any space
system.physmem = MemClass(range=AddrRange(options.mem_size),
null = True)
options.use_map = True
Ruby.create_system(options, system)
assert(options.num_cpus == len(system.ruby._cpu_ruby_ports))
for i in xrange(np):
ruby_port = system.ruby._cpu_ruby_ports[i]
# Create the interrupt controller and connect its ports to Ruby
# Note that the interrupt controller is always present but only
# in x86 does it have message ports that need to be connected
system.cpu[i].createInterruptController()
# Connect the cpu's cache ports to Ruby
system.cpu[i].icache_port = ruby_port.slave
system.cpu[i].dcache_port = ruby_port.slave
if buildEnv['TARGET_ISA'] == 'x86':
system.cpu[i].interrupts.pio = ruby_port.master
system.cpu[i].interrupts.int_master = ruby_port.slave
system.cpu[i].interrupts.int_slave = ruby_port.master
system.cpu[i].itb.walker.port = ruby_port.slave
system.cpu[i].dtb.walker.port = ruby_port.slave
else:
system.membus = CoherentBus()
system.system_port = system.membus.slave
if options.mutlu:
CacheConfig.config_cache_parbs(options, system)
else:
CacheConfig.config_cache(options, system)
MemConfig.config_mem(options, system)
if options.dump_interval:
statDump (options.dump_interval)
m5.disableAllListeners()
root = Root(full_system = False, system = system)
#Prodromou: Try to modify the tCL value of the controller
#for ctrl in system.mem_ctrls:
# print "Hello %l" % (ctrl.tCL.getValue())
Simulation.run(options, root, system, FutureClass)
| prodromou87/gem5 | configs/example/se_bench.py | Python | bsd-3-clause | 17,505 | [
"GAMESS",
"Gromacs",
"NAMD"
] | c5f6950b7d3ff5cdcaaf369eb74b7fb087e4f9f733812b20291f0161986051e8 |
""" SingularityCE is a type of "inner" CEs
(meaning it's used by a jobAgent inside a pilot).
A computing element class using singularity containers,
where Singularity is supposed to be found on the WN.
The goal of this CE is to start the job in the container set by
the "ContainerRoot" config option.
DIRAC can be re-installed within the container.
See the Configuration/Resources/Computing documentation for details on
where to set the option parameters.
"""
import io
import json
import os
import shutil
import sys
import tempfile
import DIRAC
from DIRAC import S_OK, S_ERROR, gConfig, gLogger
from DIRAC.Core.Utilities.Subprocess import systemCall
from DIRAC.ConfigurationSystem.Client.Helpers import Operations
from DIRAC.Core.Utilities.ThreadScheduler import gThreadScheduler
from DIRAC.Resources.Computing.ComputingElement import ComputingElement
from DIRAC.WorkloadManagementSystem.Utilities.Utils import createRelocatedJobWrapper
# Default container to use if it isn't specified in the CE options
CONTAINER_DEFROOT = "/cvmfs/cernvm-prod.cern.ch/cvm4"
CONTAINER_WORKDIR = "DIRAC_containers"
CONTAINER_INNERDIR = "/tmp"
# What is executed inside the container (2 options given)
CONTAINER_WRAPPER_INSTALL = """#!/bin/bash
echo "Starting inner container wrapper scripts at `date`."
set -ex
cd /tmp
# Install DIRAC
installer_name="DIRACOS-Linux-$(uname -m).sh"
if [[ -d /cvmfs/dirac.egi.eu/installSource/ ]]; then
bash /cvmfs/dirac.egi.eu/installSource/"${installer_name}"
else
curl -LO "https://github.com/DIRACGrid/DIRACOS2/releases/latest/download/${installer_name}"
bash "${installer_name}"
rm "${installer_name}"
fi
source diracos/diracosrc
pip install %(dirac_project)s==%(version)s
dirac-configure -F %(config_args)s -I
# Add compatibility with pilot3 where config is in pilot.cfg
ln -s diracos/etc/dirac.cfg pilot.cfg
# Run next wrapper (to start actual job)
bash %(next_wrapper)s
# Write the payload errorcode to a file for the outer scripts
echo $? > retcode
chmod 644 retcode
echo "Finishing inner container wrapper scripts at `date`."
"""
# Path to a directory on CVMFS to use as a fallback if no
# other version found: Only used if node has user namespaces
FALLBACK_SINGULARITY = "/cvmfs/oasis.opensciencegrid.org/mis/singularity/current/bin"
CONTAINER_WRAPPER_NO_INSTALL = """#!/bin/bash
echo "Starting inner container wrapper scripts (no install) at `date`."
set -x
cd /tmp
export DIRAC=%(dirac_env_var)s
export DIRACOS=%(diracos_env_var)s
# In any case we need to find a bashrc, and a pilot.cfg, both created by the pilot
source %(rc_script)s
# Run next wrapper (to start actual job)
bash %(next_wrapper)s
# Write the payload errorcode to a file for the outer scripts
echo $? > retcode
chmod 644 retcode
echo "Finishing inner container wrapper scripts at `date`."
"""
class SingularityComputingElement(ComputingElement):
"""A Computing Element for running a job within a Singularity container."""
def __init__(self, ceUniqueID):
"""Standard constructor."""
super(SingularityComputingElement, self).__init__(ceUniqueID)
self.__submittedJobs = 0
self.__runningJobs = 0
self.__root = CONTAINER_DEFROOT
if "ContainerRoot" in self.ceParameters:
self.__root = self.ceParameters["ContainerRoot"]
self.__workdir = CONTAINER_WORKDIR
self.__innerdir = CONTAINER_INNERDIR
self.__singularityBin = "singularity"
self.__installDIRACInContainer = self.ceParameters.get("InstallDIRACInContainer", False)
if isinstance(self.__installDIRACInContainer, str) and self.__installDIRACInContainer.lower() in (
"false",
"no",
):
self.__installDIRACInContainer = False
self.processors = int(self.ceParameters.get("NumberOfProcessors", 1))
def __hasUserNS(self):
"""Detect if this node has user namespaces enabled.
Returns True if they are enabled, False otherwise.
"""
try:
with open("/proc/sys/user/max_user_namespaces", "r") as proc_fd:
maxns = int(proc_fd.readline().strip())
# Any "reasonable number" of namespaces is sufficient
return maxns > 100
except Exception:
# Any failure, missing file, doesn't contain a number, etc. and we
# assume they are disabled.
return False
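    # Note (illustrative): on recent kernels /proc/sys/user/max_user_namespaces
    # typically holds a value in the tens of thousands, while 0 (or a missing
    # file on older kernels) effectively means user namespaces are disabled,
    # so the ">100" check above is just a generous sanity threshold.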
def __hasSingularity(self):
"""Search the current PATH for an exectuable named singularity.
Returns True if it is found, False otherwise.
"""
if self.ceParameters.get("ContainerBin"):
binPath = self.ceParameters["ContainerBin"]
if os.path.isfile(binPath) and os.access(binPath, os.X_OK):
self.__singularityBin = binPath
self.log.debug('Use singularity from "%s"' % self.__singularityBin)
return True
if "PATH" not in os.environ:
return False # Hmm, PATH not set? How unusual...
searchPaths = os.environ["PATH"].split(os.pathsep)
# We can use CVMFS as a last resort if userNS is enabled
if self.__hasUserNS():
searchPaths.append(FALLBACK_SINGULARITY)
for searchPath in searchPaths:
binPath = os.path.join(searchPath, "singularity")
if os.path.isfile(binPath):
# File found, check it's executable to be certain:
if os.access(binPath, os.X_OK):
self.log.debug('Found singularity at "%s"' % binPath)
self.__singularityBin = binPath
return True
# No suitable binaries found
return False
@staticmethod
def __findInstallBaseDir():
"""Find the path to root of the current DIRAC installation"""
return os.path.realpath(sys.base_prefix)
def __getInstallFlags(self, infoDict=None):
"""Get the flags for installing inside the container."""
if not infoDict:
infoDict = {}
setup = infoDict.get("DefaultSetup")
if not setup:
setup = list(infoDict.get("Setups"))[0]
if not setup:
setup = gConfig.getValue("/DIRAC/Setup", "unknown")
setup = str(setup)
diracProject = "DIRAC"
project = str(infoDict.get("Project"))
if not project or project == "None":
diracProject = Operations.Operations(setup=setup).getValue("Pilot/Project", "") + diracProject
diracVersions = str(infoDict["Setups"][setup].get("Version")).split(",")
if not diracVersions:
diracVersions = str(infoDict["Setups"]["Defaults"].get("Version")).split(",")
if not diracVersions:
diracVersions = Operations.Operations(setup=setup).getValue("Pilot/Version", [])
version = diracVersions[0].strip()
return diracProject, version
@staticmethod
def __getConfigFlags(infoDict=None):
"""Get the flags for dirac-configure inside the container.
Returns a string containing the command line flags.
"""
if not infoDict:
infoDict = {}
cfgOpts = []
setup = infoDict.get("DefaultSetup")
if not setup:
setup = gConfig.getValue("/DIRAC/Setup", "unknown")
cfgOpts.append("-S '%s'" % setup)
csServers = infoDict.get("ConfigurationServers")
if not csServers:
csServers = gConfig.getValue("/DIRAC/Configuration/Servers", [])
cfgOpts.append("-C '%s'" % ",".join([str(ce) for ce in csServers]))
cfgOpts.append("-n '%s'" % DIRAC.siteName())
return " ".join(cfgOpts)
def __createWorkArea(self, jobDesc=None, log=None, logLevel="INFO", proxy=None):
"""Creates a directory for the container and populates it with the
template directories, scripts & proxy.
"""
if not jobDesc:
jobDesc = {}
if not log:
log = gLogger
# Create the directory for our container area
try:
os.mkdir(self.__workdir)
except OSError:
if not os.path.isdir(self.__workdir):
result = S_ERROR("Failed to create container base directory '%s'" % self.__workdir)
result["ReschedulePayload"] = True
return result
# Otherwise, directory probably just already exists...
baseDir = None
try:
baseDir = tempfile.mkdtemp(prefix="job%s_" % jobDesc.get("jobID", 0), dir=self.__workdir)
except OSError:
result = S_ERROR("Failed to create container work directory in '%s'" % self.__workdir)
result["ReschedulePayload"] = True
return result
self.log.debug("Use singularity workarea: %s" % baseDir)
for subdir in ["home", "tmp", "var_tmp"]:
os.mkdir(os.path.join(baseDir, subdir))
tmpDir = os.path.join(baseDir, "tmp")
# Now we have a directory, we can stage in the proxy and scripts
# Proxy
if proxy:
proxyLoc = os.path.join(tmpDir, "proxy")
rawfd = os.open(proxyLoc, os.O_WRONLY | os.O_CREAT, 0o600)
fd = os.fdopen(rawfd, "w")
fd.write(proxy)
fd.close()
else:
self.log.warn("No user proxy")
# Job Wrapper (Standard-ish DIRAC wrapper)
result = createRelocatedJobWrapper(
wrapperPath=tmpDir,
rootLocation=self.__innerdir,
jobID=jobDesc.get("jobID", 0),
jobParams=jobDesc.get("jobParams", {}),
resourceParams=jobDesc.get("resourceParams", {}),
optimizerParams=jobDesc.get("optimizerParams", {}),
log=log,
logLevel=logLevel,
extraOptions="" if self.__installDIRACInContainer else "/tmp/pilot.cfg",
)
if not result["OK"]:
result["ReschedulePayload"] = True
return result
wrapperPath = result["Value"]
if self.__installDIRACInContainer:
infoDict = None
if os.path.isfile("pilot.json"): # if this is a pilot 3 this file should be found
with io.open("pilot.json") as pj:
infoDict = json.load(pj)
# Extra Wrapper (Container DIRAC installer)
installFlags = self.__getInstallFlags(infoDict)
wrapSubs = {
"next_wrapper": wrapperPath,
"dirac_project": installFlags[0],
"version": installFlags[1],
"config_args": self.__getConfigFlags(infoDict),
}
CONTAINER_WRAPPER = CONTAINER_WRAPPER_INSTALL
else: # In case we don't (re)install DIRAC
wrapSubs = {
"next_wrapper": wrapperPath,
"dirac_env_var": os.environ.get("DIRAC", ""),
"diracos_env_var": os.environ.get("DIRACOS", ""),
}
wrapSubs["rc_script"] = os.path.join(self.__findInstallBaseDir(), "diracosrc")
shutil.copyfile("pilot.cfg", os.path.join(tmpDir, "pilot.cfg"))
CONTAINER_WRAPPER = CONTAINER_WRAPPER_NO_INSTALL
wrapLoc = os.path.join(tmpDir, "dirac_container.sh")
rawfd = os.open(wrapLoc, os.O_WRONLY | os.O_CREAT, 0o700)
fd = os.fdopen(rawfd, "w")
fd.write(CONTAINER_WRAPPER % wrapSubs)
fd.close()
ret = S_OK()
ret["baseDir"] = baseDir
ret["tmpDir"] = tmpDir
if proxy:
ret["proxyLocation"] = proxyLoc
return ret
def __deleteWorkArea(self, baseDir):
"""Deletes the container work area (baseDir path) unless 'KeepWorkArea'
option is set. Returns None.
"""
if self.ceParameters.get("KeepWorkArea", False):
return
# We can't really do anything about errors: The tree should be fully owned
# by the pilot user, so we don't expect any permissions problems.
shutil.rmtree(baseDir, ignore_errors=True)
def __getEnv(self):
"""Gets the environment for use within the container.
We blank almost everything to prevent contamination from the host system.
"""
payloadEnv = {}
if "TERM" in os.environ:
payloadEnv["TERM"] = os.environ["TERM"]
payloadEnv["TMP"] = "/tmp"
payloadEnv["TMPDIR"] = "/tmp"
payloadEnv["X509_USER_PROXY"] = os.path.join(self.__innerdir, "proxy")
return payloadEnv
@staticmethod
def __checkResult(tmpDir):
"""Gets the result of the payload command and returns it."""
# The wrapper writes the inner job return code to "retcode"
# in the working directory.
try:
with open(os.path.join(tmpDir, "retcode"), "rt") as fp:
retCode = int(fp.read())
except (IOError, ValueError):
# Something failed while trying to get the return code
result = S_ERROR("Failed to get return code from inner wrapper")
result["ReschedulePayload"] = True
return result
result = S_OK()
if retCode:
# This is the one case where we don't reschedule:
# An actual failure of the inner payload for some reason
result = S_ERROR("Command failed with exit code %d" % retCode)
return result
def submitJob(self, executableFile, proxy=None, **kwargs):
"""Start a container for a job.
executableFile is ignored. A new wrapper suitable for running in a
container is created from jobDesc.
"""
rootImage = self.__root
# Check that singularity is available
if not self.__hasSingularity():
self.log.error("Singularity is not installed on PATH.")
result = S_ERROR("Failed to find singularity ")
result["ReschedulePayload"] = True
return result
self.log.info("Creating singularity container")
# Start by making the directory for the container
ret = self.__createWorkArea(kwargs.get("jobDesc"), kwargs.get("log"), kwargs.get("logLevel", "INFO"), proxy)
if not ret["OK"]:
return ret
baseDir = ret["baseDir"]
tmpDir = ret["tmpDir"]
if proxy:
payloadProxyLoc = ret["proxyLocation"]
# Now we have to set-up payload proxy renewal for the container
# This is fairly easy as it remains visible on the host filesystem
result = gThreadScheduler.addPeriodicTask(
self.proxyCheckPeriod, self._monitorProxy, taskArgs=(payloadProxyLoc,), executions=0, elapsedTime=0
)
if result["OK"]:
renewTask = result["Value"]
else:
self.log.warn("Failed to start proxy renewal task")
renewTask = None
# Very simple accounting
self.__submittedJobs += 1
self.__runningJobs += 1
        # Now prepare to start singularity
# Mount /cvmfs in if it exists on the host
withCVMFS = os.path.isdir("/cvmfs")
innerCmd = os.path.join(self.__innerdir, "dirac_container.sh")
cmd = [self.__singularityBin, "exec"]
cmd.extend(["--contain"]) # use minimal /dev and empty other directories (e.g. /tmp and $HOME)
cmd.extend(["--ipc", "--pid"]) # run container in new IPC and PID namespaces
cmd.extend(["--workdir", baseDir]) # working directory to be used for /tmp, /var/tmp and $HOME
if self.__hasUserNS():
cmd.append("--userns")
if withCVMFS:
cmd.extend(["--bind", "/cvmfs"])
if not self.__installDIRACInContainer:
cmd.extend(["--bind", "{0}:{0}:ro".format(self.__findInstallBaseDir())])
if "ContainerBind" in self.ceParameters:
bindPaths = self.ceParameters["ContainerBind"].split(",")
for bindPath in bindPaths:
if len(bindPath.split(":::")) == 1:
cmd.extend(["--bind", bindPath.strip()])
elif len(bindPath.split(":::")) in [2, 3]:
cmd.extend(["--bind", ":".join([bp.strip() for bp in bindPath.split(":::")])])
if "ContainerOptions" in self.ceParameters:
containerOpts = self.ceParameters["ContainerOptions"].split(",")
for opt in containerOpts:
cmd.extend([opt.strip()])
if os.path.isdir(rootImage) or os.path.isfile(rootImage):
cmd.extend([rootImage, innerCmd])
else:
            # if we are here it's because there's no image, or it is not accessible (e.g. not on CVMFS)
self.log.error("Singularity image to exec not found: ", rootImage)
result = S_ERROR("Failed to find singularity image to exec")
result["ReschedulePayload"] = True
return result
self.log.debug("Execute singularity command: %s" % cmd)
self.log.debug("Execute singularity env: %s" % self.__getEnv())
result = systemCall(0, cmd, callbackFunction=self.sendOutput, env=self.__getEnv())
self.__runningJobs -= 1
if not result["OK"]:
self.log.error("Fail to run Singularity", result["Message"])
if proxy and renewTask:
gThreadScheduler.removeTask(renewTask)
self.__deleteWorkArea(baseDir)
result = S_ERROR("Error running singularity command")
result["ReschedulePayload"] = True
return result
result = self.__checkResult(tmpDir)
if proxy and renewTask:
gThreadScheduler.removeTask(renewTask)
self.__deleteWorkArea(baseDir)
return result
def getCEStatus(self):
"""Method to return information on running and pending jobs."""
result = S_OK()
result["SubmittedJobs"] = self.__submittedJobs
result["RunningJobs"] = self.__runningJobs
result["WaitingJobs"] = 0
# processors
result["AvailableProcessors"] = self.processors
return result
| DIRACGrid/DIRAC | src/DIRAC/Resources/Computing/SingularityComputingElement.py | Python | gpl-3.0 | 18,098 | ["DIRAC"] | 8d08a0e4cb95c89249ada39a4631aaf52a7522cbe792c9995a6f9c5cd3d2fa79 |
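The ContainerBind handling in submitJob above packs a comma-separated CE parameter into repeated --bind arguments, using ":::" to separate source, destination and mode. The standalone sketch below (plain Python, no DIRAC required; the sample parameter value is invented) mirrors that parsing so the resulting command-line fragment is easy to see.
def bind_args(container_bind):
    """Mirror of the ContainerBind parsing used by SingularityComputingElement."""
    cmd = []
    for bindPath in container_bind.split(","):
        parts = [part.strip() for part in bindPath.split(":::")]
        if len(parts) == 1:
            cmd.extend(["--bind", parts[0]])
        elif len(parts) in (2, 3):
            cmd.extend(["--bind", ":".join(parts)])
    return cmd

print(bind_args("/cvmfs, /scratch:::/data:::ro"))
# ['--bind', '/cvmfs', '--bind', '/scratch:/data:ro']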
# Copyright (C) 2003 CAMP
# Please see the accompanying LICENSE file for further information.
"""Finite difference operators.
This file defines a series of finite difference operators used in grid mode.
"""
from __future__ import division
from math import pi
import numpy as np
from numpy.fft import fftn, ifftn
from gpaw import debug, extra_parameters
from gpaw.utilities import fact
import _gpaw
# Expansion coefficients for finite difference Laplacian. The numbers are
# from J. R. Chelikowsky et al., Phys. Rev. B 50, 11355 (1994):
laplace = [[0],
[-2, 1],
[-5/2, 4/3, -1/12],
[-49/18, 3/2, -3/20, 1/90],
[-205/72, 8/5, -1/5, 8/315, -1/560],
[-5269/1800, 5/3, -5/21, 5/126, -5/1008, 1/3150],
[-5369/1800, 12/7, -15/56, 10/189, -1/112, 2/1925, -1/16632]]
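# Illustrative note (added comment, not in the original GPAW source): the n=1
# row [-2, 1] is the familiar 3-point stencil.  For f(x) = x**2 on a grid with
# spacing h it reproduces f'' = 2 exactly:
#     (f(x-h) - 2*f(x) + f(x+h)) / h**2 = 2*h**2 / h**2 = 2
# Each longer row cancels further Taylor terms, giving the O(h^(2n)) accuracy
# quoted in the GUCLaplace docstring below.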
class FDOperator:
def __init__(self, coef_p, offset_pc, gd, dtype=float,
description=None):
"""FDOperator(coefs, offsets, gd, dtype) -> FDOperator object.
"""
# Is this a central finite-difference type of stencil?
cfd = True
for offset_c in offset_pc:
if sum([offset != 0 for offset in offset_c]) >= 2:
cfd = False
maxoffset_c = [max([offset_c[c] for offset_c in offset_pc])
for c in range(3)]
mp = maxoffset_c[0]
if maxoffset_c[1] != mp or maxoffset_c[2] != mp:
## print 'Warning: this should be optimized XXXX', maxoffsets, mp
mp = max(maxoffset_c)
n_c = gd.n_c
M_c = n_c + 2 * mp
stride_c = np.array([M_c[1] * M_c[2], M_c[2], 1])
offset_p = np.dot(offset_pc, stride_c)
coef_p = np.ascontiguousarray(coef_p, float)
neighbor_cd = gd.neighbor_cd
assert np.rank(coef_p) == 1
assert coef_p.shape == offset_p.shape
assert dtype in [float, complex]
self.dtype = dtype
self.shape = tuple(n_c)
if gd.comm.size > 1:
comm = gd.comm.get_c_object()
else:
comm = None
assert neighbor_cd.flags.c_contiguous and offset_p.flags.c_contiguous
self.mp = mp # padding
self.gd = gd
self.npoints = len(coef_p)
self.operator = _gpaw.Operator(coef_p, offset_p, n_c, mp,
neighbor_cd, dtype == float,
comm, cfd)
if description is None:
description = '%d point finite-difference stencil' % self.npoints
self.description = description
def __str__(self):
return '<' + self.description + '>'
def apply(self, in_xg, out_xg, phase_cd=None):
self.operator.apply(in_xg, out_xg, phase_cd)
def relax(self, relax_method, f_g, s_g, n, w=None):
self.operator.relax(relax_method, f_g, s_g, n, w)
def get_diagonal_element(self):
return self.operator.get_diagonal_element()
def get_async_sizes(self):
return self.operator.get_async_sizes()
if debug:
_FDOperator = FDOperator
class FDOperator(_FDOperator):
def apply(self, in_xg, out_xg, phase_cd=None):
assert in_xg.shape == out_xg.shape
assert in_xg.shape[-3:] == self.shape
assert in_xg.flags.contiguous
assert in_xg.dtype == self.dtype
assert out_xg.flags.contiguous
assert out_xg.dtype == self.dtype
assert (self.dtype == float or
(phase_cd.dtype == complex and
phase_cd.shape == (3, 2)))
_FDOperator.apply(self, in_xg, out_xg, phase_cd)
def relax(self, relax_method, f_g, s_g, n, w=None):
assert f_g.shape == self.shape
assert s_g.shape == self.shape
assert f_g.flags.contiguous
assert f_g.dtype == float
assert s_g.flags.contiguous
assert s_g.dtype == float
assert self.dtype == float
_FDOperator.relax(self, relax_method, f_g, s_g, n, w)
class Gradient(FDOperator):
def __init__(self, gd, v, scale=1.0, n=1, dtype=float):
h = (gd.h_cv**2).sum(1)**0.5
d = gd.xxxiucell_cv[:,v]
        A = np.zeros((2 * n + 1, 2 * n + 1))
        for i, io in enumerate(range(-n, n + 1)):
            for j in range(2 * n + 1):
                A[i, j] = io**j / float(fact(j))
        A[n, 0] = 1.
        coefs = np.linalg.inv(A)[1]
        coefs = np.delete(coefs, len(coefs) // 2)
        offs = np.delete(np.arange(-n, n + 1), n)
        coef_p = []
        offset_pc = []
        for i in range(3):
            if abs(d[i]) > 1e-11:
                coef_p.extend(list(coefs * d[i] / h[i] * scale))
                offset = np.zeros((2 * n, 3), int)
                offset[:, i] = offs
                offset_pc.extend(offset)
FDOperator.__init__(self, coef_p, offset_pc, gd, dtype,
'O(h^%d) %s-gradient stencil' % (2 * n, 'xyz'[v]))
def Laplace(gd, scale=1.0, n=1, dtype=float):
if n == 9:
return FTLaplace(gd, scale, dtype)
else:
return GUCLaplace(gd, scale, n, dtype)
class GUCLaplace(FDOperator):
def __init__(self, gd, scale=1.0, n=1, dtype=float):
"""Laplacian for general non orthorhombic grid.
gd: GridDescriptor
Descriptor for grid.
scale: float
Scaling factor. Use scale=-0.5 for a kinetic energy operator.
n: int
Range of stencil. Stencil has O(h^(2n)) error.
dtype: float or complex
Datatype to work on.
"""
# Order the 13 neighbor grid points:
        M_ic = np.indices((3, 3, 3)).reshape((3, -1)).T[-13:] - 1
u_cv = gd.h_cv / (gd.h_cv**2).sum(1)[:, np.newaxis]**0.5
u2_i = (np.dot(M_ic, u_cv)**2).sum(1)
i_d = u2_i.argsort()
m_mv = np.array([(2, 0, 0), (0, 2, 0), (0, 0, 2),
(0, 1, 1), (1, 0, 1), (1, 1, 0)])
# Try 3, 4, 5 and 6 directions:
for D in range(3, 7):
h_dv = np.dot(M_ic[i_d[:D]], gd.h_cv)
A_md = (h_dv**m_mv[:, np.newaxis, :]).prod(2)
a_d, residual, rank, s = np.linalg.lstsq(A_md, [1, 1, 1, 0, 0, 0])
if residual.sum() < 1e-14:
assert rank == D
# D directions was OK
break
a_d *= scale
offsets = [(0,0,0)]
coefs = [laplace[n][0] * a_d.sum()]
for d in range(D):
M_c = M_ic[i_d[d]]
offsets.extend(np.arange(1, n + 1)[:, np.newaxis] * M_c)
coefs.extend(a_d[d] * np.array(laplace[n][1:]))
offsets.extend(np.arange(-1, -n - 1, -1)[:, np.newaxis] * M_c)
coefs.extend(a_d[d] * np.array(laplace[n][1:]))
FDOperator.__init__(self, coefs, offsets, gd, dtype)
self.description = (
'%d*%d+1=%d point O(h^%d) finite-difference Laplacian' %
((self.npoints - 1) // n, n, self.npoints, 2 * n))
class LaplaceA(FDOperator):
def __init__(self, gd, scale, dtype=float):
assert gd.orthogonal
c = np.divide(-1/12, gd.h_cv.diagonal()**2) * scale # Why divide? XXX
c0 = c[1] + c[2]
c1 = c[0] + c[2]
c2 = c[1] + c[0]
a = -16.0 * np.sum(c)
b = 10.0 * c + 0.125 * a
FDOperator.__init__(self,
[a,
b[0], b[0],
b[1], b[1],
b[2], b[2],
c0, c0, c0, c0,
c1, c1, c1, c1,
c2, c2, c2, c2],
[(0, 0, 0),
(-1, 0, 0), (1, 0, 0),
(0, -1, 0), (0, 1, 0),
(0, 0, -1), (0, 0, 1),
(0, -1, -1), (0, -1, 1), (0, 1, -1), (0, 1, 1),
(-1, 0, -1), (-1, 0, 1), (1, 0, -1), (1, 0, 1),
(-1, -1, 0), (-1, 1, 0), (1, -1, 0), (1, 1, 0)],
gd, dtype,
'O(h^4) Mehrstellen Laplacian (A)')
class LaplaceB(FDOperator):
def __init__(self, gd, dtype=float):
a = 0.5
b = 1.0 / 12.0
FDOperator.__init__(self,
[a,
b, b, b, b, b, b],
[(0, 0, 0),
(-1, 0, 0), (1, 0, 0),
(0, -1, 0), (0, 1, 0),
(0, 0, -1), (0, 0, 1)],
gd, dtype,
'O(h^4) Mehrstellen Laplacian (B)')
class FTLaplace:
def __init__(self, gd, scale, dtype):
assert gd.comm.size == 1 and gd.pbc_c.all()
N_c1 = gd.N_c[:, np.newaxis]
i_cq = np.indices(gd.N_c).reshape((3, -1))
i_cq += N_c1 // 2
i_cq %= N_c1
i_cq -= N_c1 // 2
B_vc = 2.0 * pi * gd.icell_cv.T
k_vq = np.dot(B_vc, i_cq)
k_vq *= k_vq
self.k2_Q = k_vq.sum(axis=0).reshape(gd.N_c)
self.k2_Q *= -scale
self.d = 6.0 / gd.h_cv[0, 0]**2
self.npoints = 1000
def apply(self, in_xg, out_xg, phase_cd=None):
if in_xg.ndim > 3:
for in_g, out_g in zip(in_xg, out_xg):
out_g[:] = ifftn(fftn(in_g) * self.k2_Q).real
else:
out_xg[:] = ifftn(fftn(in_xg) * self.k2_Q).real
def get_diagonal_element(self):
return self.d
| ajylee/gpaw-rtxs | gpaw/fd_operators.py | Python | gpl-3.0 | 9,613 | ["GPAW"] | 04d9d5457f46e981cc75df8ea8d43ae169a1a5fea4fa0945208412736809440c |
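The coefficient table in fd_operators.py is easiest to trust after a quick numerical check. The self-contained sketch below (plain Python 3, not part of GPAW; the evaluation point and grid spacing are arbitrary) applies the first few stencils to a quadratic and confirms that each reproduces the second derivative exactly, as the O(h^(2n)) construction requires.
laplace = [[0],
           [-2, 1],
           [-5 / 2, 4 / 3, -1 / 12],
           [-49 / 18, 3 / 2, -3 / 20, 1 / 90]]

def second_derivative(f, x, h, coefs):
    """Apply a symmetric finite-difference stencil for f''(x)."""
    total = coefs[0] * f(x)
    for k, c in enumerate(coefs[1:], start=1):
        total += c * (f(x - k * h) + f(x + k * h))
    return total / h**2

f = lambda x: x**2              # exact second derivative is 2 everywhere
for n in range(1, 4):
    print(n, second_derivative(f, x=0.3, h=0.5, coefs=laplace[n]))
# each line prints 2.0 (up to floating-point rounding)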
#!/usr/bin/python
#
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_gslb
author: Gaurav Rastogi (@grastogi23) <[email protected]>
short_description: Module for setup of Gslb Avi RESTful Object
description:
- This module is used to configure Gslb object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
async_interval:
description:
- Frequency with which messages are propagated to vs mgr.
- Value of 0 disables async behavior and rpc are sent inline.
- Allowed values are 0-5.
- Field introduced in 18.2.3.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
version_added: "2.9"
clear_on_max_retries:
description:
- Max retries after which the remote site is treated as a fresh start.
- In fresh start all the configs are downloaded.
- Allowed values are 1-1024.
- Default value when not specified in API or module is interpreted by Avi Controller as 20.
client_ip_addr_group:
description:
- Group to specify if the client ip addresses are public or private.
- Field introduced in 17.1.2.
version_added: "2.4"
description:
description:
- User defined description for the object.
dns_configs:
description:
- Sub domain configuration for the gslb.
            - Gslb service's fqdn must match one of these subdomains.
is_federated:
description:
- This field indicates that this object is replicated across gslb federation.
- Field introduced in 17.1.3.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
version_added: "2.4"
type: bool
leader_cluster_uuid:
description:
- Mark this site as leader of gslb configuration.
- This site is the one among the avi sites.
required: true
maintenance_mode:
description:
- This field disables the configuration operations on the leader for all federated objects.
- Cud operations on gslb, gslbservice, gslbgeodbprofile and other federated objects will be rejected.
- The rest-api disabling helps in upgrade scenarios where we don't want configuration sync operations to the gslb member when the member is being
- upgraded.
- This configuration programmatically blocks the leader from accepting new gslb configuration when member sites are undergoing upgrade.
- Field introduced in 17.2.1.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
version_added: "2.5"
type: bool
name:
description:
- Name for the gslb object.
required: true
send_interval:
description:
- Frequency with which group members communicate.
- Allowed values are 1-3600.
- Default value when not specified in API or module is interpreted by Avi Controller as 15.
send_interval_prior_to_maintenance_mode:
description:
- The user can specify a send-interval while entering maintenance mode.
- The validity of this 'maintenance send-interval' is only during maintenance mode.
- When the user leaves maintenance mode, the original send-interval is reinstated.
- This internal variable is used to store the original send-interval.
- Field introduced in 18.2.3.
version_added: "2.9"
sites:
description:
- Select avi site member belonging to this gslb.
tenant_ref:
description:
- It is a reference to an object of type tenant.
third_party_sites:
description:
- Third party site member belonging to this gslb.
- Field introduced in 17.1.1.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the gslb object.
view_id:
description:
- The view-id is used in change-leader mode to differentiate partitioned groups while they have the same gslb namespace.
- Each partitioned group will be able to operate independently by using the view-id.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create Gslb object
avi_gslb:
name: "test-gslb"
avi_credentials:
username: '{{ username }}'
password: '{{ password }}'
controller: '{{ controller }}'
sites:
- name: "test-site1"
username: "gslb_username"
password: "gslb_password"
ip_addresses:
- type: "V4"
addr: "10.10.28.83"
enabled: True
member_type: "GSLB_ACTIVE_MEMBER"
port: 443
cluster_uuid: "cluster-d4ee5fcc-3e0a-4d4f-9ae6-4182bc605829"
- name: "test-site2"
username: "gslb_username"
password: "gslb_password"
ip_addresses:
- type: "V4"
addr: "10.10.28.86"
enabled: True
member_type: "GSLB_ACTIVE_MEMBER"
port: 443
cluster_uuid: "cluster-0c37ae8d-ab62-410c-ad3e-06fa831950b1"
dns_configs:
- domain_name: "test1.com"
- domain_name: "test2.com"
leader_cluster_uuid: "cluster-d4ee5fcc-3e0a-4d4f-9ae6-4182bc605829"
- name: Update Gslb site's configurations (Patch Add Operation)
avi_gslb:
avi_credentials:
username: '{{ username }}'
password: '{{ password }}'
controller: '{{ controller }}'
avi_api_update_method: patch
avi_api_patch_op: add
leader_cluster_uuid: "cluster-d4ee5fcc-3e0a-4d4f-9ae6-4182bc605829"
name: "test-gslb"
dns_configs:
- domain_name: "temp1.com"
- domain_name: "temp2.com"
gslb_sites_config:
- ip_addr: "10.10.28.83"
dns_vses:
- dns_vs_uuid: "virtualservice-f2a711cd-5e78-473f-8f47-d12de660fd62"
domain_names:
- "test1.com"
- "test2.com"
- ip_addr: "10.10.28.86"
dns_vses:
- dns_vs_uuid: "virtualservice-c1a63a16-f2a1-4f41-aab4-1e90f92a5e49"
domain_names:
- "temp1.com"
- "temp2.com"
- name: Update Gslb site's configurations (Patch Replace Operation)
avi_gslb:
avi_credentials:
username: "{{ username }}"
password: "{{ password }}"
controller: "{{ controller }}"
# On basis of cluster leader uuid dns_configs is set for that particular leader cluster
leader_cluster_uuid: "cluster-84aa795f-8f09-42bb-97a4-5103f4a53da9"
name: "test-gslb"
avi_api_update_method: patch
avi_api_patch_op: replace
dns_configs:
- domain_name: "test3.com"
- domain_name: "temp3.com"
gslb_sites_config:
# Ip address is mapping key for dns_vses field update. For the given IP address,
# dns_vses is updated.
- ip_addr: "10.10.28.83"
dns_vses:
- dns_vs_uuid: "virtualservice-7c947ed4-77f3-4a52-909c-4f12afaf5bb0"
domain_names:
- "test3.com"
- ip_addr: "10.10.28.86"
dns_vses:
- dns_vs_uuid: "virtualservice-799b2c6d-7f2d-4c3f-94c6-6e813b20b674"
domain_names:
- "temp3.com"
- name: Update Gslb site's configurations (Patch Delete Operation)
avi_gslb:
avi_credentials:
username: "{{ username }}"
password: "{{ password }}"
controller: "{{ controller }}"
# On basis of cluster leader uuid dns_configs is set for that particular leader cluster
leader_cluster_uuid: "cluster-84aa795f-8f09-42bb-97a4-5103f4a53da9"
name: "test-gslb"
avi_api_update_method: patch
avi_api_patch_op: delete
dns_configs:
gslb_sites_config:
- ip_addr: "10.10.28.83"
- ip_addr: "10.10.28.86"
"""
RETURN = '''
obj:
description: Gslb (api/gslb) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, avi_ansible_api, HAS_AVI)
from ansible.module_utils.network.avi.avi_api import ApiSession, AviCredentials
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
async_interval=dict(type='int',),
clear_on_max_retries=dict(type='int',),
client_ip_addr_group=dict(type='dict',),
description=dict(type='str',),
dns_configs=dict(type='list',),
is_federated=dict(type='bool',),
leader_cluster_uuid=dict(type='str', required=True),
maintenance_mode=dict(type='bool',),
name=dict(type='str', required=True),
send_interval=dict(type='int',),
send_interval_prior_to_maintenance_mode=dict(type='int',),
sites=dict(type='list',),
tenant_ref=dict(type='str',),
third_party_sites=dict(type='list',),
url=dict(type='str',),
uuid=dict(type='str',),
view_id=dict(type='int',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) or requests is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
api_method = module.params['avi_api_update_method']
if str(api_method).lower() == 'patch':
patch_op = module.params['avi_api_patch_op']
# Create controller session
api_creds = AviCredentials()
api_creds.update_from_ansible_module(module)
api = ApiSession.get_session(
api_creds.controller, api_creds.username, password=api_creds.password,
timeout=api_creds.timeout, tenant=api_creds.tenant,
tenant_uuid=api_creds.tenant_uuid, token=api_creds.token,
port=api_creds.port)
# Get existing gslb objects
rsp = api.get('gslb', api_version=api_creds.api_version)
existing_gslb = rsp.json()
gslb = existing_gslb['results']
sites = module.params['gslb_sites_config']
for gslb_obj in gslb:
# Update/Delete domain names in dns_configs fields in gslb object.
if 'dns_configs' in module.params:
if gslb_obj['leader_cluster_uuid'] == module.params['leader_cluster_uuid']:
if str(patch_op).lower() == 'delete':
gslb_obj['dns_configs'] = []
elif str(patch_op).lower() == 'add':
if module.params['dns_configs'] not in gslb_obj['dns_configs']:
gslb_obj['dns_configs'].extend(module.params['dns_configs'])
else:
gslb_obj['dns_configs'] = module.params['dns_configs']
# Update/Delete sites configuration
if sites:
for site_obj in gslb_obj['sites']:
dns_vses = site_obj.get('dns_vses', [])
for obj in sites:
config_for = obj.get('ip_addr', None)
if not config_for:
return module.fail_json(msg=(
"ip_addr of site in a configuration is mandatory. "
"Please provide ip_addr i.e. gslb site's ip."))
if config_for == site_obj['ip_addresses'][0]['addr']:
if str(patch_op).lower() == 'delete':
site_obj['dns_vses'] = []
else:
# Modify existing gslb sites object
for key, val in obj.items():
if key == 'dns_vses' and str(patch_op).lower() == 'add':
found = False
# Check dns_vses field already exists on the controller
for v in dns_vses:
if val[0]['dns_vs_uuid'] != v['dns_vs_uuid']:
found = True
break
if not found:
dns_vses.extend(val)
else:
site_obj[key] = val
if str(patch_op).lower() == 'add':
site_obj['dns_vses'] = dns_vses
uni_dns_configs = [dict(tupleized) for tupleized in set(tuple(item.items())
for item in gslb_obj['dns_configs'])]
gslb_obj['dns_configs'] = uni_dns_configs
module.params.update(gslb_obj)
module.params.update(
{
'avi_api_update_method': 'put',
'state': 'present'
}
)
return avi_ansible_api(module, 'gslb',
set([]))
if __name__ == '__main__':
main()
| Dhivyap/ansible | lib/ansible/modules/network/avi/avi_gslb.py | Python | gpl-3.0 | 14,826 | ["VisIt"] | b3da20c2e3e31c8dd711cad68803efa4399b7f30d5ff632eb3859fd614578f1b |
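The module above deduplicates dns_configs by freezing each dict into a tuple of its items so it can be placed in a set, then rebuilding the dicts. A standalone illustration of that idiom (plain Python; the domain names are invented, and note that set ordering is arbitrary):
dns_configs = [{"domain_name": "test1.com"},
               {"domain_name": "test2.com"},
               {"domain_name": "test1.com"}]          # duplicate entry
uni_dns_configs = [dict(tupleized) for tupleized in
                   set(tuple(item.items()) for item in dns_configs)]
print(uni_dns_configs)   # two entries remain, in arbitrary order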
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2005-2007 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <[email protected]>
##
##
""" Search dialogs for profile objects """
from kiwi.ui.objectlist import Column
from stoqlib.lib.translation import stoqlib_gettext
from stoqlib.gui.search.searcheditor import SearchEditor
from stoqlib.domain.profile import UserProfile
from stoqlib.gui.editors.profileeditor import UserProfileEditor
_ = stoqlib_gettext
class UserProfileSearch(SearchEditor):
title = _("User Profile Search")
search_spec = UserProfile
editor_class = UserProfileEditor
size = (465, 390)
advanced_search = False
search_label = _('Profiles Matching:')
def create_filters(self):
self.set_text_field_columns(['name'])
#
# SearchDialog Hooks
#
def get_columns(self):
return [Column('name', _('Profile'), data_type=str,
expand=True, sorted=True)]
| andrebellafronte/stoq | stoqlib/gui/search/profilesearch.py | Python | gpl-2.0 | 1,736 | ["VisIt"] | a5618667837933c0cd8aca5fee42bb24230221f76e9c872d7fc84e427c54889c |
#!/usr/bin/env python
"""
# Created: Tue, 16 Apr 2013 14:49:03 +1000
SeqHandler provides low level operations on sequence files (FASTA, GenBank or
EMBL files).
Requires Biopython, see: http://biopython.org/wiki/Main_Page
& Python 2.7.3 or similar
SPLIT MODULE
============
Use this Module if you want to split a gbk/embl file on a particular feature.
e.g. source, fasta_record
An example would be :
python SeqHandler.py split test.embl splitTest -f source -i embl
Where:
* test.embl is the input file you want to split (gbk or embl).
* -i flag, specifies the input file type (genbank/embl)
If none is specified the script will assume input is a Genbank file
* splitTest is the output directory for all the resulting files.
* -o flag, specifies the output file type (genbank/embl/fasta)
* -f flag, denotes the feature you want to split on (In this case
'source'). If none is specified the script will split on 'fasta_record'
N.B The script will try to rename the split files using the 'note' attached
to the feature you're trying to split on.
If no output format is specified (-o) flag, the file format for split files
will be the same as the input. i.e embl input, get embl output.
MERGE MODULE
============
Use this module if you want to merge a genbank, embl or fasta records.
An example would be: python SeqHandler.py merge test.gbk -i genbank test2.fna -o fasta
Where:
* test.gbk is the input file you want to merge (gbk/embl/fasta).
* test2.fna is the output file.
* -i flag, specifies the input file type (genbank/embl/fasta)
If none is specified the script will assume input is a Genbank file
* -o flag, specifies the output file type (genbank/embl/fasta)
N.B Input only takes one file. If you want to merge multiple separate files
together you'll need to first concatenate them together.
If no output format is specified (-o) flag, the file format for split files
will be the same as the input. i.e embl input, get embl output.
CONVERT MODULE
==============
Use this module if you want to convert a sequence file.
An example would be:
python SeqHandler.py convert test.gbk test2.fna -o fasta -i genbank
Where:
* test.gbk is the input file you want to merge
* test2.fna is the output file.
* -i flag, specifies the input file type
If none is specified the script will assume input is a Genbank file
* -o flag, specifies the output file type
If none is specified the script will assume input is a fasta file
See USAGE (python SeqHandler.py convert -h) for full list of supported files.
### CHANGE LOG ###
2013-04-16 Nabil-Fareed Alikhan <[email protected]>
* Version 0.3
* Initial build
2013-04-17 Nabil-Fareed Alikhan <[email protected]>
* Version 0.5
* Reworked GBKSplit to SeqHandler
* Created sub modules: split, merge & convert files
* Explicit control of Input/Output for modules
2013-09-05 Nabil-Fareed Alikhan <[email protected]>
* Changed fasta header handling for Prokka input in merge
* Added header override flags for merge
2013-09-05 Mitchell Stanon-Cook <[email protected]>-
* Made into an installable package
* Installs a script (SeqHandler) system wide
* Small improvements in terms of using __init__ as a meta container
2013-09-06 Mitchell Stanon-Cook <[email protected]>-
* Added option to convert to/from gff
"""
import SeqHandler.__init__ as meta
import sys, os, traceback, argparse
import time
from Bio import SeqIO
from BCBio import GFF
epi = "Licence: %s by %s <%s>" % (meta.__license__,
meta.__author__,
meta.__author_email__)
__doc__ = " %s v%s - %s (%s)" % (meta.__title__,
meta.__version__,
meta.__description__,
meta.__url__)
def to_GFF(args):
"""
Convert a GenBank or EMBL file to GFF
Biopython does not natively support GFF
Can be useful for QUAST (Quality Assessment Tool for Genome Assemblies)
:param args: an argparse args list
"""
in_type = args.inFormat.lower()
with open(args.input) as fin, open(args.output, 'w') as fout:
GFF.write(SeqIO.parse(fin, in_type), fout)
def convertMod(args):
if args.outFormat.lower() == 'gff':
acceptable = ['genbank', 'embl']
if args.inFormat.lower() in acceptable:
to_GFF(args)
return None
else:
sys.err.write("ERROR: ValueError, Could not convert file\n")
return None
else:
try:
count = SeqIO.convert(args.input, args.inFormat, args.output, args.outFormat )
if count == 0:
                sys.stderr.write('ERROR: No records converted. Possibly wrong input filetype\n')
else:
                if args.verbose: sys.stderr.write("Converted %i records\n" % count)
except ValueError:
sys.err.write("ERROR: ValueError, Could not convert file\n")
return None
def mergeMod(args):
filetype = args.inFormat
# Load file as SeqRecord
int_handle = open(args.input, "r")
recs = list(SeqIO.parse(int_handle, filetype))
# For each SeqRecord, I.e. complete gbk annotation obj in file
fgbk = recs[0]
from Bio.SeqFeature import SeqFeature, FeatureLocation
d = SeqFeature(FeatureLocation(0, len(fgbk) ), type="fasta_record",\
strand=1)
d.qualifiers["note"] = recs[0].name
fgbk.features.append(d)
for l in recs[1:]:
d = SeqFeature(FeatureLocation(len(fgbk), len(fgbk) + len(l)), type="fasta_record",\
strand=1)
d.qualifiers["note"] = l.name
fgbk.features.append(d)
fgbk += l
fgbk.name = recs[0].name
fgbk.description = recs[0].description
fgbk.annotations = recs[0].annotations
if args.accession != None:
fgbk.name = args.accession
if args.ver != None:
fgbk.id = fgbk.name +'.' + args.ver
for f in fgbk.features:
if f.type == 'source':
fgbk.features.remove(f)
d = SeqFeature(FeatureLocation(0, len(fgbk)), type="source", strand=1)
fgbk.features.insert(0,d)
outtype = filetype
if args.outFormat != None:
outtype = args.outFormat
out_handle = open( args.output,"w")
SeqIO.write(fgbk, out_handle, outtype)
def splitMod(args):
# Input Filetype, check if gbk or embl
filetype = args.inFormat
# Load file as SeqRecord
int_handle = open(args.input, "r")
Recs = SeqIO.parse(int_handle, filetype)
if not os.path.exists(args.outputDir):
os.mkdir(args.outputDir)
# For each SeqRecord, I.e. complete gbk annotation obj in file
count = 0
for rec in Recs:
for feat in rec.features:
# Split file everytime we see user-defined feature
# (fasta_record if not specified, could be source if user
# Uses ' -f source '
if feat.type == args.feature:
count += 1
subname = ''
# Append note/annotation for feature to filename
#TODO: Clean up note field is pretty basic
if feat.qualifiers.has_key('note'):
subname = feat.qualifiers['note'][0].replace(' ','-')\
.replace(';','')
# TODO: ID/Accession would probably need to have contig
# number suffixed
Finalname = subname + str(os.path.basename(args.input))
outtype = filetype
if args.outFormat != None:
outtype = args.outFormat
outSuffix = '.' + outtype
if outtype == 'genbank': outSuffix = '.gbk'
if not Finalname.endswith(outSuffix): Finalname += outSuffix
out_handle = open( os.path.join(args.outputDir, Finalname ),\
"w")
# Create new SeqRecord between location of split feature
finalGbk = rec[feat.location.start:feat.location.end]
# Copy annotations-Header from old file to new file
finalGbk.annotations = rec.annotations
# Write Output file to specified dir
SeqIO.write(finalGbk, out_handle, outtype)
if count == 0:
        sys.stderr.write('No file generated; wrong feature specified? '
                         'See -f flag\n')
if __name__ == '__main__':
try:
start_time = time.time()
parser = argparse.ArgumentParser(description=__doc__ ,epilog=epi)
parser.add_argument ('-v', '--verbose', action='store_true', \
default=False, help='verbose output')
parser.add_argument('--version', action='version', version='%(prog)s '\
+ meta.__version__)
subparsers = parser.add_subparsers(help='modules')
split_parser = subparsers.add_parser('split', help='Splits sequence files')
split_parser.add_argument('-f','--feature',action='store',default=\
'fasta_record',\
help='Annotation feature to split on [def: "fasta_record"]')
split_parser.add_argument ('input', action='store', \
help='Input annotation file to split')
split_parser.add_argument('-i','--inFormat',action='store',\
choices=('embl', 'genbank'),\
default='genbank', help='Format of input file [def: genbank]')
split_parser.add_argument('-o','--outFormat',action='store',\
choices=('embl', 'fasta', 'genbank'),\
help='Format of output file')
split_parser.add_argument ('outputDir', action='store', \
help='Output Directory')
merge_parser = subparsers.add_parser('merge', help='Merges sequence files')
merge_parser.add_argument ('input', action='store', \
help='Input annotation file')
merge_parser.add_argument ('output', action='store', \
help='Output File')
merge_parser.add_argument('-i','--inFormat',action='store',\
choices=('embl', 'fasta', 'genbank'),\
default='genbank', help='Format of input file [def: genbank]')
merge_parser.add_argument('-a','--accession',action='store',\
help='User defined accession no/locus')
merge_parser.add_argument('-e','--ver',action='store',\
help='User defined version no')
merge_parser.add_argument('-o','--outFormat',action='store',\
choices=('embl', 'fasta', 'genbank'),\
help='Format of output file')
convert_parser = subparsers.add_parser('convert', help='Converts sequence files')
convert_parser.add_argument ('input', action='store', \
help='Input annotation file')
convert_parser.add_argument ('output', action='store', \
help='Output File')
convert_parser.add_argument('-i','--inFormat',action='store',\
choices=('abi', 'ace', 'clustal', 'embl', 'fasta', 'fastq',\
'fastq-solexa','fastq-illumina','genbank','ig','imgt','nexus',\
        'phd','phylip','pir','seqxml','sff','stockholm','swiss','tab',\
'qual','uniprot-xml'),
default='genbank', help='Format of input file [def: genbank]')
convert_parser.add_argument('-o','--outFormat',action='store',\
choices=('clustal', 'embl', 'fasta', 'fastq','fastq-solexa',\
'fastq-illumina','genbank','imgt','nexus', 'phd','phylip',\
'seqxml','sff','stockholm','tab', 'qual', 'gff'),\
default='fasta', help=('Format of output file [def: fasta]. '
'If gff -i (--inFormat) must be '
'genbank or embl'))
split_parser.set_defaults(func=splitMod)
convert_parser.set_defaults(func=convertMod)
merge_parser.set_defaults(func=mergeMod)
args = parser.parse_args()
args.func(args)
if args.verbose: print "Executing @ " + time.asctime()
if args.verbose: print "Ended @ " + time.asctime()
if args.verbose: print 'total time in minutes:',
if args.verbose: print (time.time() - start_time) / 60.0
sys.exit(0)
except KeyboardInterrupt, e: # Ctrl-C
raise e
except SystemExit, e: # sys.exit()
raise e
except Exception, e:
print 'ERROR, UNEXPECTED EXCEPTION'
print str(e)
traceback.print_exc()
os._exit(1)
| happykhan/seqhandler | SeqHandler/SeqHandler.py | Python | gpl-3.0 | 12,655 | ["Biopython"] | cd2db408178c4f7a60a2b1c57ec9d9a5e8c875f9da72bf2c5dc0d44c2b0846e5 |
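The convert module above is essentially a thin wrapper around Biopython's SeqIO.convert. For reference, the direct call looks like this (a minimal sketch; the file names are placeholders and Biopython must be installed):
from Bio import SeqIO

count = SeqIO.convert("input.gbk", "genbank", "output.fasta", "fasta")
print("Converted %i records" % count)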
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
###############################################################################
# Imports
###############################################################################
from __future__ import print_function
import os
import sys
import glob
import warnings
import functools
import operator
from argparse import ArgumentParser
import numpy as np
import mdtraj as md
from mdtraj.core.trajectory import _parse_topology
from mdtraj.utils import in_units_of
from mdtraj.utils.six import iteritems
###############################################################################
# Crappy class that should go elsewhere
###############################################################################
###############################################################################
# Globals
###############################################################################
formats = {'.dcd': md.formats.DCDTrajectoryFile,
'.xtc': md.formats.XTCTrajectoryFile,
'.trr': md.formats.TRRTrajectoryFile,
'.binpos': md.formats.BINPOSTrajectoryFile,
'.nc': md.formats.NetCDFTrajectoryFile,
'.netcdf': md.formats.NetCDFTrajectoryFile,
'.h5': md.formats.HDF5TrajectoryFile,
'.lh5': md.formats.LH5TrajectoryFile,
'.pdb': md.formats.PDBTrajectoryFile}
fields = {'.trr': ('xyz', 'time', 'step', 'box', 'lambda'),
'.xtc': ('xyz', 'time', 'step', 'box'),
'.dcd': ('xyz', 'cell_lengths', 'cell_angles'),
'.nc': ('xyz', 'time', 'cell_lengths', 'cell_angles'),
'.netcdf': ('xyz', 'time', 'cell_lengths', 'cell_angles'),
'.binpos': ('xyz',),
'.lh5': ('xyz', 'topology'),
'.h5': ('xyz', 'time', 'cell_lengths', 'cell_angles',
'velocities', 'kineticEnergy', 'potentialEnergy',
'temperature', 'lambda', 'topology'),
'.pdb': ('xyz', 'topology', 'cell_angles', 'cell_lengths')}
units = {'.xtc': 'nanometers',
'.trr': 'nanometers',
'.binpos': 'angstroms',
'.nc': 'angstroms',
'.netcdf': 'angstroms',
'.dcd': 'angstroms',
'.h5': 'nanometers',
'.lh5': 'nanometers',
'.pdb': 'angstroms'}
###############################################################################
# Utility Functions
###############################################################################
ext = lambda fn: os.path.splitext(fn)[1]
class _Warner(object):
def __init__(self):
self.active = True
def __call__(self, msg):
if self.active:
print('Warning:', msg, file=sys.stderr)
warn = _Warner()
def index(str):
if str.count(':') == 0:
return int(str)
elif str.count(':') == 1:
start, end = [(None if e == '' else int(e)) for e in str.split(':')]
step = None
elif str.count(':') == 2:
start, end, step = [(None if e == '' else int(e)) for e in str.split(':')]
return slice(start, end, step)
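# Illustrative examples for index() (added comment, not in the original script):
#   index('7')     -> 7                       a single frame
#   index('10:20') -> slice(10, 20, None)     frames 10..19
#   index('::2')   -> slice(None, None, 2)    every other frame
#   index('-1')    -> -1                      the last frame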
###############################################################################
# Code
###############################################################################
def parse_args():
"""Parse the command line arguments and perform some validation on the
arguments
Returns
-------
args : argparse.Namespace
The namespace containing the arguments
"""
extensions = ', '.join(list(formats.keys()))
parser = ArgumentParser(description='''Convert molecular dynamics
trajectories between formats. The DCD, XTC, TRR, PDB, binpos, NetCDF,
        LH5, and HDF5 formats are supported (%s)''' % extensions)
parser.add_argument('input', nargs='+', help='''path to one or more
trajectory files. Multiple trajectories, if supplied, will
be concatenated together in the output file in the order
supplied. all of the trajectories should be in the same
format. the format will be detected based on the file
extension''')
required = parser.add_argument_group('required arguments')
required.add_argument('-o', '--output', required=True,
help='''path to the save the output. the output
format will chosen based on the file extension
(%s)''' % extensions)
# dirty hack to move the 'optional arguments' group to the end. such that
# the 'required arguments' group shows up before it.
parser._action_groups.append(parser._action_groups.pop(1))
parser.add_argument('-c', '--chunk', default=1000, type=int,
help='''number of frames to read in at once. this
determines the memory requirements of this code.
default=1000''')
parser.add_argument('-f', '--force', action='store_true',
help='''force overwrite if output already exsits''')
parser.add_argument('-s', '--stride', default=1, type=int, help='''load
only every stride-th frame from the input file(s),
to subsample.''')
parser.add_argument('-i', '--index', type=index, help='''load a *specific*
set of frames. flexible, but inefficient for a large
trajectory. specify your selection using (pythonic)
"slice notation" e.g. '-i N' to load the the Nth
frame, '-i -1' will load the last frame, '-i N:M to
load frames N to M, etc. see http://bit.ly/143kloq
for details on the notation''')
parser.add_argument('-a', '--atom_indices', type=str,
help='''load only specific atoms from the input file(s).
provide a path to file containing a space, tab or
newline separated list of the (zero-based) integer
indices corresponding to the atoms you wish to keep.''')
parser.add_argument('-t', '--topology', type=str, help='''path to a
PDB/prmtop file. this will be used to parse the topology
of the system. it's optional, but useful. if specified,
it enables you to output the coordinates of your
dcd/xtc/trr/netcdf/binpos as a PDB file. If you\'re
converting *to* .h5, the topology will be stored
inside the h5 file.''')
args = parser.parse_args()
if not args.force and os.path.exists(args.output):
parser.error('file exists: %s' % args.output)
# rebuild the input list, doing any glob expansions
# necessary
input = []
for fn in args.input:
if not os.path.exists(fn):
if '*' in fn:
input.extend(glob.glob(fn))
else:
parser.error('No such file: %s' % fn)
elif os.path.isdir(fn):
parser.error('%s: Is a directory' % fn)
elif not os.path.isfile(fn):
parser.error('%s: Is not a file' % fn)
else:
input.append(fn)
args.input = input
for fn in args.input:
if not ext(fn) in formats:
parser.error("%s: '%s' is not a known extension" % (fn, ext(fn)))
extensions = list(map(ext, args.input))
if any(e != extensions[0] for e in extensions):
parser.error("all input trajectories do not have the same extension")
if not ext(args.output) in formats:
parser.error("%s: '%s' is not a known extension" % (args.output,
ext(args.output)))
if args.atom_indices is not None and not os.path.isfile(args.atom_indices):
parser.error('no such file: %s' % args.atom_indices)
if args.stride <= 0:
parser.error('stride must be positive')
if args.chunk <= 0:
parser.error('chunk must be positive')
if args.index and len(args.input) > 1:
parser.error('index notation only allowed with a single input trajectory')
if args.index and args.stride != 1:
parser.error('stride and index selections are incompatible')
if args.index is not None:
args.chunk = None
if args.topology is not None and not os.path.isfile(args.topology):
parser.error('no such file: %s' % args.topology)
if ((args.topology is None and not all(ext(e) in ['.h5', '.lh5', '.pdb'] for e in args.input))
and ext(args.output) in ['.h5', '.lh5', '.pdb']):
parser.error('to output a %s file, you need to supply a topology (-t, or --topology)' % ext(args.output))
if args.chunk is not None and (args.chunk % args.stride != 0):
parser.error('--stride must be a divisor of --chunk')
return args
def main(args, verbose=True):
"""Run the main script.
Parameters
----------
args : argparse.Namespace
The collected command line arguments
"""
if args.atom_indices is not None:
atom_indices = np.loadtxt(args.atom_indices, int)
else:
atom_indices = None
out_x = ext(args.output)
out_units = units[out_x]
out_fields = fields[out_x]
OutFileFormat = formats[out_x]
in_x = ext(args.input[0])
InFileFormat = formats[in_x]
if args.topology is not None:
topology = _parse_topology(args.topology)
else:
topology = None
if topology is not None and atom_indices is not None:
topology = topology.subset(atom_indices)
n_total = 0
if args.index is not None:
assert len(args.input) == 1
# when chunk is None, we load up ALL of the frames. this isn't
        # strictly necessary, and it costs more memory, but it's a lot
# harder to get the code correct when we need to use data[start:end]
# notation when all of the data isn't loaded up at once. it's easy
# for hdf5 and netcdf, but for the others...
assert args.chunk is None
# this is the normal invocation pattern, but for PDBTrajectoryFile it's
# different
outfile_factory = functools.partial(OutFileFormat, args.output, 'w',
force_overwrite=args.force)
with outfile_factory() as outfile:
for fn in args.input:
assert in_x == ext(fn)
with InFileFormat(fn, 'r') as infile:
while True:
data, in_units, n_frames = read(infile, args.chunk, stride=args.stride,
atom_indices=atom_indices)
if n_frames == 0:
break
if topology is not None:
# if the user supplied a topology, we should probably
# do some simple checks
if data['xyz'].shape[1] != topology._numAtoms:
                            warnings.warn('number of atoms in the topology does not match the trajectory')
data['topology'] = topology
# if they want a specific set of frames, get those
# with slice notation
if args.index is not None:
_data = {}
for k, v in iteritems(data):
if isinstance(v, np.ndarray):
# we don't want the dimensionality to go deficient
if isinstance(args.index, int):
_data[k] = v[np.newaxis, args.index]
else:
_data[k] = v[args.index]
elif isinstance(v, md.Topology):
_data[k] = v
else:
                                raise RuntimeError()
data = _data
print(list(data.keys()))
n_frames = len(data['xyz'])
convert(data, in_units, out_units, out_fields)
write(outfile, data)
n_total += n_frames
if verbose:
sys.stdout.write('\rconverted %d frames, %d atoms' % (n_total, data['xyz'].shape[1]))
sys.stdout.flush()
if verbose:
print(' ')
def write(outfile, data):
"""Write data out to a file
This is a small wrapper around the native write() method on the
XXXTRajectoryFile objects that is necessary to make sure we pass the
right arguments in the right position
Parameters
----------
outfile : TrajectoryFile
An open trajectory file with a write() method
data : dict
A dict with the data to write in it.
"""
if isinstance(outfile, md.formats.XTCTrajectoryFile):
outfile.write(data.get('xyz', None), data.get('time', None),
data.get('step', None), data.get('box', None))
elif isinstance(outfile, md.formats.TRRTrajectoryFile):
outfile.write(data.get('xyz', None), data.get('time', None),
data.get('step', None), data.get('box', None),
data.get('lambd', None))
elif isinstance(outfile, md.formats.DCDTrajectoryFile):
outfile.write(data.get('xyz', None), data.get('cell_lengths', None),
data.get('cell_angles', None))
elif isinstance(outfile, md.formats.BINPOSTrajectoryFile):
outfile.write(data.get('xyz', None))
elif isinstance(outfile, md.formats.PDBTrajectoryFile):
lengths, angles = None, None
for i, frame in enumerate(data.get('xyz')):
if 'cell_lengths' in data:
lengths = data['cell_lengths'][i]
if 'cell_angles' in data:
angles = data['cell_angles'][i]
outfile.write(frame, data.get('topology', None), i, lengths, angles)
elif isinstance(outfile, md.formats.NetCDFTrajectoryFile):
outfile.write(data.get('xyz', None), data.get('time', None),
data.get('cell_lengths', None), data.get('cell_angles', None))
elif isinstance(outfile, md.formats.HDF5TrajectoryFile):
outfile.write(data.get('xyz', None), data.get('time', None),
data.get('cell_lengths', None), data.get('cell_angles', None),
data.get('velocities', None), data.get('kineticEnergy', None),
data.get('potentialEnergy', None), data.get('temperature', None),
data.get('lambda', None))
if outfile.topology is None:
# only want to write the topology once if we're chunking
outfile.topology = data.get('topology', None)
elif isinstance(outfile, md.formats.LH5TrajectoryFile):
outfile.write(data.get('xyz', None))
if outfile.topology is None:
# only want to write the topology once if we're chunking
outfile.topology = data.get('topology', None)
else:
raise RuntimeError()
def read(infile, chunk, stride, atom_indices):
"""Read data from the infile.
This is a small wrapper around the read() method on the XXXTrajectoryFile
that performs the read and then puts the results in a little dict. It also
returns the distance units that the file uses.
"""
if not isinstance(infile, md.formats.PDBTrajectoryFile):
_data = infile.read(chunk, stride=stride, atom_indices=atom_indices)
if isinstance(infile, md.formats.PDBTrajectoryFile):
if infile.closed:
# signal that we're done reading this pdb
return None, None, 0
if atom_indices is None:
atom_indices = slice(None)
topology = infile.topology
else:
topology = infile.topology.subset(atom_indices)
data = {'xyz': infile.positions[::stride, atom_indices, :],
'topology': topology}
if infile.unitcell_lengths is not None:
data['cell_lengths'] =np.array([infile.unitcell_lengths] * len(data['xyz']))
data['cell_angles'] = np.array([infile.unitcell_angles] * len(data['xyz']))
in_units = 'angstroms'
infile.close()
elif isinstance(infile, md.formats.XTCTrajectoryFile):
data = dict(zip(fields['.xtc'], _data))
in_units = 'nanometers'
elif isinstance(infile, md.formats.TRRTrajectoryFile):
data = dict(zip(fields['.trr'], _data))
in_units = 'nanometers'
elif isinstance(infile, md.formats.DCDTrajectoryFile):
data = dict(zip(fields['.dcd'], _data))
in_units = 'angstroms'
elif isinstance(infile, md.formats.BINPOSTrajectoryFile):
data = {'xyz': _data}
in_units = 'angstroms'
elif isinstance(infile, md.formats.NetCDFTrajectoryFile):
data = dict(zip(fields['.nc'], _data))
in_units = 'angstroms'
elif isinstance(infile, md.formats.HDF5TrajectoryFile):
data = dict(zip(fields['.h5'], _data))
data['topology'] = infile.topology # need to hack this one in manually
if atom_indices is not None:
data['topology'] = data['topology'].subset(atom_indices)
in_units = 'nanometers'
elif isinstance(infile, md.formats.LH5TrajectoryFile):
data = {'xyz': _data}
data['topology'] = infile.topology # need to hack this one in manually
if atom_indices is not None:
data['topology'] = data['topology'].subset(atom_indices)
in_units = 'nanometers'
else:
raise RuntimeError
data = dict((k, v) for k, v in data.items() if v is not None)
return data, in_units, (0 if 'xyz' not in data else len(data['xyz']))
def convert(data, in_units, out_units, out_fields):
# do unit conversion
if 'xyz' in out_fields and 'xyz' in data:
data['xyz'] = in_units_of(data['xyz'], in_units, out_units, inplace=True)
if 'box' in out_fields:
if 'box' in data:
data['box'] = in_units_of(data['box'], in_units, out_units, inplace=True)
elif 'cell_angles' in data and 'cell_lengths' in data:
a, b, c = data['cell_lengths'].T
alpha, beta, gamma = data['cell_angles'].T
data['box'] = np.dstack(md.utils.unitcell.lengths_and_angles_to_box_vectors(a, b, c, alpha, beta, gamma))
data['box'] = in_units_of(data['box'], in_units, out_units, inplace=True)
del data['cell_lengths']
del data['cell_angles']
if 'cell_lengths' in out_fields:
if 'cell_lengths' in data:
data['cell_lengths'] = in_units_of(data['cell_lengths'], in_units, out_units, inplace=True)
elif 'box' in data:
a, b, c, alpha, beta, gamma = md.utils.unitcell.box_vectors_to_lengths_and_angles(data['box'][:, 0], data['box'][:, 1], data['box'][:, 2])
data['cell_lengths'] = np.vstack((a, b, c)).T
data['cell_angles'] = np.vstack((alpha, beta, gamma)).T
data['cell_lengths'] = in_units_of(data['cell_lengths'], in_units, out_units, inplace=True)
del data['box']
ignored_keys = ["'%s'" % s for s in set(data) - set(out_fields)]
formated_fields = ', '.join("'%s'" % o for o in out_fields)
if len(ignored_keys) > 0:
warn('%s data from input file(s) will be discarded. '
'output format only supports fields: %s' % (', '.join(ignored_keys),
formated_fields))
warn.active = False
return data
def entry_point():
args = parse_args()
main(args)
if __name__ == '__main__':
entry_point()
| leeping/mdtraj | mdtraj/scripts/mdconvert.py | Python | lgpl-2.1 | 20,690 | ["MDTraj", "NetCDF"] | 537d7b4efc4847fa466f7aecf5e69c759fe7a86e99b642cc00f0472bd41786ab |
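For comparison with the chunked, low-level conversion script above, the same job can be expressed with MDTraj's high-level API in a few lines. This sketch (file names are placeholders) loads the whole trajectory into memory, which is exactly the cost the streaming approach in mdconvert avoids:
import mdtraj as md

traj = md.load("input.dcd", top="topology.pdb", stride=2)
traj.save("output.xtc")    # output format is inferred from the file extension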
# (C) British Crown Copyright 2014 - 2017, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Test the :func:`iris.experimental.ugrid.ugrid` function.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import iris.tests as tests
import unittest
# Import pyugrid if installed, else fail quietly + disable all the tests.
try:
import pyugrid
except (ImportError, AttributeError):
pyugrid = None
skip_pyugrid = unittest.skipIf(
condition=pyugrid is None,
reason='Requires pyugrid, which is not available.')
import iris.experimental.ugrid
data_path = ("NetCDF", "ugrid", )
file21 = "21_triangle_example.nc"
long_name = "volume flux between cells"
@skip_pyugrid
@tests.skip_data
class TestUgrid(tests.IrisTest):
def test_ugrid(self):
path = tests.get_data_path(data_path + (file21, ))
cube = iris.experimental.ugrid.ugrid(path, long_name)
self.assertTrue(hasattr(cube, 'mesh'))
if __name__ == "__main__":
tests.main()
| QuLogic/iris | lib/iris/tests/experimental/ugrid/test_ugrid.py | Python | gpl-3.0 | 1,683 | ["NetCDF"] | 8d49856ed61d5a29a21028da0f4fc341eb22e02b524d9b1d84942831b16b2fa1 |
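The try/except import plus unittest.skipIf pattern used above is a general recipe for tests that depend on an optional package. A dependency-free sketch of the same idea (the module name is a placeholder):
import unittest

try:
    import some_optional_package        # placeholder for e.g. pyugrid
except ImportError:
    some_optional_package = None

@unittest.skipIf(some_optional_package is None,
                 "Requires some_optional_package, which is not available.")
class TestFeature(unittest.TestCase):
    def test_something(self):
        self.assertTrue(True)

if __name__ == "__main__":
    unittest.main()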
# coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.help_files import helps # pylint: disable=unused-import
# pylint: disable=line-too-long, too-many-lines
helps['monitor'] = """
type: group
short-summary: Manage the Azure Monitor Service.
"""
helps['monitor action-group'] = """
type: group
short-summary: Manage action groups
"""
helps['monitor action-group create'] = """
type: command
short-summary: Create a new action group
parameters:
- name: --action -a
short-summary: Add receivers to the action group during the creation
long-summary: |
Usage: --action TYPE NAME [ARG ...]
Email:
Format: --action email NAME EMAIL_ADDRESS [usecommonalertschema]
Example: --action email bob [email protected]
SMS:
Format: --action sms NAME COUNTRY_CODE PHONE_NUMBER
Example: --action sms charli 1 5551234567
Webhook:
Format: --action webhook NAME URI [useaadauth OBJECT_ID IDENTIFIER URI] [usecommonalertschema]
Example: --action webhook alert_hook https://www.contoso.com/alert useaadauth testobj http://identifier usecommonalertschema
Arm Role:
Format: --action armrole NAME ROLE_ID [usecommonalertschema]
Example: --action armrole owner_role 8e3af657-a8ff-443c-a75c-2fe8c4bcb635
Azure App Push:
Format: --action azureapppush NAME EMAIL_ADDRESS
Example: --action azureapppush test_apppush [email protected]
ITSM:
Format: --action itsm NAME WORKSPACE_ID CONNECTION_ID TICKET_CONFIGURATION REGION
Example: --action itsm test_itsm test_workspace test_conn ticket_blob useast
Automation runbook:
Format: --action automationrunbook NAME AUTOMATION_ACCOUNT_ID RUNBOOK_NAME WEBHOOK_RESOURCE_ID SERVICE_URI [isglobalrunbook] [usecommonalertschema]
Example: --action automationrunbook test_runbook test_acc test_book test_webhook test_rsrc http://example.com isglobalrunbook usecommonalertschema
Voice:
Format: --action voice NAME COUNTRY_CODE PHONE_NUMBER
Example: --action voice charli 1 4441234567
Logic App:
Format: --action logicapp NAME RESOURCE_ID CALLBACK_URL [usecommonalertschema]
Example: --action logicapp test_logicapp test_rsrc http://callback
Azure Function:
Format: --action azurefunction NAME FUNCTION_APP_RESOURCE_ID FUNCTION_NAME HTTP_TRIGGER_URL [usecommonalertschema]
Example: --action azurefunction test_function test_rsrc test_func http://trigger usecommonalertschema
Event Hub:
Format: --action eventhub NAME SUBSCRIPTION_ID EVENT_HUB_NAME_SPACE EVENT_HUB_NAME [usecommonalertschema]
Example: --action eventhub test_eventhub 5def922a-3ed4-49c1-b9fd-05ec533819a3 eventhubNameSpace testEventHubName usecommonalertschema
Multiple actions can be specified by using more than one `--add-action` argument.
'useaadauth', 'isglobalrunbook' and 'usecommonalertschema' are optional arguments that only need to be passed to set the respective parameter to True.
If the 'useaadauth' argument is passed, then the OBJECT_ID and IDENTIFIER_URI values are required as well.
- name: --short-name
short-summary: The short name of the action group
examples:
- name: Create a new action group (autogenerated)
text: |
az monitor action-group create --action webhook https://alerts.contoso.com apiKey={APIKey} type=HighCPU --name MyActionGroup --resource-group MyResourceGroup
crafted: true
"""
helps['monitor action-group list'] = """
type: command
short-summary: List action groups under a resource group or the current subscription
parameters:
- name: --resource-group -g
type: string
short-summary: >
Name of the resource group under which the action groups are being listed. If it is omitted, all the action groups under
the current subscription are listed.
"""
helps['monitor action-group show'] = """
type: command
short-summary: Show the details of an action group
examples:
- name: Show the details of an action group (commonly used with --output and --query). (autogenerated)
text: |
az monitor action-group show --name MyActionGroup --resource-group MyResourceGroup
crafted: true
"""
helps['monitor action-group update'] = """
type: command
short-summary: Update an action group
parameters:
- name: --short-name
short-summary: Update the group short name of the action group
- name: --add-action -a
short-summary: Add receivers to the action group
long-summary: |
Usage: --add-action TYPE NAME [ARG ...]
Email:
Format: --add-action email NAME EMAIL_ADDRESS [usecommonalertschema]
Example: --add-action email bob [email protected]
SMS:
Format: --add-action sms NAME COUNTRY_CODE PHONE_NUMBER
Example: --add-action sms charli 1 5551234567
Webhook:
Format: --add-action webhook NAME URI [useaadauth OBJECT_ID IDENTIFIER URI] [usecommonalertschema]
Example: --add-action webhook alert_hook https://www.contoso.com/alert useaadauth testobj http://identifier usecommonalertschema
Arm Role:
Format: --add-action armrole NAME ROLE_ID [usecommonalertschema]
Example: --add-action armrole owner_role 8e3af657-a8ff-443c-a75c-2fe8c4bcb635
Azure App Push:
Format: --add-action azureapppush NAME EMAIL_ADDRESS
Example: --add-action azureapppush test_apppush [email protected]
ITSM:
Format: --add-action itsm NAME WORKSPACE_ID CONNECTION_ID TICKET_CONFIGURATION REGION
Example: --add-action itsm test_itsm test_workspace test_conn ticket_blob useast
Automation runbook:
Format: --add-action automationrunbook NAME AUTOMATION_ACCOUNT_ID RUNBOOK_NAME WEBHOOK_RESOURCE_ID SERVICE_URI [isglobalrunbook] [usecommonalertschema]
Example: --add-action automationrunbook test_runbook test_acc test_book test_webhook test_rsrc http://example.com isglobalrunbook usecommonalertschema
Voice:
Format: --add-action voice NAME COUNTRY_CODE PHONE_NUMBER
Example: --add-action voice charli 1 4441234567
Logic App:
Format: --add-action logicapp NAME RESOURCE_ID CALLBACK_URL [usecommonalertschema]
Example: --add-action logicapp test_logicapp test_rsrc http://callback
Azure Function:
Format: --add-action azurefunction NAME FUNCTION_APP_RESOURCE_ID FUNCTION_NAME HTTP_TRIGGER_URL [usecommonalertschema]
Example: --add-action azurefunction test_function test_rsrc test_func http://trigger usecommonalertschema
Event Hub:
Format: --action eventhub NAME SUBSCRIPTION_ID EVENT_HUB_NAME_SPACE EVENT_HUB_NAME [usecommonalertschema]
Example: --action eventhub test_eventhub 5def922a-3ed4-49c1-b9fd-05ec533819a3 eventhubNameSpace testEventHubName usecommonalertschema
Multiple actions can be specified by using more than one `--add-action` argument.
'useaadauth', 'isglobalrunbook' and 'usecommonalertschema' are optional arguments that only need to be passed to set the respective parameter to True.
If the 'useaadauth' argument is passed, then the OBJECT_ID and IDENTIFIER_URI values are required as well.
- name: --remove-action -r
short-summary: Remove receivers from the action group. Accept space-separated list of receiver names.
examples:
- name: Update an action group (autogenerated)
text: |
az monitor action-group update --name MyActionGroup --resource-group MyResourceGroup --set retentionPolicy.days=365 --subscription MySubscription
crafted: true
"""
helps['monitor action-group enable-receiver'] = """
type: command
short-summary: Enable a receiver in an action group.
long-summary: This changes the receiver's status from Disabled to Enabled. This operation is only supported for Email or SMS receivers.
"""
helps['monitor activity-log'] = """
type: group
short-summary: Manage activity logs.
"""
helps['monitor activity-log alert'] = """
type: group
short-summary: Manage activity log alerts
"""
helps['monitor activity-log alert action-group'] = """
type: group
short-summary: Manage action groups for activity log alerts
"""
helps['monitor activity-log alert action-group add'] = """
type: command
short-summary: Add action groups to this activity log alert. It can also be used to overwrite existing webhook properties of particular action groups.
parameters:
- name: --name -n
short-summary: Name of the activity log alerts
- name: --action-group -a
short-summary: The names or the resource ids of the action groups to be added.
- name: --reset
short-summary: Remove all the existing action groups before adding new action groups.
- name: --webhook-properties -w
short-summary: >
Space-separated webhook properties in 'key[=value]' format. These properties will be associated with
the action groups added in this command.
long-summary: >
For any webhook receiver in these action group, these data are appended to the webhook payload.
To attach different webhook properties to different action groups, add the action groups in separate update-action commands.
- name: --strict
short-summary: Fails the command if an action group to be added will change existing webhook properties.
examples:
- name: Add an action group and specify webhook properties.
text: |
az monitor activity-log alert action-group add -n {AlertName} -g {ResourceGroup} \\
--action /subscriptions/{SubID}/resourceGroups/{ResourceGroup}/providers/microsoft.insights/actionGroups/{ActionGroup} \\
--webhook-properties usage=test owner=jane
- name: Overwrite an existing action group's webhook properties.
text: |
az monitor activity-log alert action-group add -n {AlertName} -g {ResourceGroup} \\
-a /subscriptions/{SubID}/resourceGroups/{ResourceGroup}/providers/microsoft.insights/actionGroups/{ActionGroup} \\
--webhook-properties usage=test owner=john
- name: Remove webhook properties from an existing action group.
text: |
az monitor activity-log alert action-group add -n {AlertName} -g {ResourceGroup} \\
-a /subscriptions/{SubID}/resourceGroups/{ResourceGroup}/providers/microsoft.insights/actionGroups/{ActionGroup}
- name: Add new action groups but prevent the command from accidentally overwriting existing webhook properties
text: |
az monitor activity-log alert action-group add -n {AlertName} -g {ResourceGroup} --strict \\
--action-group {ResourceIDList}
"""
helps['monitor activity-log alert action-group remove'] = """
type: command
short-summary: Remove action groups from this activity log alert
parameters:
- name: --name -n
short-summary: Name of the activity log alerts
- name: --action-group -a
short-summary: The names or the resource ids of the action groups to be added.
"""
helps['monitor activity-log alert create'] = """
type: command
short-summary: Create a default activity log alert
long-summary: This command will create a default activity log alert with one condition, which checks whether the activity log's 'category' field equals 'ServiceHealth'. The newly created activity log alert does not have any action groups attached to it.
parameters:
- name: --name -n
short-summary: Name of the activity log alerts
- name: --scope -s
short-summary: A list of strings that will be used as prefixes.
long-summary: >
The alert will only apply to activity logs with resourceIDs that fall under one of these prefixes.
If not provided, the path to the resource group will be used.
- name: --disable
short-summary: Disable the activity log alert after it is created.
- name: --description
short-summary: A description of this activity log alert
- name: --condition -c
short-summary: The condition that will cause the alert to activate. The format is FIELD=VALUE[ and FIELD=VALUE...].
long-summary: >
The possible values for the field are 'resourceId', 'category', 'caller', 'level', 'operationName', 'resourceGroup',
'resourceProvider', 'status', 'subStatus', 'resourceType', or anything beginning with 'properties.'.
- name: --action-group -a
short-summary: >
Add an action group. Accepts space-separated action group identifiers. The identifier can be the action group's name
or its resource ID.
- name: --webhook-properties -w
short-summary: >
Space-separated webhook properties in 'key[=value]' format. These properties are associated with the action groups
added in this command.
long-summary: >
For any webhook receiver in these action group, this data is appended to the webhook payload. To attach different webhook
properties to different action groups, add the action groups in separate update-action commands.
examples:
- name: Create an alert with default settings.
text: >
az monitor activity-log alert create -n {AlertName} -g {ResourceGroup}
- name: Create an alert with condition about error level service health log.
text: >
az monitor activity-log alert create -n {AlertName} -g {ResourceGroup} \\
--condition category=ServiceHealth and level=Error
- name: Create an alert with an action group and specify webhook properties.
text: >
az monitor activity-log alert create -n {AlertName} -g {ResourceGroup} \\
-a /subscriptions/{SubID}/resourceGroups/{ResourceGroup}/providers/microsoft.insights/actionGroups/{ActionGroup} \\
-w usage=test owner=jane
- name: Create an alert which is initially disabled.
text: >
az monitor activity-log alert create -n {AlertName} -g {ResourceGroup} --disable
"""
helps['monitor activity-log alert list'] = """
type: command
short-summary: List activity log alerts under a resource group or the current subscription.
parameters:
- name: --resource-group -g
short-summary: Name of the resource group under which the activity log alerts are being listed. If it is omitted, all the activity log alerts under the current subscription are listed.
"""
helps['monitor activity-log alert scope'] = """
type: group
short-summary: Manage scopes for activity log alerts
"""
helps['monitor activity-log alert scope add'] = """
type: command
short-summary: Add scopes to this activity log alert.
parameters:
- name: --name -n
short-summary: Name of the activity log alerts
- name: --scope -s
short-summary: List of scopes to add. Each scope could be a resource ID, a resource group ID or a subscription ID.
- name: --reset
short-summary: Remove all the existing scopes before adding new scopes.
examples:
- name: Add scopes to this activity log alert. (autogenerated)
text: |
az monitor activity-log alert scope add --name MyActivityLogAlerts --resource-group MyResourceGroup --scope /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myRG /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myRG/Microsoft.KeyVault/vaults/mykey
crafted: true
"""
helps['monitor activity-log alert scope remove'] = """
type: command
short-summary: Remove scopes from this activity log alert.
parameters:
- name: --name -n
short-summary: Name of the activity log alerts
- name: --scope -s
short-summary: The scopes to remove
"""
helps['monitor activity-log alert update'] = """
type: command
short-summary: Update the details of this activity log alert
parameters:
- name: --description
short-summary: A description of this activity log alert.
- name: --condition -c
short-summary: The conditional expression that will cause the alert to activate. The format is FIELD=VALUE[ and FIELD=VALUE...].
long-summary: >
The possible values for the field are 'resourceId', 'category', 'caller', 'level', 'operationName', 'resourceGroup',
'resourceProvider', 'status', 'subStatus', 'resourceType', or anything beginning with 'properties.'.
examples:
- name: Update the condition
text: >
az monitor activity-log alert update -n {AlertName} -g {ResourceGroup} \\
--condition category=ServiceHealth and level=Error
- name: Disable an alert
text: >
az monitor activity-log alert update -n {AlertName} -g {ResourceGroup} --enable false
- name: Update the details of this activity log alert (autogenerated)
text: |
az monitor activity-log alert update --enabled true --name MyActivityLogAlerts --resource-group MyResourceGroup --subscription MySubscription
crafted: true
- name: Update the details of this activity log alert. (autogenerated)
text: |
az monitor activity-log alert update --name MyActivityLogAlerts --resource-group MyResourceGroup --tags key=value
crafted: true
"""
helps['monitor activity-log list'] = """
type: command
short-summary: List and query activity log events.
parameters:
- name: --correlation-id
short-summary: Correlation ID to query.
- name: --resource-id
short-summary: ARM ID of a resource.
- name: --namespace
short-summary: Resource provider namespace.
- name: --caller
short-summary: Caller to query for, such as an e-mail address or service principal ID.
- name: --status
short-summary: >
Status to query for (ex: Failed)
- name: --max-events
short-summary: Maximum number of records to return.
- name: --select
short-summary: Space-separated list of properties to return.
- name: --offset
short-summary: >
Time offset of the query range, in ##d##h format.
long-summary: >
Can be used with either --start-time or --end-time. If used with --start-time, then
the end time will be calculated by adding the offset. If used with --end-time (default), then
the start time will be calculated by subtracting the offset. If --start-time and --end-time are
provided, then --offset will be ignored.
examples:
- name: List all events from July 1st, looking forward one week.
text: az monitor activity-log list --start-time 2018-07-01 --offset 7d
- name: List events within the past six hours based on a correlation ID.
text: az monitor activity-log list --correlation-id b5eac9d2-e829-4c9a-9efb-586d19417c5f
- name: List events within the past hour based on resource group.
text: az monitor activity-log list -g {ResourceGroup} --offset 1h
"""
helps['monitor activity-log list-categories'] = """
type: command
short-summary: List the event categories of activity logs.
"""
helps['monitor alert'] = """
type: group
short-summary: Manage classic metric-based alert rules.
"""
helps['monitor alert create'] = """
type: command
short-summary: Create a classic metric-based alert rule.
parameters:
- name: --action -a
short-summary: Add an action to fire when the alert is triggered.
long-summary: |
Usage: --action TYPE KEY [ARG ...]
Email: --action email [email protected] [email protected]
Webhook: --action webhook https://www.contoso.com/alert apiKey=value
Webhook: --action webhook https://www.contoso.com/alert?apiKey=value
Multiple actions can be specified by using more than one `--action` argument.
- name: --description
short-summary: Free-text description of the rule. Defaults to the condition expression.
- name: --disabled
short-summary: Create the rule in a disabled state.
- name: --condition
short-summary: The condition which triggers the rule.
long-summary: >
The form of a condition is "METRIC {>,>=,<,<=} THRESHOLD {avg,min,max,total,last} PERIOD".
Values for METRIC and appropriate THRESHOLD values can be obtained from `az monitor metric` commands,
and PERIOD is of the form "##h##m##s".
- name: --email-service-owners
short-summary: Email the service owners if an alert is triggered.
examples:
- name: Create a high CPU usage alert on a VM with no actions.
text: >
az monitor alert create -n rule1 -g {ResourceGroup} --target {VirtualMachineID} --condition "Percentage CPU > 90 avg 5m"
- name: Create a high CPU usage alert on a VM with email and webhook actions.
text: |
az monitor alert create -n rule1 -g {ResourceGroup} --target {VirtualMachineID} \\
--condition "Percentage CPU > 90 avg 5m" \\
--action email [email protected] [email protected] --email-service-owners \\
--action webhook https://www.contoso.com/alerts?type=HighCPU \\
--action webhook https://alerts.contoso.com apiKey={APIKey} type=HighCPU
"""
helps['monitor alert delete'] = """
type: command
short-summary: Delete an alert rule.
examples:
- name: Delete an alert rule. (autogenerated)
text: |
az monitor alert delete --name MyAlertRule --resource-group MyResourceGroup
crafted: true
"""
helps['monitor alert list'] = """
type: command
short-summary: List alert rules in a resource group.
examples:
- name: List alert rules in a resource group. (autogenerated)
text: |
az monitor alert list --resource-group MyResourceGroup
crafted: true
"""
helps['monitor alert list-incidents'] = """
type: command
short-summary: List all incidents for an alert rule.
examples:
- name: List all incidents for an alert rule. (autogenerated)
text: |
az monitor alert list-incidents --resource-group MyResourceGroup --rule-name MyRule
crafted: true
"""
helps['monitor alert show'] = """
type: command
short-summary: Show an alert rule.
examples:
- name: Show an alert rule. (autogenerated)
text: |
az monitor alert show --name MyAlertRule --resource-group MyResourceGroup
crafted: true
"""
helps['monitor alert show-incident'] = """
type: command
short-summary: Get the details of an alert rule incident.
"""
helps['monitor alert update'] = """
type: command
short-summary: Update a classic metric-based alert rule.
parameters:
- name: --description
short-summary: Description of the rule.
- name: --condition
short-summary: The condition which triggers the rule.
long-summary: >
The form of a condition is "METRIC {>,>=,<,<=} THRESHOLD {avg,min,max,total,last} PERIOD".
Values for METRIC and appropriate THRESHOLD values can be obtained from `az monitor metric` commands,
and PERIOD is of the form "##h##m##s".
- name: --add-action -a
short-summary: Add an action to fire when the alert is triggered.
long-summary: |
Usage: --add-action TYPE KEY [ARG ...]
Email: --add-action email [email protected] [email protected]
Webhook: --add-action webhook https://www.contoso.com/alert apiKey=value
Webhook: --add-action webhook https://www.contoso.com/alert?apiKey=value
Multiple actions can be specified by using more than one `--add-action` argument.
- name: --remove-action -r
short-summary: Remove one or more actions.
long-summary: |
Usage: --remove-action TYPE KEY [KEY ...]
Email: --remove-action email [email protected] [email protected]
Webhook: --remove-action webhook https://contoso.com/alert https://alerts.contoso.com
- name: --email-service-owners
short-summary: Email the service owners if an alert is triggered.
- name: --metric
short-summary: Name of the metric to base the rule on.
populator-commands:
- az monitor metrics list-definitions
- name: --operator
short-summary: How to compare the metric against the threshold.
- name: --threshold
short-summary: Numeric threshold at which to trigger the alert.
- name: --aggregation
short-summary: Type of aggregation to apply based on --period.
- name: --period
short-summary: >
Time span over which to apply --aggregation, in nDnHnMnS shorthand or full ISO8601 format.
examples:
- name: Update a classic metric-based alert rule. (autogenerated)
text: |
az monitor alert update --email-service-owners true --name MyAlertRule --resource-group MyResourceGroup
crafted: true
- name: Update a classic metric-based alert rule. (autogenerated)
text: |
az monitor alert update --name MyAlertRule --remove-action email [email protected] --resource-group MyResourceGroup
crafted: true
- name: Update a classic metric-based alert rule. (autogenerated)
text: |
az monitor alert update --name MyAlertRule --resource-group MyResourceGroup --set retentionPolicy.days=365
crafted: true
"""
helps['monitor autoscale'] = """
type: group
short-summary: Manage autoscale settings.
long-summary: >
For more information on autoscaling, visit: https://docs.microsoft.com/azure/monitoring-and-diagnostics/monitoring-understanding-autoscale-settings
"""
helps['monitor autoscale create'] = """
type: command
short-summary: Create new autoscale settings.
long-summary: >
For more information on autoscaling, visit: https://docs.microsoft.com/azure/monitoring-and-diagnostics/monitoring-understanding-autoscale-settings
parameters:
- name: --action -a
short-summary: Add an action to fire when a scaling event occurs.
long-summary: |
Usage: --action TYPE KEY [ARG ...]
Email: --action email [email protected] [email protected]
Webhook: --action webhook https://www.contoso.com/alert apiKey=value
Webhook: --action webhook https://www.contoso.com/alert?apiKey=value
Multiple actions can be specified by using more than one `--action` argument.
examples:
- name: Create autoscale settings to scale between 2 and 5 instances (3 as default). Email the administrator when scaling occurs.
text: |
az monitor autoscale create -g {myrg} --resource {resource-id} --min-count 2 --max-count 5 \\
--count 3 --email-administrator
az monitor autoscale rule create -g {myrg} --autoscale-name {resource-name} --scale out 1 \\
--condition "Percentage CPU > 75 avg 5m"
az monitor autoscale rule create -g {myrg} --autoscale-name {resource-name} --scale in 1 \\
--condition "Percentage CPU < 25 avg 5m"
- name: Create autoscale settings for exactly 4 instances.
text: >
az monitor autoscale create -g {myrg} --resource {resource-id} --count 4
- name: Create new autoscale settings. (autogenerated)
text: |
az monitor autoscale create --count 3 --max-count 5 --min-count 2 --name MyAutoscaleSettings --resource myScaleSet --resource-group MyResourceGroup --resource-type Microsoft.Compute/virtualMachineScaleSets
crafted: true
"""
helps['monitor autoscale profile'] = """
type: group
short-summary: Manage autoscaling profiles.
long-summary: >
For more information on autoscaling, visit: https://docs.microsoft.com/azure/monitoring-and-diagnostics/monitoring-understanding-autoscale-settings
"""
helps['monitor autoscale profile create'] = """
type: command
short-summary: Create a fixed or recurring autoscale profile.
long-summary: >
For more information on autoscaling, visit: https://docs.microsoft.com/azure/monitoring-and-diagnostics/monitoring-understanding-autoscale-settings
parameters:
- name: --timezone
short-summary: Timezone name.
populator-commands:
- az monitor autoscale profile list-timezones
- name: --recurrence -r
short-summary: When the profile recurs. If omitted, a fixed (non-recurring) profile is created.
long-summary: |
Usage: --recurrence {week} [ARG ARG ...]
Weekly: --recurrence week Sat Sun
- name: --start
short-summary: When the autoscale profile begins. Format depends on the type of profile.
long-summary: |
Fixed: --start yyyy-mm-dd [hh:mm:ss]
Weekly: [--start hh:mm]
- name: --end
short-summary: When the autoscale profile ends. Format depends on the type of profile.
long-summary: |
Fixed: --end yyyy-mm-dd [hh:mm:ss]
Weekly: [--end hh:mm]
examples:
- name: Create a fixed date profile, inheriting the default scaling rules but changing the capacity.
text: |
az monitor autoscale create -g {myrg} --resource {resource-id} --min-count 2 --count 3 \\
--max-count 5
az monitor autoscale rule create -g {myrg} --autoscale-name {name} --scale out 1 \\
--condition "Percentage CPU > 75 avg 5m"
az monitor autoscale rule create -g {myrg} --autoscale-name {name} --scale in 1 \\
--condition "Percentage CPU < 25 avg 5m"
az monitor autoscale profile create -g {myrg} --autoscale-name {name} -n Christmas \\
--copy-rules default --min-count 3 --count 6 --max-count 10 --start 2018-12-24 \\
--end 2018-12-26 --timezone "Pacific Standard Time"
- name: Create a recurring weekend profile, inheriting the default scaling rules but changing the capacity.
text: |
az monitor autoscale create -g {myrg} --resource {resource-id} --min-count 2 --count 3 \\
--max-count 5
az monitor autoscale rule create -g {myrg} --autoscale-name {name} --scale out 1 \\
--condition "Percentage CPU > 75 avg 5m"
az monitor autoscale rule create -g {myrg} --autoscale-name {name} --scale in 1 \\
--condition "Percentage CPU < 25 avg 5m"
az monitor autoscale profile create -g {myrg} --autoscale-name {name} -n weekend \\
--copy-rules default --min-count 1 --count 2 --max-count 2 \\
--recurrence week sat sun --timezone "Pacific Standard Time"
- name: Create a fixed or recurring autoscale profile. (autogenerated)
text: |
az monitor autoscale profile create --autoscale-name MyAutoscale --copy-rules default --count 2 --end 2018-12-26 --max-count 10 --min-count 1 --name Christmas --recurrence week sat sun --resource-group MyResourceGroup --start 2018-12-24 --timezone "Pacific Standard Time"
crafted: true
- name: Create a fixed or recurring autoscale profile. (autogenerated)
text: |
az monitor autoscale profile create --autoscale-name MyAutoscale --count 2 --max-count 10 --min-count 1 --name Christmas --recurrence week sat sun --resource-group MyResourceGroup --start 2018-12-24 --subscription MySubscription --timezone "Pacific Standard Time"
crafted: true
"""
helps['monitor autoscale profile delete'] = """
type: command
short-summary: Delete an autoscale profile.
examples:
- name: Delete an autoscale profile. (autogenerated)
text: |
az monitor autoscale profile delete --autoscale-name MyAutoscale --name MyAutoscaleProfile --resource-group MyResourceGroup
crafted: true
"""
helps['monitor autoscale profile list'] = """
type: command
short-summary: List autoscale profiles.
examples:
- name: List autoscale profiles. (autogenerated)
text: |
az monitor autoscale profile list --autoscale-name MyAutoscale --resource-group MyResourceGroup
crafted: true
"""
helps['monitor autoscale profile list-timezones'] = """
type: command
short-summary: Look up time zone information.
"""
helps['monitor autoscale profile show'] = """
type: command
short-summary: Show details of an autoscale profile.
"""
helps['monitor autoscale rule'] = """
type: group
short-summary: Manage autoscale scaling rules.
long-summary: >
For more information on autoscaling, visit: https://docs.microsoft.com/azure/monitoring-and-diagnostics/monitoring-understanding-autoscale-settings
"""
helps['monitor autoscale rule copy'] = """
type: command
short-summary: Copy autoscale rules from one profile to another.
"""
helps['monitor autoscale rule create'] = """
type: command
short-summary: Add a new autoscale rule.
long-summary: >
For more information on autoscaling, visit: https://docs.microsoft.com/azure/monitoring-and-diagnostics/monitoring-understanding-autoscale-settings
parameters:
- name: --condition
short-summary: The condition which triggers the scaling action.
long-summary: >
Usage: --condition ["NAMESPACE"] METRIC {==,!=,>,>=,<,<=} THRESHOLD
{avg,min,max,total,count} PERIOD
[where DIMENSION {==,!=} VALUE [or VALUE ...]
[and DIMENSION {==,!=} VALUE [or VALUE ...] ...]]
Dimensions can be queried by adding the 'where' keyword and multiple dimensions can be queried by combining them with the 'and' keyword.
Values for METRIC and appropriate THRESHOLD values can be obtained from the `az monitor metric` command.
Format of PERIOD is "##h##m##s".
- name: --scale
short-summary: The direction and amount to scale.
long-summary: |
Usage: --scale {to,in,out} VAL[%]
Fixed Count: --scale to 5
In by Count: --scale in 2
Out by Percent: --scale out 10%
- name: --timegrain
short-summary: >
The way metrics are polled across instances.
long-summary: >
The form of the timegrain is {avg,min,max,sum} VALUE. Values can be obtained from the `az monitor metric` command.
Format of VALUE is "##h##m##s".
examples:
- name: Scale to 5 instances when the CPU Percentage across instances is greater than 75 averaged over 10 minutes.
text: |
az monitor autoscale rule create -g {myrg} --autoscale-name {myvmss} \\
--scale to 5 --condition "Percentage CPU > 75 avg 10m"
- name: Scale up 2 instances when the CPU Percentage across instances is greater than 75 averaged over 5 minutes.
text: |
az monitor autoscale rule create -g {myrg} --autoscale-name {myvmss} \\
--scale out 2 --condition "Percentage CPU > 75 avg 5m"
- name: Scale down 50% when the CPU Percentage across instances is less than 25 averaged over 15 minutes.
text: |
az monitor autoscale rule create -g {myrg} --autoscale-name {myvmss} \\
--scale in 50% --condition "Percentage CPU < 25 avg 15m"
- name: Create an autoscale rule on a guest VM metric enabled through the diagnostics extension.
The counterSpecifier field retrieved from 'az vmss diagnostics get-default-config' can be used in `--condition`.
text: |
az monitor autoscale rule create -g {myrg} --autoscale-name test --scale out 1 --condition "/builtin/memory/percentavailablememory > 80 total 5m"
"""
helps['monitor autoscale rule delete'] = """
type: command
short-summary: Remove autoscale rules from a profile.
"""
helps['monitor autoscale rule list'] = """
type: command
short-summary: List autoscale rules for a profile.
examples:
- name: List autoscale rules for a profile. (autogenerated)
text: |
az monitor autoscale rule list --autoscale-name MyAutoscale --profile-name MyProfile --resource-group MyResourceGroup
crafted: true
"""
helps['monitor autoscale show'] = """
type: command
short-summary: Show autoscale setting details.
examples:
- name: Show autoscale setting details. (autogenerated)
text: |
az monitor autoscale show --name MyAutoscaleSettings --resource-group MyResourceGroup
crafted: true
"""
helps['monitor autoscale update'] = """
type: command
short-summary: Update autoscale settings.
long-summary: >
For more information on autoscaling, visit: https://docs.microsoft.com/azure/monitoring-and-diagnostics/monitoring-understanding-autoscale-settings
parameters:
- name: --add-action -a
short-summary: Add an action to fire when a scaling event occurs.
long-summary: |
Usage: --add-action TYPE KEY [ARG ...]
Email: --add-action email [email protected] [email protected]
Webhook: --add-action webhook https://www.contoso.com/alert apiKey=value
Webhook: --add-action webhook https://www.contoso.com/alert?apiKey=value
Multiple actions can be specified by using more than one `--add-action` argument.
- name: --remove-action -r
short-summary: Remove one or more actions.
long-summary: |
Usage: --remove-action TYPE KEY [KEY ...]
Email: --remove-action email [email protected] [email protected]
Webhook: --remove-action webhook https://contoso.com/alert https://alerts.contoso.com
examples:
- name: Update autoscale settings to use a fixed 3 instances by default.
text: |
az monitor autoscale update -g {myrg} -n {autoscale-name} --count 3
- name: Update autoscale settings to remove an email notification.
text: |
az monitor autoscale update -g {myrg} -n {autoscale-name} \\
--remove-action email [email protected]
- name: Update autoscale settings. (autogenerated)
text: |
az monitor autoscale update --count 3 --email-administrator true --enabled true --max-count 5 --min-count 2 --name MyAutoscaleSettings --resource-group MyResourceGroup --tags key[=value]
crafted: true
"""
helps['monitor autoscale-settings'] = """
type: group
short-summary: Manage autoscale settings.
"""
helps['monitor autoscale-settings update'] = """
type: command
short-summary: Update an autoscale setting.
examples:
- name: Updates an autoscale setting. (autogenerated)
text: |
az monitor autoscale-settings update --name MyAutoscaleSetting --resource-group MyResourceGroup --set retentionPolicy.days=365
crafted: true
"""
helps['monitor diagnostic-settings'] = """
type: group
short-summary: Manage service diagnostic settings.
"""
helps['monitor diagnostic-settings categories'] = """
type: group
short-summary: Retrieve service diagnostic settings categories.
"""
helps['monitor diagnostic-settings categories list'] = """
type: command
short-summary: List the diagnostic settings categories for the specified resource.
examples:
- name: List diagnostic settings categories by using resource ID
text: az monitor diagnostic-settings categories list --resource /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/myRG/providers/microsoft.logic/workflows/myWorkflow
- name: List diagnostic settings categories by using resource name
text: az monitor diagnostic-settings categories list -g myRG --resource-type microsoft.logic/workflows --resource myWorkflow
"""
helps['monitor diagnostic-settings create'] = """
type: command
short-summary: Create diagnostic settings for the specified resource.
long-summary: >
For more information, visit: https://docs.microsoft.com/rest/api/monitor/diagnosticsettings/createorupdate#metricsettings
parameters:
- name: --name -n
short-summary: The name of the diagnostic settings.
- name: --resource-group -g
type: string
short-summary: Name of the resource group for the Log Analytics workspace and storage account, used when a service name rather than a full resource ID is given.
- name: --logs
type: string
short-summary: JSON encoded list of logs settings. Use '@{file}' to load from a file.
- name: --metrics
type: string
short-summary: JSON encoded list of metric settings. Use '@{file}' to load from a file.
- name: --storage-account
type: string
short-summary: Name or ID of the storage account to send diagnostic logs to.
- name: --workspace
type: string
short-summary: Name or ID of the Log Analytics workspace to send diagnostic logs to.
- name: --event-hub
type: string
short-summary: >
Name or ID of an event hub. If none is specified, the default event hub will be selected.
- name: --event-hub-rule
short-summary: Name or ID of the event hub authorization rule.
examples:
- name: Create diagnostic settings with EventHub.
text: |
az monitor diagnostic-settings create --resource {ID} -n {name}
--event-hub-rule {eventHubRuleID} --storage-account {storageAccount}
--logs '[
{
"category": "WorkflowRuntime",
"enabled": true,
"retentionPolicy": {
"enabled": false,
"days": 0
}
}
]'
--metrics '[
{
"category": "WorkflowRuntime",
"enabled": true,
"retentionPolicy": {
"enabled": false,
"days": 0
}
}
]'
"""
helps['monitor diagnostic-settings update'] = """
type: command
short-summary: Update diagnostic settings.
examples:
- name: Update diagnostic settings. (autogenerated)
text: |
az monitor diagnostic-settings update --name MyDiagnosticSetting --resource myScaleSet --set retentionPolicy.days=365
crafted: true
"""
helps['monitor diagnostic-settings subscription'] = """
type: group
short-summary: Manage diagnostic settings for subscription.
"""
helps['monitor diagnostic-settings subscription create'] = """
type: command
short-summary: Create diagnostic settings for a subscription
examples:
- name: Create diagnostic settings for a subscription with EventHub.
text: |
az monitor diagnostic-settings subscription create -n {name} --location westus --event-hub-auth-rule {eventHubRuleID} --storage-account {storageAccount} \\
--logs '[
{
"category": "Security",
"enabled": true
},
{
"category": "Administrative",
"enabled": true
},
{
"category": "ServiceHealth",
"enabled": true
},
{
"category": "Alert",
"enabled": true
},
{
"category": "Recommendation",
"enabled": true
},
{
"category": "Policy",
"enabled": true
},
{
"category": "Autoscale",
"enabled": true
},
{
"category": "ResourceHealth",
"enabled": true
}
]'
"""
helps['monitor diagnostic-settings subscription update'] = """
type: command
short-summary: Update diagnostic settings for a subscription.
"""
helps['monitor log-analytics'] = """
type: group
short-summary: Manage Azure log analytics.
"""
helps['monitor log-analytics cluster'] = """
type: group
short-summary: Manage Azure log analytics cluster.
"""
helps['monitor log-analytics cluster create'] = """
type: command
short-summary: Create a cluster instance.
examples:
- name: Create a cluster instance.
text: az monitor log-analytics cluster create -g MyResourceGroup -n MyCluster --sku-capacity 1000
"""
helps['monitor log-analytics cluster update'] = """
type: command
short-summary: Update a cluster instance.
examples:
- name: Update a cluster instance.
text: |
az monitor log-analytics cluster update -g MyResourceGroup -n MyCluster \\
--key-vault-uri https://myvault.vault.azure.net/ --key-name my-key \\
--key-version fe0adcedd8014aed9c22e9aefb81a1ds --sku-capacity 1000
"""
helps['monitor log-analytics cluster delete'] = """
type: command
short-summary: Delete a cluster instance.
examples:
- name: Delete a cluster instance.
text: az monitor log-analytics cluster delete -g MyResourceGroup -n MyCluster
"""
helps['monitor log-analytics cluster show'] = """
type: command
short-summary: Show the properties of a cluster instance.
examples:
- name: Show the properties of a cluster instance.
text: az monitor log-analytics cluster show -g MyResourceGroup -n MyCluster
"""
helps['monitor log-analytics cluster list'] = """
type: command
short-summary: Get all cluster instances in a resource group or in the current subscription.
examples:
- name: Get all cluster instances in a resource group.
text: az monitor log-analytics cluster list -g MyResourceGroup
- name: Get all cluster instances in the current subscription.
text: az monitor log-analytics cluster list
"""
helps['monitor log-analytics cluster wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the cluster is met.
examples:
- name: Pause executing next line of CLI script until the cluster is successfully provisioned.
text: az monitor log-analytics cluster wait -n MyCluster -g MyResourceGroup --created
"""
helps['monitor log-analytics workspace'] = """
type: group
short-summary: Manage Azure log analytics workspace
"""
helps['monitor log-analytics workspace create'] = """
type: command
short-summary: Create a workspace instance
examples:
- name: Create a workspace instance
text: az monitor log-analytics workspace create -g MyResourceGroup -n MyWorkspace
"""
helps['monitor log-analytics workspace delete'] = """
type: command
short-summary: Delete a workspace instance.
examples:
- name: Delete a workspace instance. (autogenerated)
text: |
az monitor log-analytics workspace delete --force true --resource-group MyResourceGroup --workspace-name MyWorkspace
crafted: true
"""
helps['monitor log-analytics workspace get-schema'] = """
type: command
short-summary: Get the schema for a given workspace.
long-summary: >
Schema represents the internal structure of the workspace, which can be used during the query.
For more information, visit: https://docs.microsoft.com/rest/api/loganalytics/workspaces%202015-03-20/getschema
examples:
- name: Get the schema for a given workspace. (autogenerated)
text: |
az monitor log-analytics workspace get-schema --resource-group MyResourceGroup --workspace-name MyWorkspace
crafted: true
"""
helps['monitor log-analytics workspace get-shared-keys'] = """
type: command
short-summary: Get the shared keys for a workspace.
examples:
- name: Get the shared keys for a workspace. (autogenerated)
text: |
az monitor log-analytics workspace get-shared-keys --resource-group MyResourceGroup --workspace-name MyWorkspace
crafted: true
"""
helps['monitor log-analytics workspace list'] = """
type: command
short-summary: Get a list of workspaces under a resource group or a subscription.
"""
helps['monitor log-analytics workspace list-deleted-workspaces'] = """
type: command
short-summary: Get a list of deleted workspaces that can be recovered in a subscription or a resource group.
examples:
- name: Get a list of deleted workspaces that can be recovered in a resource group
text: |
az monitor log-analytics workspace list-deleted-workspaces --resource-group MyResourceGroup
"""
helps['monitor log-analytics workspace recover'] = """
type: command
short-summary: Recover a workspace in a soft-delete state within 14 days.
examples:
- name: Recover a workspace in a soft-delete state within 14 days
text: |
az monitor log-analytics workspace recover --resource-group MyResourceGroup -n MyWorkspace
"""
helps['monitor log-analytics workspace list-management-groups'] = """
type: command
short-summary: Get a list of management groups connected to a workspace.
examples:
- name: Get a list of management groups connected to a workspace. (autogenerated)
text: |
az monitor log-analytics workspace list-management-groups --resource-group MyResourceGroup --subscription mysubscription --workspace-name MyWorkspace
crafted: true
"""
helps['monitor log-analytics workspace list-usages'] = """
type: command
short-summary: Get a list of usage metrics for a workspace.
examples:
- name: Get a list of usage metrics for a workspace. (autogenerated)
text: |
az monitor log-analytics workspace list-usages --resource-group MyResourceGroup --subscription MySubscription --workspace-name MyWorkspace
crafted: true
"""
helps['monitor log-analytics workspace table'] = """
type: group
short-summary: Manage tables for log analytics workspace.
"""
helps['monitor log-analytics workspace table list'] = """
type: command
short-summary: List all the tables for the given Log Analytics workspace.
examples:
- name: List all the tables for the given Log Analytics workspace
text: |
az monitor log-analytics workspace table list --resource-group MyResourceGroup --workspace-name MyWorkspace
"""
helps['monitor log-analytics workspace table show'] = """
type: command
short-summary: Get a Log Analytics workspace table.
examples:
- name: Get a Log Analytics workspace table
text: |
az monitor log-analytics workspace table show --resource-group MyResourceGroup --workspace-name MyWorkspace -n MyTable
"""
helps['monitor log-analytics workspace table update'] = """
type: command
short-summary: Update the properties of a Log Analytics workspace table; currently only updating the retention time is supported.
examples:
- name: Update the retention time of a Log Analytics workspace table
text: |
az monitor log-analytics workspace table update --resource-group MyResourceGroup --workspace-name MyWorkspace -n MyTable --retention-time 30
"""
helps['monitor log-analytics workspace pack'] = """
type: group
short-summary: Manage intelligent packs for log analytics workspace.
"""
helps['monitor log-analytics workspace pack disable'] = """
type: command
short-summary: Disable an intelligence pack for a given workspace.
"""
helps['monitor log-analytics workspace pack enable'] = """
type: command
short-summary: Enable an intelligence pack for a given workspace.
examples:
- name: Enable an intelligence pack for a given workspace. (autogenerated)
text: |
az monitor log-analytics workspace pack enable --name MyIntelligencePack --resource-group MyResourceGroup --subscription mysubscription --workspace-name MyWorkspace
crafted: true
"""
helps['monitor log-analytics workspace pack list'] = """
type: command
short-summary: List all the intelligence packs possible and whether they are enabled or disabled for a given workspace.
examples:
- name: List all the intelligence packs possible and whether they are enabled or disabled for a given workspace. (autogenerated)
text: |
az monitor log-analytics workspace pack list --resource-group MyResourceGroup --workspace-name MyWorkspace
crafted: true
"""
helps['monitor log-analytics workspace show'] = """
type: command
short-summary: Show a workspace instance.
examples:
- name: Show a workspace instance. (autogenerated)
text: |
az monitor log-analytics workspace show --resource-group MyResourceGroup --workspace-name MyWorkspace
crafted: true
"""
helps['monitor log-analytics workspace update'] = """
type: command
short-summary: Update a workspace instance
examples:
- name: Update a workspace instance. (autogenerated)
text: |
az monitor log-analytics workspace update --resource-group myresourcegroup --retention-time 30 --workspace-name myworkspace
crafted: true
"""
helps['monitor log-analytics workspace linked-service'] = """
type: group
short-summary: Manage linked service for log analytics workspace.
long-summary: |
A linked service is used to define a relation from the workspace to another Azure resource. Log Analytics and Azure resources then leverage this connection in their operations. Example uses of linked services in a Log Analytics workspace are an Automation account and a workspace association to a CMK.
"""
helps['monitor log-analytics workspace linked-service create'] = """
type: command
short-summary: Create a linked service.
examples:
- name: Create a linked service.
text: |
az monitor log-analytics workspace linked-service create -g MyResourceGroup -n cluster \\
--workspace-name MyWorkspace --write-access-resource-id /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/MyResourceGroup/providers/Microsoft.OperationalInsights/clusters/MyCluster
"""
helps['monitor log-analytics workspace linked-service update'] = """
type: command
short-summary: Update a linked service.
examples:
- name: Update a linked service.
text: |
az monitor log-analytics workspace linked-service update -g MyResourceGroup -n cluster \\
--workspace-name MyWorkspace --write-access-resource-id /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/MyResourceGroup/providers/Microsoft.OperationalInsights/clusters/MyCluster
"""
helps['monitor log-analytics workspace linked-service show'] = """
type: command
short-summary: Show the properties of a linked service.
examples:
- name: Show the properties of a linked service.
text: |
az monitor log-analytics workspace linked-service show -g MyResourceGroup -n cluster --workspace-name MyWorkspace
"""
helps['monitor log-analytics workspace linked-service delete'] = """
type: command
short-summary: Delete a linked service.
examples:
- name: Delete a linked service.
text: |
az monitor log-analytics workspace linked-service delete -g MyResourceGroup -n cluster --workspace-name MyWorkspace
"""
helps['monitor log-analytics workspace linked-service list'] = """
type: command
short-summary: Get all the linked services in a workspace.
examples:
- name: Get all the linked services in a workspace.
text: |
az monitor log-analytics workspace linked-service list -g MyResourceGroup --workspace-name MyWorkspace
"""
helps['monitor log-analytics workspace linked-service wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the linked service is met.
examples:
- name: Pause executing next line of CLI script until the linked service is successfully provisioned.
text: az monitor log-analytics workspace linked-service wait -n cluster -g MyResourceGroup --workspace-name MyWorkspace --created
"""
helps['monitor log-analytics workspace linked-storage'] = """
type: group
short-summary: Manage linked storage account for log analytics workspace.
"""
helps['monitor log-analytics workspace linked-storage create'] = """
type: command
short-summary: Create linked storage accounts for a log analytics workspace.
examples:
- name: Create two linked storage accounts for a log analytics workspace using the name of the storage account.
text: az monitor log-analytics workspace linked-storage create --type AzureWatson -g MyResourceGroup --workspace-name MyWorkspace --storage-accounts StorageAccount1 StorageAccount2
- name: Create one linked storage account for a log analytics workspace using the resource id of the storage account.
text: az monitor log-analytics workspace linked-storage create --type AzureWatson -g MyResourceGroup --workspace-name MyWorkspace --storage-accounts /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/clitest.rg000001/providers/Microsoft.Storage/storageAccounts/cli000001
- name: Create some linked storage accounts for log analytics workspace. (autogenerated)
text: |
az monitor log-analytics workspace linked-storage create --resource-group MyResourceGroup --storage-accounts /subscriptions/00000000-0000-0000-0000-00000000000000000/resourceGroups/clitest.rg000001/providers/Microsoft.Storage/storageAccounts/cli000001 --subscription mysubscription --type CustomLogs --workspace-name MyWorkspace
crafted: true
"""
helps['monitor log-analytics workspace linked-storage delete'] = """
type: command
short-summary: Delete all linked storage accounts with a specific data source type for a log analytics workspace.
examples:
- name: Delete all linked storage accounts with a specific type for a log analytics workspace
text: az monitor log-analytics workspace linked-storage delete --type AzureWatson -g MyResourceGroup --workspace-name MyWorkspace
"""
helps['monitor log-analytics workspace linked-storage add'] = """
type: command
short-summary: Add linked storage accounts with a specific data source type for a log analytics workspace.
examples:
- name: Add two linked storage accounts for a log analytics workspace using the name of the storage account.
text: az monitor log-analytics workspace linked-storage add --type AzureWatson -g MyResourceGroup --workspace-name MyWorkspace --storage-accounts StorageAccount1 StorageAccount2
- name: Add one linked storage account for a log analytics workspace using the resource id of the storage account.
text: az monitor log-analytics workspace linked-storage add --type AzureWatson -g MyResourceGroup --workspace-name MyWorkspace --storage-accounts /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/clitest.rg000001/providers/Microsoft.Storage/storageAccounts/cli000001
"""
helps['monitor log-analytics workspace linked-storage remove'] = """
type: command
short-summary: Remove linked storage accounts with a specific data source type for a log analytics workspace.
examples:
- name: Remove two linked storage accounts for a log analytics workspace using the name of the storage account.
text: az monitor log-analytics workspace linked-storage remove --type AzureWatson -g MyResourceGroup --workspace-name MyWorkspace --storage-accounts StorageAccount1 StorageAccount2
- name: Remove one linked storage account for a log analytics workspace using the resource id of the storage account.
text: az monitor log-analytics workspace linked-storage remove --type AzureWatson -g MyResourceGroup --workspace-name MyWorkspace --storage-accounts /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/clitest.rg000001/providers/Microsoft.Storage/storageAccounts/cli000001
"""
helps['monitor log-analytics workspace linked-storage list'] = """
type: command
short-summary: List all linked storage accounts for a log analytics workspace.
examples:
- name: List all linked storage accounts for a log analytics workspace. (autogenerated)
text: |
az monitor log-analytics workspace linked-storage list --resource-group MyResourceGroup --workspace-name MyWorkspace
crafted: true
"""
helps['monitor log-analytics workspace linked-storage show'] = """
type: command
short-summary: Show all linked storage accounts with a specific data source type for a log analytics workspace.
examples:
- name: Show all linked storage accounts with a specific type for a log analytics workspace
text: az monitor log-analytics workspace linked-storage show --type AzureWatson -g MyResourceGroup --workspace-name MyWorkspace
"""
helps['monitor log-analytics workspace saved-search'] = """
type: group
short-summary: Manage saved search for log analytics workspace.
"""
helps['monitor log-analytics workspace saved-search create'] = """
type: command
short-summary: Create a saved search for a given workspace.
examples:
- name: Create a saved search for a given workspace.
text: az monitor log-analytics workspace saved-search create -g MyRG --workspace-name MyWS -n MySavedSearch --category Test1 --display-name TestSavedSearch -q "AzureActivity | summarize count() by bin(TimeGenerated, 1h)" --fa myfun --fp "a:string = value"
"""
helps['monitor log-analytics workspace saved-search update'] = """
type: command
short-summary: Update a saved search for a given workspace.
examples:
- name: Update a saved search for a given workspace.
text: az monitor log-analytics workspace saved-search update -g MyRG --workspace-name MyWS -n MySavedSearch --category Test1 --display-name TestSavedSearch -q "AzureActivity | summarize count() by bin(TimeGenerated, 1h)" --fa myfun --fp "a:string = value"
"""
helps['monitor log-analytics workspace saved-search list'] = """
type: command
short-summary: List all saved searches for a given workspace.
"""
helps['monitor log-analytics workspace saved-search show'] = """
type: command
short-summary: Show a saved search for a given workspace.
"""
helps['monitor log-analytics workspace saved-search delete'] = """
type: command
short-summary: Delete a saved search for a given workspace.
"""
helps['monitor log-analytics workspace data-export'] = """
type: group
short-summary: Manage data export rules for a log analytics workspace.
"""
helps['monitor log-analytics workspace data-export create'] = """
type: command
short-summary: Create a data export rule for a given workspace.
long-summary: |
For more information, see
https://docs.microsoft.com/azure/azure-monitor/platform/logs-data-export.
parameters:
- name: --tables -t
short-summary: An array of tables to export.
populator-commands:
- "`az monitor log-analytics workspace table list`"
examples:
- name: Create a data export rule for a given workspace.
text: az monitor log-analytics workspace data-export create -g MyRG --workspace-name MyWS -n MyDataExport --destination {sa_id_1} --enable -t {table_name}
"""
helps['monitor log-analytics workspace data-export update'] = """
type: command
short-summary: Update a data export rule for a given workspace.
long-summary: |
For more information, see
https://docs.microsoft.com/azure/azure-monitor/platform/logs-data-export.
parameters:
- name: --tables -t
short-summary: An array of tables to export.
populator-commands:
- "`az monitor log-analytics workspace table list`"
examples:
- name: Update a data export rule for a given workspace.
text: az monitor log-analytics workspace data-export update -g MyRG --workspace-name MyWS -n MyDataExport --destination {namespace_id} -t {table_name} --enable false
"""
helps['monitor log-analytics workspace data-export list'] = """
type: command
short-summary: List all data export rules for a given workspace.
"""
helps['monitor log-analytics workspace data-export show'] = """
type: command
short-summary: Show a data export rule for a given workspace.
"""
helps['monitor log-analytics workspace data-export delete'] = """
type: command
short-summary: Delete a data export rule for a given workspace.
"""
helps['monitor log-profiles'] = """
type: group
short-summary: Manage log profiles.
"""
helps['monitor log-profiles create'] = """
type: command
short-summary: Create a log profile.
parameters:
- name: --name -n
short-summary: The name of the log profile.
- name: --locations
short-summary: Space-separated list of regions for which Activity Log events should be stored.
- name: --categories
short-summary: Space-separated categories of the logs. These categories are created as is convenient to the user. Some values are Write, Delete, and/or Action.
- name: --storage-account-id
short-summary: The resource id of the storage account to which you would like to send the Activity Log.
- name: --service-bus-rule-id
short-summary: The service bus rule ID of the service bus namespace in which you would like to have Event Hubs created for streaming the Activity Log. The rule ID is of the format '{service bus resource ID}/authorizationrules/{key name}'.
- name: --days
short-summary: The number of days to retain the data. A value of 0 will retain the events indefinitely.
- name: --enabled
short-summary: Whether the retention policy is enabled.
examples:
- name: Create a log profile. (autogenerated)
text: |
az monitor log-profiles create --categories "Delete" --days 0 --enabled true --location westus2 --locations westus --name MyLogProfile --service-bus-rule-id "/subscriptions/{YOUR SUBSCRIPTION ID}/resourceGroups/{RESOURCE GROUP NAME}/providers/Microsoft.EventHub/namespaces/{EVENT HUB NAME SPACE}/authorizationrules/RootManageSharedAccessKey"
crafted: true
"""
helps['monitor log-profiles update'] = """
type: command
short-summary: Update a log profile.
examples:
- name: Update a log profile. (autogenerated)
text: |
az monitor log-profiles update --name MyLogProfile --set retentionPolicy.days=365
crafted: true
"""
helps['monitor metrics'] = """
type: group
short-summary: View Azure resource metrics.
"""
helps['monitor metrics alert'] = """
type: group
short-summary: Manage near-realtime metric alert rules.
"""
helps['monitor metrics alert create'] = """
type: command
short-summary: Create a metric-based alert rule.
parameters:
- name: --action -a
short-summary: Add an action group and optional webhook properties to fire when the alert is triggered.
long-summary: |
Usage: --action ACTION_GROUP_NAME_OR_ID [KEY=VAL [KEY=VAL ...]]
Multiple action groups can be specified by using more than one `--action` argument.
- name: --disabled
short-summary: Create the rule in a disabled state.
- name: --condition
short-summary: The condition which triggers the rule.
It can be created by the 'az monitor metrics alert condition create' command.
long-summary: |
Usage: --condition {avg,min,max,total,count} [NAMESPACE.]METRIC
[{=,!=,>,>=,<,<=} THRESHOLD]
[{<,>,><} dynamic SENSITIVITY VIOLATIONS of EVALUATIONS [since DATETIME]]
[where DIMENSION {includes,excludes} VALUE [or VALUE ...]
[and DIMENSION {includes,excludes} VALUE [or VALUE ...] ...]]
[with skipmetricvalidation]
Sensitivity can be 'low', 'medium', or 'high'.
Violations is the number of violations required to trigger an alert. It must be smaller than or equal to the number of evaluations.
Evaluations is the number of evaluation periods for the dynamic threshold.
Datetime is the date from which to start learning the metric historical data and calculating the dynamic thresholds (in ISO 8601 format).
Dimensions can be queried by adding the 'where' keyword, and multiple dimensions can be queried by combining them with the 'and' keyword.
Values for METRIC, DIMENSION and appropriate THRESHOLD values can be obtained from the `az monitor metrics list-definitions` command.
Due to a server limitation, when an alert rule contains multiple criteria, the use of dimensions is limited to one value per dimension within each criterion.
Multiple conditions can be specified by using more than one `--condition` argument.
examples:
- name: Create a high CPU usage alert on a VM with no action.
text: >
az monitor metrics alert create -n alert1 -g {ResourceGroup} --scopes {VirtualMachineID} --condition "avg Percentage CPU > 90" --description "High CPU"
- name: Create a high CPU usage alert on a VM with email and webhook actions.
text: |
az monitor metrics alert create -n alert1 -g {ResourceGroup} --scopes {VirtualMachineID} \\
--condition "avg Percentage CPU > 90" --window-size 5m --evaluation-frequency 1m \\
--action "/subscriptions/<subscriptionId>/resourceGroups/<resourceGroupName>/providers/Microsoft.Insights/actionGroups/<actionGroupName>" apiKey={APIKey} type=HighCPU \\
--description "High CPU"
- name: Create an alert when a storage account shows a high number of slow transactions, using multi-dimensional filters.
text: |
az monitor metrics alert create -g {ResourceGroup} -n alert1 --scopes {StorageAccountId} \\
--description "Storage Slow Transactions" \\
--condition "total transactions > 5 where ResponseType includes Success" \\
--condition "avg SuccessE2ELatency > 250 where ApiName includes GetBlob"
- name: Create a metric-based alert rule that monitors a custom metric.
text: |
az monitor metrics alert create -n "metric alert rule on a custom metric" -g "Demos" --scopes {VirtualMachineID} \\
--condition "max Azure.VM.Windows.GuestMetrics.Memory\\Available Bytes > 90" \\
--window-size 5m --evaluation-frequency 1m
- name: Create a high CPU usage alert on several VMs with no actions.
text: |
az monitor metrics alert create -n alert1 -g {ResourceGroup} --scopes {VirtualMachineID1} {VirtualMachineID2} {VirtualMachineID3} \\
--condition "avg Percentage CPU > 90" --description "High CPU" --region westus
- name: Create a dynamic CPU usage alert on several VMs with no actions.
text: |
az monitor metrics alert create -n alert1 -g {ResourceGroup} --scopes {VirtualMachineID1} {VirtualMachineID2} {VirtualMachineID3} \\
--condition "avg Percentage CPU > dynamic medium 2 of 4 since 2020-10-01T10:23:00.000Z"
--description "Dynamic CPU"
--window-size 5m
--region westus
"""
helps['monitor metrics alert dimension'] = """
type: group
short-summary: Manage near-realtime metric alert rule dimensions.
"""
helps['monitor metrics alert dimension create'] = """
type: command
short-summary: Build a metric alert rule dimension.
examples:
- name: Build a metric alert rule dimension.
text: |
$dim = az monitor metrics alert dimension create -n dimName --op Include -v GetBlob PutBlob
"""
helps['monitor metrics alert condition'] = """
type: group
short-summary: Manage near-realtime metric alert rule conditions.
"""
helps['monitor metrics alert condition create'] = """
type: command
short-summary: Build a metric alert rule condition.
parameters:
- name: --metric
short-summary: Name of the metric to base the rule on.
populator-commands:
- az monitor metrics list-definitions
examples:
- name: Build a static condition.
text: |
$dim1 = az monitor metrics alert dimension create -n dimName --op Include -v GetBlob PutBlob
$dim2 = az monitor metrics alert dimension create -n Instance --op Exclude -v Get Put
$condition = az monitor metrics alert condition create -t static \\
--aggregation Count \\
--metric "CPU Percentage" \\
--op GreaterThan \\
--threshold 95 \\
--dimension "$dim1" "$dim2"
- name: Build a dynamic condition.
text: |
$condition = az monitor metrics alert condition create -t dynamic \\
--aggregation Average \\
--metric "CPU Percentage" \\
--op GreaterOrLessThan \\
--num-violations 4 \\
--num-periods 4 \\
--since 2020-11-02T12:11
"""
helps['monitor metrics alert delete'] = """
type: command
short-summary: Delete a metrics-based alert rule.
examples:
- name: Delete a metrics-based alert rule. (autogenerated)
text: |
az monitor metrics alert delete --name MyAlertRule --resource-group MyResourceGroup
crafted: true
"""
helps['monitor metrics alert list'] = """
type: command
short-summary: List metric-based alert rules.
examples:
- name: List metric-based alert rules. (autogenerated)
text: |
az monitor metrics alert list --resource-group MyResourceGroup
crafted: true
"""
helps['monitor metrics alert show'] = """
type: command
short-summary: Show a metrics-based alert rule.
examples:
- name: Show a metrics-based alert rule. (autogenerated)
text: |
az monitor metrics alert show --name MyAlertRule --resource-group MyResourceGroup
crafted: true
"""
helps['monitor metrics alert update'] = """
type: command
short-summary: Update a metric-based alert rule.
parameters:
- name: --add-condition
short-summary: Add a condition which triggers the rule.
long-summary: |
Usage: --add-condition {avg,min,max,total,count} [NAMESPACE.]METRIC
[{=,!=,>,>=,<,<=} THRESHOLD]
[{<,>,><} dynamic SENSITIVITY VIOLATIONS of EVALUATIONS [since DATETIME]]
[where DIMENSION {includes,excludes} VALUE [or VALUE ...]
[and DIMENSION {includes,excludes} VALUE [or VALUE ...] ...]]
Sensitivity can be 'low', 'medium', or 'high'.
Violations is the number of violations required to trigger an alert. It must be smaller than or equal to the number of evaluations.
Evaluations is the number of evaluation periods for the dynamic threshold.
Datetime is the date from which to start learning the metric historical data and calculating the dynamic thresholds (in ISO 8601 format).
Dimensions can be queried by adding the 'where' keyword, and multiple dimensions can be queried by combining them with the 'and' keyword.
Values for METRIC, DIMENSION and appropriate THRESHOLD values can be obtained from the `az monitor metrics list-definitions` command.
Due to a server limitation, when an alert rule contains multiple criteria, the use of dimensions is limited to one value per dimension within each criterion.
Multiple conditions can be specified by using more than one `--add-condition` argument.
- name: --remove-conditions
short-summary: Space-separated list of condition names to remove.
- name: --add-action
short-summary: Add an action group and optional webhook properties to fire when the alert is triggered.
long-summary: |
Usage: --add-action ACTION_GROUP_NAME_OR_ID [KEY=VAL [KEY=VAL ...]]
Multiple action groups can be specified by using more than one `--add-action` argument.
- name: --remove-actions
short-summary: Space-separated list of action group names to remove.
examples:
- name: Disable/Enable a metric-based alert rule.
text: |
az monitor metrics alert update --enabled false --name MyAlertRule --resource-group MyResourceGroup
"""
helps['monitor metrics list'] = """
type: command
short-summary: List the metric values for a resource.
parameters:
- name: --aggregation
short-summary: The list of aggregation types (space-separated) to retrieve.
populator-commands:
- az monitor metrics list-definitions
- name: --interval
short-summary: >
The interval over which to aggregate metrics, in ##h##m format.
- name: --filter
short-summary: A string used to reduce the set of metric data returned, e.g. "BlobType eq '*'".
long-summary: 'For a full list of filters, see the filter string reference at https://docs.microsoft.com/rest/api/monitor/metrics/list'
- name: --metadata
short-summary: Returns the metadata values instead of metric data.
- name: --dimension
short-summary: The list of dimensions (space-separated) by which to split the returned data series.
populator-commands:
- az monitor metrics list-definitions
- name: --namespace
short-summary: Namespace to query metric definitions for.
populator-commands:
- az monitor metrics list-namespaces
- name: --offset
short-summary: >
Time offset of the query range, in ##d##h format.
long-summary: >
Can be used with either --start-time or --end-time. If used with --start-time, then
the end time will be calculated by adding the offset. If used with --end-time (default), then
the start time will be calculated by subtracting the offset. If --start-time and --end-time are
provided, then --offset will be ignored.
- name: --metrics
short-summary: >
Space-separated list of metric names to retrieve.
populator-commands:
- az monitor metrics list-definitions
examples:
- name: List a VM's CPU usage for the past hour
text: >
az monitor metrics list --resource {ResourceName} --metric "Percentage CPU"
- name: List success E2E latency of a storage account and split the data series based on API name
text: >
az monitor metrics list --resource {ResourceName} --metric SuccessE2ELatency \\
--dimension ApiName
- name: List success E2E latency of a storage account and split the data series based on both API name and geo type
text: >
az monitor metrics list --resource {ResourceName} --metric SuccessE2ELatency \\
--dimension ApiName GeoType
- name: List success E2E latency of a storage account and split the data series based on both API name and geo type using "--filter" parameter
text: >
az monitor metrics list --resource {ResourceName} --metric SuccessE2ELatency \\
--filter "ApiName eq '*' and GeoType eq '*'"
- name: List success E2E latency of a storage account and split the data series based on both API name and geo type. Limits the api name to 'DeleteContainer'
text: >
az monitor metrics list --resource {ResourceName} --metric SuccessE2ELatency \\
--filter "ApiName eq 'DeleteContainer' and GeoType eq '*'"
- name: List transactions of a storage account per day since 2017-01-01
text: >
az monitor metrics list --resource {ResourceName} --metric Transactions \\
--start-time 2017-01-01T00:00:00Z \\
--interval PT24H
- name: List the metadata values for a storage account under transaction metric's api name dimension since 2017
text: >
az monitor metrics list --resource {ResourceName} --metric Transactions \\
--filter "ApiName eq '*'" \\
--start-time 2017-01-01T00:00:00Z
"""
helps['monitor metrics list-definitions'] = """
type: command
short-summary: List the metric definitions for the resource.
parameters:
- name: --namespace
short-summary: Namespace to query metric definitions for.
populator-commands:
- az monitor metrics list-namespaces
examples:
- name: List the metric definitions for the resource. (autogenerated)
text: |
az monitor metrics list-definitions --resource /subscriptions/{subscriptionID}/resourceGroups/{resourceGroup}/Microsoft.Network/networkSecurityGroups/{resourceName}
crafted: true
"""
helps['monitor metrics list-namespaces'] = """
type: command
short-summary: List the metric namespaces for the resource.
examples:
- name: List the metric namespaces for the resource.
text: |
az monitor metrics list-namespaces --resource /subscriptions/{subscriptionID}/resourceGroups/{resourceGroup}/Microsoft.Network/networkSecurityGroups/{resourceName} --start-time 2021-03-01T00:00:00Z
"""
helps['monitor clone'] = """
type: command
short-summary: Clone metrics alert rules from one resource to another resource.
examples:
- name: Clone the metric alert settings from one VM to another
text: |
az monitor clone --source-resource /subscriptions/{subscriptionID}/resourceGroups/Space1999/providers/Microsoft.Compute/virtualMachines/vm1 --target-resource /subscriptions/{subscriptionID}/resourceGroups/Space1999/providers/Microsoft.Compute/virtualMachines/vm2
"""
helps['monitor private-link-scope'] = """
type: group
short-summary: Manage monitor private link scope resource.
"""
helps['monitor private-link-scope create'] = """
type: command
short-summary: Create a private link scope resource.
examples:
- name: Create a private link scope resource (autogenerated)
text: |
az monitor private-link-scope create --name MyAzureMonitorPrivateLinkScope --resource-group MyResourceGroup
crafted: true
"""
helps['monitor private-link-scope update'] = """
type: command
short-summary: Update a monitor private link scope resource.
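examples:
# Illustrative example; assumes the generic --tags argument is supported by the update command.
- name: Update the tags of a monitor private link scope resource.
text: az monitor private-link-scope update --name MyAzureMonitorPrivateLinkScope --resource-group MyResourceGroup --tags env=dev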
"""
helps['monitor private-link-scope list'] = """
type: command
short-summary: List all monitor private link scope resources.
"""
helps['monitor private-link-scope show'] = """
type: command
short-summary: Show a monitor private link scope resource.
"""
helps['monitor private-link-scope delete'] = """
type: command
short-summary: Delete a monitor private link scope resource.
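examples:
# Illustrative example; uses the same --name/--resource-group parameters shown for the create command.
- name: Delete a monitor private link scope resource.
text: az monitor private-link-scope delete --name MyAzureMonitorPrivateLinkScope --resource-group MyResourceGroup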
"""
helps['monitor private-link-scope scoped-resource'] = """
type: group
short-summary: Manage scoped resource of a private link scope resource.
"""
helps['monitor private-link-scope scoped-resource create'] = """
type: command
short-summary: Create a scoped resource for a private link scope resource.
examples:
- name: Create a scoped resource for a log analytics workspace.
text: az monitor private-link-scope scoped-resource create -g MyRG -n ScopedWS --linked-resource /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/clitest.rg000001/providers/microsoft.operationalinsights/workspaces/clitest000002 --scope-name MyScope
"""
helps['monitor private-link-scope scoped-resource list'] = """
type: command
short-summary: List all scoped resources of a private link scope resource.
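examples:
# Illustrative example; assumes the same -g/--scope-name parameters used by the scoped-resource create command.
- name: List all scoped resources of a private link scope resource.
text: az monitor private-link-scope scoped-resource list -g MyRG --scope-name MyScope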
"""
helps['monitor private-link-scope scoped-resource show'] = """
type: command
short-summary: Show a scoped resource of a private link scope resource.
"""
helps['monitor private-link-scope scoped-resource delete'] = """
type: command
short-summary: Delete a scoped resource of a private link scope resource.
"""
helps['monitor private-link-scope private-link-resource'] = """
type: group
short-summary: Manage private link resource of a private link scope resource.
"""
helps['monitor private-link-scope private-link-resource list'] = """
type: command
short-summary: List all private link resources of a private link scope resource.
"""
helps['monitor private-link-scope private-link-resource show'] = """
type: command
short-summary: Show a private link resource of a private link scope resource.
"""
helps['monitor private-link-scope private-endpoint-connection'] = """
type: group
short-summary: Manage private endpoint connection of a private link scope resource.
"""
helps['monitor private-link-scope private-endpoint-connection approve'] = """
type: command
short-summary: Approve a private endpoint connection of a private link scope resource.
parameters:
- name: --name -n
short-summary: Name of the private endpoint connection.
populator-commands:
- az monitor private-link-scope show
examples:
- name: Approve a private endpoint connection.
text: az monitor private-link-scope private-endpoint-connection approve --scope-name MyScope -g MyRG --name PrivateEndpointConnection
- name: Approve a private endpoint connection of a private link scope resource. (autogenerated)
text: |
az monitor private-link-scope private-endpoint-connection approve --name MyPrivateEndpointConnection --resource-group MyResourceGroup --scope-name MyScope
crafted: true
"""
helps['monitor private-link-scope private-endpoint-connection reject'] = """
type: command
parameters:
- name: --name -n
short-summary: Name of the private endpoint connection.
populator-commands:
- az monitor private-link-scope show
short-summary: Reject a private endpoint connection of a private link scope resource.
examples:
- name: Reject a private endpoint connection of a private link scope resource. (autogenerated)
text: |
az monitor private-link-scope private-endpoint-connection reject --name MyPrivateEndpointConnection --resource-group MyResourceGroup --scope-name MyScope
crafted: true
"""
helps['monitor private-link-scope private-endpoint-connection show'] = """
type: command
parameters:
- name: --name -n
short-summary: Name of the private endpoint connection.
populator-commands:
- az monitor private-link-scope show
short-summary: Show a private endpoint connection of a private link scope resource.
"""
helps['monitor private-link-scope private-endpoint-connection delete'] = """
type: command
short-summary: Delete a private endpoint connection of a private link scope resource.
"""
helps['monitor private-link-scope private-endpoint-connection list'] = """
type: command
short-summary: List all private endpoint connections of a private link scope resource.
"""
| yugangw-msft/azure-cli | src/azure-cli/azure/cli/command_modules/monitor/_help.py | Python | mit | 83,661 | [
"VisIt"
] | a4edabfe8b315ff083b8a86ba783aeceead5af067a079c1b9debff71feea902e |
# -*- coding: utf-8 -*-
# This program is public domain
# Author: Paul Kienzle
"""
Neutron scattering factors for the elements and isotopes.
For details of neutron scattering factor values, see :class:`Neutron`.
The property is set to *None* if there is no neutron scattering information
for the element. Individual isotopes may have their own scattering
information.
Example
=======
Print a table of coherent scattering length densities for isotopes
of a particular element:
.. doctest::
>>> import periodictable
>>> for iso in periodictable.Ni:
... if iso.neutron.has_sld():
... print("%s %7.4f"%(iso,iso.neutron.sld()[0]))
58-Ni 13.1526
60-Ni 2.5575
61-Ni 6.9417
62-Ni -7.9464
64-Ni -0.3379
Details
=======
There are a number of functions available in periodictable.nsf
:func:`neutron_energy`
Return neutron energy given wavelength.
:func:`neutron_wavelength`
Return wavelength given neutron energy.
:func:`neutron_wavelength_from_velocity`
Return wavelength given neutron velocity.
:func:`neutron_scattering`
Computes scattering length density, cross sections and
penetration depth for a compound.
:func:`neutron_sld`
Computes scattering length density for a compound.
:func:`neutron_composite_sld`
Returns a scattering length density for a compound whose composition
is variable.
:func:`energy_dependent_table`
Lists isotopes with energy dependence.
:func:`sld_table`
Lists scattering length densities for all elements in natural abundance.
:func:`absorption_comparison_table`
Compares the imaginary bound coherent scattering length to the
absorption cross section.
:func:`coherent_comparison_table`
Compares the bound coherent scattering length to the
coherent scattering cross section.
:func:`total_comparison_table`
Compares the total scattering cross section to the sum of the
coherent and incoherent scattering cross sections.
For private tables use :func:`init` to set the data.
The neutron scattering information table is reproduced from the Atomic
Institute for Austrian Universities\ [#Rauch2003]_ (retrieved March 2008):
http://www.ati.ac.at/~neutropt/scattering/table.html
The above site has references to the published values for every entry in
the table. We have included these in the documentation directory
associated with the periodictable package.
.. Note:
Entries in the table have been measured independently, so the values
measured for the scattering length of an element or isotope may be
inconsistent with the values measured for the corresponding cross section.
.. [#Rauch2003] Rauch, H. and Waschkowski, W. (2003)
Neutron Scattering Lengths in ILL
Neutron Data Booklet (second edition), A.-J. Dianoux, G. Lander, Eds.
Old City Publishing, Philadelphia, PA. pp 1.1-1 to 1.1-17.
.. [#Rauch2000] Rauch, H. and Waschkowski, W. (2000)
Neutron scattering lengths. Schopper, H. (ed.).
SpringerMaterials - The Landolt-Börnstein Database (http://www.springermaterials.com).
doi: 10.1007/10499706_6
.. [#Koester1991] Koester, L., Rauch, H., Seymann. E. (1991)
Atomic Data Nuclear Data Tables 49, 65
.. [#Lynn1990] Lynn, J.E. and Seeger, P.A. (1990)
Resonance effects in neutron scattering lengths of rare-earth nuclides.
Atomic Data and Nuclear Data Tables 44, 191-207.
.. [#Sears1999] Sears, V. F. (1999)
4.4.4 Scattering lengths for neutrons.
In Wilson & Prince eds. Intl. Tables for Crystallography C
Kluwer Academic Publishers. pp 448-449.
.. [#Sears1992] Sears, V.F. (1992)
Neutron scattering lengths and cross sections.
Neutron News 3, No. 3, 26-37.
.. [#May1982] May, R.P., Ibel, K. and Haas, J. (1982)
The forward scattering of cold neutrons by mixtures of light and heavy water.
J. Appl. Cryst. 15, 15-19.
.. [#Smith2006] Smith, G.S. and Majkrzak, C.M. (2006)
2.9 Neutron reflectometry.
In E. Prince ed. Intl. Tables for Crystallography C
Wiley InterScience. pp 126-146.
doi: 10.1107/97809553602060000584
.. [#Glinka2011] Glinka, C.J. (2011)
Incoherent Neutron Scattering from Multi-element Materials.
J. Appl. Cryst. 44, 618-624.
doi: 10.1107/S0021889811008223
"""
import numpy
from numpy import sqrt, pi, asarray, inf
from .core import Element, Isotope, default_table
from .constants import (avogadro_number, plancks_constant, electron_volt,
neutron_mass, atomic_mass_constant)
from .util import require_keywords
__all__ = ['init', 'Neutron',
'neutron_energy', 'neutron_wavelength',
'neutron_wavelength_from_velocity',
'neutron_scattering', 'neutron_sld', 'neutron_composite_sld',
'sld_plot',
'absorption_comparison_table', 'coherent_comparison_table',
'incoherent_comparison_table', 'total_comparison_table',
'energy_dependent_table', 'sld_table',
'neutron_sld_from_atoms',
#'scattering_potential',
]
ABSORPTION_WAVELENGTH = 1.798
# Velocity (m/s) <=> wavelength (A)
# lambda = h / p = h (eV) (J/eV) / ( m_n (kg) v (m/s) ) (10^10 A/m)
#
# Since plancks constant is in eV
# lambda = (1e10 * h*electron_volt/(neutron_mass/N_A)) / velocity
# Energy (eV) <=> wavelength (A)
# h^2/(2 m_n kg lambda A) (10^20 A/m) (1000 meV/eV) / (electron_volt J/eV)
# Since plancks constant is in eV
# (h J)^2/electron_volt = ((h eV)(electron_volt J/eV))^2/electron_volt
# = (h eV)^2 * electron_volt
ENERGY_FACTOR = (plancks_constant**2*electron_volt
/ (2 * neutron_mass * atomic_mass_constant)) * 1e23
VELOCITY_FACTOR = (plancks_constant*electron_volt
/ (neutron_mass * atomic_mass_constant)) * 1e10
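# Rough sanity check on the unit bookkeeping above (approximate values; the
# exact numbers depend on the imported constants):
#   ENERGY_FACTOR   ~ 81.8 meV*A^2  =>  lambda(25.30 meV) ~ 1.798 A
#   VELOCITY_FACTOR ~ 3956 A*m/s    =>  lambda(2200 m/s)  ~ 1.798 A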
def neutron_wavelength(energy):
"""
Convert neutron energy to wavelength.
:Parameters:
*energy* : float or vector | meV
:Returns:
*wavelength* : float or vector | |Ang|
Energy is converted to wavelength using
.. math::
E = 1/2 m_n v^2 = h^2 / (2 m_n \lambda^2)
\Rightarrow \lambda = \sqrt{h^2 / (2 m_n E)}
where
$h$ = planck's constant in |Js|
$m_n$ = neutron mass in kg
"""
return sqrt(ENERGY_FACTOR / asarray(energy))
def neutron_wavelength_from_velocity(velocity):
"""
Convert neutron velocity to wavelength.
:Parameters:
*velocity* : float or vector | m/s
:Returns:
*wavelength* : float or vector | |Ang|
Velocity is converted to wavelength using
.. math::
\lambda = h/p = h/(m_n v)
where
$h$ = planck's constant in |Js|
$m_n$ = neutron mass in kg
"""
return VELOCITY_FACTOR / velocity
def neutron_energy(wavelength):
"""
Convert neutron wavelength to energy.
:Parameters:
*wavelength* : float or vector | |Ang|
:Returns:
*energy* : float or vector | meV
Wavelength is converted to energy using
.. math::
E = 1/2 m_n v^2 = h^2 / (2 m_n \lambda^2)
where:
$h$ = planck's constant in |Js|
$m_n$ = neutron mass in kg
"""
return ENERGY_FACTOR / asarray(wavelength)**2
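# Illustrative round trip through the conversion helpers above (a sketch; the
# 25.30 meV / 1.798 A pair is the reference point used for the absorption table):
#
#     L = neutron_wavelength(25.30)                 # ~1.798 A
#     E = neutron_energy(L)                         # back to ~25.30 meV
#     L2 = neutron_wavelength_from_velocity(2200.)  # thermal neutrons, ~1.798 A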
def _CHECK_scattering_potential(sld):
"""
Convert neutron scattering length density to energy potential.
:Parameters:
*sld* : float or vector | |1e-6/Ang^2|
Scattering length density.
:Returns:
*energy* : float or vector | $10^{-6}$ eV
Scattering potential.
Computes:[#Smith2006]_
.. math::
V = 2 \pi \hbar^2 N_b / m_n
where:
$\hbar = h / (2 \pi)$
$h$ = planck's constant in |Js|
$N_b = \sum{ n_i b_i } / V$
$m_n$ = neutron mass in kg
"""
return (ENERGY_FACTOR/pi) * asarray(sld)
class Neutron(object):
"""
Neutron scattering factors are attached to each element in the periodic
table for which values are available. If no information is available,
then the neutron field of the element will be *None*. Even when neutron
information is available, it may not be complete, so individual fields
may be *None*.
The following fields are used:
* b_c (fm)
Bound coherent scattering length.
* b_c_i (fm)
Imaginary bound coherent scattering length. This is
related to absorption cross section by $\sigma_a = 4 \pi b_i/k$ where
$k = 2 \pi/\lambda$ and an additional factor of 1000 for converting
between |Ang|\ |cdot|\ fm and barns. b_c_i is not available
for all isotopes for which absorption cross sections have been measured.
* bp,bm (fm)
Spin-dependent scattering for I+1/2 and I-1/2 (not always available).
Incoherent scattering arises from the spin-dependent scattering b+
and b-. The Neutron Data Booklet\ [#Rauch2003]_ gives formulas for
calculating coherent and incoherent scattering from b+ and b- alone.
* bp_i,bm_i (fm)
Imaginary portion of bp and bm.
* is_energy_dependent (boolean)
Do not use this data if scattering is energy dependent.
* coherent (barn)
Coherent scattering cross section. In theory coherent scattering
is related to bound coherent scattering by $4 \pi b_c^2/100$.
In practice, these values are different, with the following
table showing the largest relative difference:
======== ======== ======== ======== ========
Sc 3% Ti 4% V 34% Mn 1% Cd 4%
Te 4% Xe 9% Sm 100% Eu 46% Gd 61%
Tb 1% Ho 11% W 4% Au 7% Hg 2%
======== ======== ======== ======== ========
* incoherent (barn)
Incoherent scattering cross section.
* total (barn)
Total scattering cross section. This is just coherent+incoherent.
* absorption (barn)
Absorption cross section at 1.798 |Ang|. Scale to your beam
by dividing by periodictable.nsf.ABSORPTION_WAVELENGTH and multiplying
by your wavelength.
For elements, the scattering cross-sections are based on the natural
abundance of the individual isotopes. Individual isotopes may have
the following additional fields
* abundance (%)
Isotope abundance used to compute the properties of the element in
natural abundance.
* nuclear_spin (string)
Spin on the nucleus: '0', '1/2', '3/2', etc.
Each field ``T`` above has a corresponding ``T_units`` attribute with the name
of the units. For scattering calculations, the scattering length density
is the value of interest. This is computed from the *number_density* of the
individual elements, as derived from the element density and atomic mass.
.. Note:: 1 barn = 100 |fm^2|
"""
b_c = None
b_c_i = None
b_c_units = "fm"
bp = None
bp_i = None
bp_units = "fm"
bm = None
bm_i = None
bm_units = "fm"
coherent = None
coherent_units = "barn"
incoherent = None
incoherent_units = "barn"
total = None
total_units = "barn"
absorption = None
absorption_units = "barn"
abundance = 0.
abundance_units = "%"
is_energy_dependent = False
def __init__(self):
self._number_density = None
def __str__(self):
return "b_c=%.3g coh=%.3g inc=%.3g abs=%.3g"\
%(self.b_c,self.coherent,self.incoherent,self.absorption)
def has_sld(self):
"""Returns *True* if sld is defined for this element/isotope."""
return None not in [self.b_c, self._number_density]
@require_keywords
def sld(self, wavelength=ABSORPTION_WAVELENGTH):
"""
Returns scattering length density for the element at natural
abundance and density.
:Parameters:
*wavelength* : float | |Ang|
:Returns:
*sld* : (float, float, float) | |1e-6/Ang^2|
(*real*, -*imaginary*, *incoherent*) scattering length density.
.. Note:
Values may not be correct when the element or isotope has
*is_energy_dependent=True*
See :func:`neutron_scattering` for details.
"""
# Compute number and absorption density assuming isotope has
# same structure as the bulk element
if not self.has_sld(): return None,None,None
N = self._number_density*1e-24
sigma_c = 0.01 * 4 * pi * self.b_c**2
sigma_a = self.absorption/ABSORPTION_WAVELENGTH*wavelength
sigma_i = self.incoherent
sld_re = N*self.b_c*10
sld_im = N*0.01*sigma_a/(2*wavelength)
sld_inc = N*sqrt ( 100/(4*pi) * sigma_i )*10
return sld_re,sld_im,sld_inc
@require_keywords
def scattering(self,wavelength=ABSORPTION_WAVELENGTH):
"""
Returns neutron scattering information for the element at natural
abundance and density.
:Parameters:
*wavelength* : float | |Ang|
:Returns:
*sld* : (float, float, float) | |1e-6/Ang^2|
(*real*, -*imaginary*, *incoherent*) scattering length density
*xs* : (float, float, float) | |1/cm|
(*coherent*, *absorption*, *incoherent*) cross sections.
*penetration* : float | cm
1/e penetration length.
.. Note:
Values may not be correct when the element or isotope has
*is_energy_dependent=True*
See :func:`neutron_scattering` for details.
"""
# Compute number and absorption density assuming isotope has
# same structure as the bulk element
if not self.has_sld(): return None,None,None
N = self._number_density*1e-24
sigma_c = 0.01 * 4 * pi * self.b_c**2
sigma_a = self.absorption/ABSORPTION_WAVELENGTH*wavelength
sigma_i = self.incoherent
sigma_s = self.total
sld_re = N*self.b_c*10
sld_im = N*0.01*sigma_a/(2*wavelength)
sld_inc = N*sqrt ( 100/(4*pi) * sigma_i )*10
coh_xs = N*sigma_c
abs_xs = N*sigma_a
inc_xs = N*sigma_i
total_xs = N*sigma_s
penetration = 1/(abs_xs+total_xs)
return (sld_re, sld_im, sld_inc), (coh_xs,abs_xs,inc_xs), penetration
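# Example of using the Neutron properties attached to elements and isotopes
# (a sketch; see the module docstring doctest for verified numbers). Note that
# sld() and scattering() accept keyword arguments only:
#
#     import periodictable
#     rho, irho, rho_inc = periodictable.Ni.neutron.sld(wavelength=4.75)
#     sld, xs, depth = periodictable.Ni.neutron.scattering(wavelength=4.75)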
def init(table, reload=False):
"""
Loads the Rauch table from the neutron data book.
"""
if 'neutron' in table.properties and not reload: return
table.properties.append('neutron')
assert ('density' in table.properties and
'mass' in table.properties), \
"Neutron table requires mass and density properties"
# Defaults for missing neutron information
missing = Neutron()
Isotope.neutron = missing
Element.neutron = missing
for line in nsftable.split('\n'):
columns = line.split(',')
nsf = Neutron()
p = columns[1]
spin = columns[2]
nsf.b_c,nsf.bp, nsf.bm = [fix_number(a) for a in columns[3:6]]
nsf.is_energy_dependent = (columns[6] == 'E')
nsf.coherent,nsf.incoherent,nsf.total,nsf.absorption \
= [fix_number(a) for a in columns[7:]]
parts = columns[0].split('-')
Z = int(parts[0])
symbol = parts[1]
isotope_number = int(parts[2]) if len(parts)==3 else 0
# Fetch element from the table and check that the symbol matches
element = table[Z]
assert element.symbol == symbol, \
"Symbol %s does not match %s"%(symbol,element.symbol)
# Plug the default number density for the element into the nsf so
# it can calculate sld.
nsf._number_density = element.number_density
# For new elements, clear out 'neutron' attribute for isotopes
# This protects against isotope using the element data when
# they don't have any specific neutron data.
#if isotope_number == 0 or not hasattr(element,'neutron'):
# for iso in element: iso.neutron = None
if isotope_number == 0:
# Bulk values using laboratory abundances of isotopes
element.neutron = nsf
else:
# Values for the individual isotope
isotope = element.add_isotope(isotope_number)
isotope.neutron = nsf
isotope.nuclear_spin = spin
# p column contains either abundance(uncertainty) or "half-life Y"
isotope.neutron.abundance = fix_number(p) if ' ' not in p else 0
# If the element is not yet initialized, copy info into the atom.
# This serves to set the element info for elements with only
# one isotope.
if element.neutron is missing:
element.neutron = nsf
for line in nsftableI.split('\n'):
columns = line.split(',')
# Fetch the nsf record
parts = columns[0].split('-')
Z = int(parts[0])
symbol = parts[1]
isotope_number = int(parts[2]) if len(parts)==3 else 0
element = table[Z]
if isotope_number == 0:
nsf = element.neutron
else:
nsf = element[isotope_number].neutron
# Read imaginary values
nsf.b_c_i,nsf.bp_i,nsf.bm_i = [fix_number(a) for a in columns[1:]]
# Xe total cross section is missing from the table
# put it in even though it has not been independently measured
if table.Xe.neutron.total is None:
table.Xe.neutron.total = table.Xe.neutron.coherent + table.Xe.neutron.incoherent
# Note: docs and function prototype are reproduced in __init__
@require_keywords
def neutron_scattering(compound, density=None,
wavelength=ABSORPTION_WAVELENGTH, energy=None,
natural_density=None):
r"""
Computes neutron scattering cross sections for molecules.
:Parameters:
*compound* : Formula initializer
Chemical formula
*density* : float | |g/cm^3|
Mass density
*natural_density* : float | |g/cm^3|
Mass density of formula with naturally occurring abundances
*wavelength* 1.798 : float | |Ang|
Neutron wavelength.
*energy* : float | meV
Neutron energy. If energy is specified then wavelength is ignored.
:Returns:
*sld* : (float, float, float) | |1e-6/Ang^2|
(*real*, -*imaginary*, *incoherent*) scattering length density.
*xs* : (float, float, float) | |1/cm|
(*coherent*, *absorption*, *incoherent*) cross sections.
*penetration* : float | cm
1/e penetration depth of the beam
:Raises:
*AssertionError* : density is missing.
.. Note:
Values may not correct if any element or isotope has
*is_energy_dependent=True*
The coherent and incoherent cross sections are calculated from the
bound scattering lengths for nuclei. The actual cross sections depend
on the incoming neutron energy and sample temperature, especially for
light elements. For low energy neutrons (cold neutrons), the tabulated
cross sections are generally a lower limit. The measured incoherent
scattering from hydrogen, for example, can be considerably larger
(by more than 20%) than its bound value. For example, the incoherent
scattering cross section of H2O is 5.621/cm as computed from these tables
compared to ~7.0/cm as measured with 5 meV neutrons at 290K. [#May1982]_
The scattering factor tables are not self-consistent. The following
functions show discrepancies between the various measurements of the
scattering potential:
:func:`absorption_comparison_table`
:func:`coherent_comparison_table`
:func:`total_comparison_table`
To compute the neutron cross sections we first need to average
quantities for the unit cell of the molecule.
Molar mass *m* (g/mol) is the sum of the masses of each component:
.. math::
m = \sum{n_i m_i}\ {\rm for\ each\ atom}\ i=1,2,\ldots
Cell volume $V$ (|Ang^3|/molecule) is molar mass $m$ over density
$\rho$, with a correction based on Avogadro's number $N_A$ (atoms/mol)
and the length conversion $10^8$ |Ang|/cm:
.. math::
V = m/\rho \cdot 1/N_A \cdot (10^8)^3
Number density $N$ is the number of scatterers per unit volume:
.. math::
N = \left.\sum{n_i} \right/ V
Coherent scattering cross section $\sigma_c$ of the molecule is computed
from the average scattering length of its constituent atoms, weighted by
their frequency.
.. math::
b_c = \left.\sum n_i b_{c,i} \right/ \sum n_i
This is converted to a scattering cross section and scaled
by 1 barn = 100 |fm^2|:
.. math::
\sigma_c = \left. 4 \pi b_c^2 \right/ 100
Similarly, the absorption cross section $\sigma_a$, the incoherent cross
section $\sigma_i$, and the total cross section $\sigma_s$ can be computed
from the corresponding cross sections of the constituent elements,\ [#Sears1999]_
already expressed in barns:
.. math::
\sigma_a &= \left. \sum n_j \sigma_{aj} \right/ \sum n_j \\
\sigma_i &= \left. \sum n_j \sigma_{ij} \right/ \sum n_j \\
\sigma_s &= \left. \sum n_j \sigma_{sj} \right/ \sum n_j
The neutron cross sections are tabulated at wavelength 1.798 |Ang|.
In the thermal neutron energy range most absorption cross sections
scale linearly with wavelength,\ [#Lynn1990]_ and can be adjusted
with a simple multiplication:
.. math::
\sigma_a = \sigma_a \lambda / \lambda_o = \sigma_a \lambda / 1.798
If *isotope.neutron.is_energy_dependent()* is true for any part of
the material, then this relation may not hold, and the returned values
are only valid for 1.798 |Ang|.
From the scattering cross sections, the scattering length for a material
$b = b' - i b''$ can be computed using the following relations:[#Sears1999]_
.. math::
\sigma_c &= 4 \pi |b_c|^2 \\
\sigma_a &= \left. 4 \pi \left< b'' \right> \right/k \ {\rm for} \ k=2\pi / \lambda \\
\sigma_i &= 4 \pi |b_i|^2 \\
\sigma_s &= 4 \pi \left< |b|^2 \right>
Transforming we get:
.. math::
b'' &= \left. \sigma_a \right/ (2 \lambda) \\
b_i &= \sqrt{ \sigma_i / (4 \pi) }
The incoherent scattering length $b_i$ can be treated primarily
as an absorption length in large scale structure calculations, with the
complex scattering length approximated by $b = b_c - i (b'' + b_i)$.
The scattering potential is often expressed as a scattering length
density (SLD). This is just the number density of the scatterers times
their scattering lengths, with a correction for units.
.. math::
\rho_{\rm re} &= 10 N b_c \\
\rho_{\rm im} &= -N b'' / 100 \\
\rho_{\rm inc} &= 10 N b_i
with the factors of 10 chosen to give SLD in units of $\AA^{-2}$. The
resulting $\rho = \rho_{\rm re} + i \rho_{\rm im}$ can be used in the
scattering equations. Treatment of the incoherent scattering $\rho_{\rm inc}$
will depend on the equation. For example, it can be treated as an absorption
in specular reflectivity calculations since the incoherently scattered neutrons
are removed from the multilayer recurrence calculation.
Similarly, scattering cross section includes number density:
.. math::
\Sigma_{\rm coh} &= N \sigma_c \\
\Sigma_{\rm inc} &= N \sigma_i \\
\Sigma_{\rm abs} &= N \sigma_a \\
\Sigma_{\rm s} &= N \sigma_s
The 1/e penetration depth *t_u* represents the depth into the sample at
which the unscattered intensity is reduced by a factor of $e$:
.. math::
t_u = \left. 1 \right/ (\Sigma_{\rm s} + \Sigma_{\rm abs})
Note that the calculated penetration depth includes the effects of both
absorption and incoherent scattering (which spreads the beam in the
full $4\pi$ spherical surface, and so it looks like absorption with
respect to the beam), as well as the coherent scattering from the sample.
If you instead want to calculate the effective shielding of the sample,
you should recalculate penetration depth without the coherent scattering.
Transmission rate can be computed from $e^{-d/t_u}$ for penetration
depth $t_u$ and sample thickness $d$.
In general, the total scattering cross section $\Sigma_{\rm s}$ is not the
sum of the coherent and incoherent cross sections
$\Sigma_{\rm coh}+\Sigma_{\rm inc}$.\ [#Glinka2011]_
Including unit conversion with $\mu=10^{-6}$ the full scattering equations
are:
.. math::
\rho_{\rm re}\,(\mu/\AA^2) &= (N/\AA^3)
\, (b_c\,{\rm fm})
\, (10^{-5} \AA/{\rm\,fm})
\, (10^6\,\mu) \\
\rho_{\rm im}\,(\mu/\AA^2) &= (N/\AA^3)
\, (\sigma_a\,{\rm barn})
\, (10^{-8}\,\AA^2/{\rm barn}) / (2 \lambda\, \AA)
\, (10^6\,\mu) \\
\rho_{\rm inc}\,(\mu/\AA^2) &= (N/\AA^3)
\, \sqrt{(\sigma_i\, {\rm barn})/(4 \pi)
\, (100\, {\rm fm}^2/{\rm barn})}
\, (10^{-5}\, \AA/{\rm fm})
\, (10^6\, \mu) \\
\Sigma_{\rm coh}\,(1/{\rm cm}) &= (N/\AA^3)
\, (\sigma_c\, {\rm barn})
\, (10^{-8}\, \AA^2/{\rm barn})
\, (10^8\, \AA/{\rm cm}) \\
\Sigma_{\rm inc}\,(1/{\rm cm}) &= (N/\AA^3)
\,(\sigma_i\, {\rm barn})
\, (10^{-8}\, \AA^2/{\rm barn})
\, (10^8\, \AA/{\rm cm}) \\
\Sigma_{\rm abs}\,(1/{\rm cm}) &= (N/\AA^3)
\,(\sigma_a\,{\rm barn})
\, (10^{-8}\, \AA^2/{\rm barn})
\, (10^8\, \AA/{\rm cm}) \\
\Sigma_{\rm s}\,(1/{\rm cm}) &= (N/\AA^3)
\,(\sigma_s\,{\rm barn})
\, (10^{-8}\, \AA^2/{\rm barn})
\, (10^8\, \AA/{\rm cm}) \\
t_u\,({\rm cm}) &= 1/(\Sigma_{\rm s}\, 1/{\rm cm}
\,+\, \Sigma_{\rm abs}\, 1/{\rm cm})
"""
from . import formulas
compound = formulas.formula(compound, density=density,
natural_density=natural_density)
assert compound.density is not None, "scattering calculation needs density"
if energy is not None:
wavelength = neutron_wavelength(energy)
assert wavelength is not None, "scattering calculation needs energy or wavelength"
# Sum over the quantities
molar_mass = num_atoms = 0
sigma_s = sigma_a = sigma_i = b_c = 0
is_energy_dependent = False
for element,quantity in compound.atoms.items():
if not element.neutron.has_sld(): return None, None, None
#print element,quantity,element.neutron.b_c,element.neutron.absorption,element.neutron.total
molar_mass += element.mass*quantity
num_atoms += quantity
sigma_a += quantity * element.neutron.absorption
sigma_i += quantity * element.neutron.incoherent
sigma_s += quantity * element.neutron.total
b_c += quantity * element.neutron.b_c
is_energy_dependent |= element.neutron.is_energy_dependent
# If nothing to sum, return values for a vacuum. This might be because
# the material has no atoms or it might be because the density is zero.
if molar_mass*compound.density == 0:
return (0,0,0), (0,0,0), inf
# Turn sums into scattering factors
b_c /= num_atoms
sigma_c = 4*pi/100*b_c**2
sigma_i /= num_atoms
sigma_a *= wavelength/ABSORPTION_WAVELENGTH/num_atoms
sigma_s /= num_atoms
#sigma_c_diffuse = sigma_s - (sigma_c + sigma_i)
# Compute number density
cell_volume = (molar_mass/compound.density)/avogadro_number*1e24 # (10^8 A/cm)^3
number_density = num_atoms / cell_volume
# Compute SLD
sld_re = number_density * b_c * 10
sld_im = number_density * sigma_a / (2 * wavelength) * 0.01
sld_inc = number_density * sqrt ( (100/(4*pi) * sigma_i) ) * 10
# Compute scattering cross section per unit volume
coh_xs = sigma_c * number_density
abs_xs = sigma_a * number_density
inc_xs = sigma_i * number_density
total_xs = sigma_s * number_density
# Compute 1/e length
penetration = 1/(abs_xs + total_xs)
return (sld_re,sld_im,sld_inc), (coh_xs,abs_xs,inc_xs), penetration
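# Example usage (a sketch; the formula and density are illustrative inputs):
#
#     sld, xs, depth = neutron_scattering('D2O', density=1.1, wavelength=4.75)
#     rho, irho, rho_inc = sld      # scattering length density, 1e-6/A^2
#     coh, absorp, inc = xs         # cross sections, 1/cm
#     # depth is the 1/e penetration length in cm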
def neutron_sld(*args, **kw):
"""
Computes neutron scattering length densities for molecules.
:Parameters:
*compound* : Formula initializer
Chemical formula
*density* : float | |g/cm^3|
Mass density
*natural_density* : float | |g/cm^3|
Mass density of formula with naturally occurring abundances
*wavelength* : float | |Ang|
Neutron wavelength.
*energy* : float | meV
Neutron energy. If energy is specified then wavelength is ignored.
:Returns:
*sld* : (float, float, float) | |1e-6/Ang^2|
(*real*, -*imaginary*, *incoherent*) scattering length density.
:Raises:
*AssertionError* : density is missing.
Returns the scattering length density of the compound.
See :func:`neutron_scattering` for details.
"""
return neutron_scattering(*args, **kw)[0]
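# neutron_sld is a thin wrapper returning only the SLD triple, e.g. (with an
# illustrative density):
#
#     rho, irho, rho_inc = neutron_sld('SiO2', density=2.2)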
def neutron_sld_from_atoms(*args, **kw):
"""
.. deprecated:: 0.91
:func:`neutron_sld` now accepts dictionaries of \{atom\: count\} directly.
"""
return neutron_scattering(*args, **kw)[0]
def _sum_piece(wavelength, compound):
"""
Helper for neutron_composite_sld which precomputes quantities of interest
for material fragments in a composite formula.
"""
# Sum over the quantities
molar_mass = num_atoms = 0
sigma_a = sigma_i = b_c = 0
is_energy_dependent = False
for element,quantity in compound.atoms.items():
#print element,quantity,element.neutron.b_c,element.neutron.absorption,element.neutron.total
molar_mass += element.mass*quantity
num_atoms += quantity
sigma_a += quantity * element.neutron.absorption
sigma_i += quantity * element.neutron.incoherent
b_c += quantity * element.neutron.b_c
is_energy_dependent |= element.neutron.is_energy_dependent
return num_atoms, molar_mass, b_c, sigma_i, sigma_a
def neutron_composite_sld(materials, wavelength=ABSORPTION_WAVELENGTH):
"""
Create a composite SLD calculator.
:Parameters:
*materials* : [Formula]
List of materials
*wavelength* = 1.798: float OR [float] | |Ang|
Probe wavelength(s).
:Returns:
*calculator* : f(w, density=1) -> (*real*, -*imaginary*, *incoherent*)
The composite calculator takes a vector of weights and returns the
scattering length density of the composite. This is useful for operations
on large molecules, such as calculating a set of constrasts or fitting
a material composition.
Table lookups and partial sums and constants are precomputed so that
the calculation consists of a few simple array operations regardless
of the size of the material fragments.
"""
parts = [_sum_piece(wavelength, m) for m in materials]
V = [numpy.array(v) for v in zip(*parts)]
SLD_IM = 0.01/(2*ABSORPTION_WAVELENGTH)
SLD_RE = 10
SLD_INC = 10*sqrt(100/(4*pi))
SIGMA_C = 4*pi/100
NB = 1e-24*avogadro_number # assumes density is 1
def _compute(q, density=1):
atoms = numpy.sum(q*V[0])
number_density = density * NB * atoms / numpy.sum(q*V[1])
b_c = numpy.sum(q*V[2])/atoms
sigma_c = SIGMA_C * b_c**2
sigma_i = numpy.sum(q*V[3])/atoms
sigma_a = numpy.sum(q*V[4])/atoms # at tabulated wavelength
sld_re = number_density * b_c * SLD_RE
sld_im = number_density * sigma_a * SLD_IM
sld_inc = number_density * sqrt(sigma_i) * SLD_INC
return sld_re, sld_im, sld_inc
return _compute
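# Sketch of composite usage (the materials, weights and density here are
# illustrative):
#
#     from periodictable import formulas
#     calc = neutron_composite_sld([formulas.formula('H2O'), formulas.formula('D2O')],
#                                  wavelength=5)
#     rho, irho, rho_inc = calc(numpy.array([0.3, 0.7]), density=1.05)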
def sld_plot(table=None):
"""
Plots SLD as a function of element number.
:Parameters:
*table* : PeriodicTable
The default periodictable unless a specific table has been requested.
:Returns: None
"""
from .plot import table_plot
table = default_table(table)
SLDs = dict((el,el.neutron.sld()[0])
for el in table
if el.neutron.has_sld())
SLDs[table.D] = table.D.neutron.sld()[0]
table_plot(SLDs, label='Scattering length density ($10^{-6}$ Nb)',
title='Neutron SLD for elements in natural abundance')
# We are including the complete original table here in case somebody in
# future wants to extract uncertainties or other information.
#
# Z-Symbol-A
# This is the atomic number, the symbol and the isotope.
# If Z-Symbol only, the line represents an element with scattering determined
# by the natural abundance of the isotopes in laboratory samples. If there
# is only one isotope, then there is no corresponding element definition.
# concentration/half-life
# This is the natural abundance of the isotope expressed as a percentage, or
# it is the half-life in years (number Y) or seconds (number S).
# spin I
# For isotopes, the nuclear spin.
# b_c, bp, bm
# Bound coherent scattering length in fm
# b+/b- if present are spin dependent scattering for I+1/2 and I-1/2
# respectively
# c
# 'E' if there is a strong energy dependency.
# '+/-' if separate b+/b- values are available [doesn't seem true -PAK]
# coherent, incoherent, total
# The coherent and incoherent scattering cross-sections in barns.
# absorption
# The thermal absorption cross section in barns at 1.798 Angstroms/25.30 meV.
#
# Numbers in parenthesis represents uncertainty.
# Numbers followed by '*' are estimated.
# Numbers may be given as limit, e.g., <1.0e-6
#
# Formatting corrections by Paul Kienzle
nsftable="""\
0-n-1,618 S,1/2,-37.0(6),0,-37.0(6),,43.01(2),,43.01(2),0
1-H,,,-3.7409(11),,,,1.7568(10),80.26(6),82.02(6),0.3326(7)
1-H-1,99.985,1/2,-3.7423(12),10.817(5),-47.420(14),+/-,1.7583(10),80.27(6),82.03(6),0.3326(7)
1-H-2,0.0149,1,6.674(6),9.53(3),0.975(60),,5.592(7),2.05(3),7.64(3),0.000519(7)
1-H-3,12.26 Y,1/2,4.792(27),4.18(15),6.56(37),,2.89(3),0.14(4),3.03(5),<6.0E-6
2-He,,,3.26(3),,,,1.34(2),0,1.34(2),0.00747(1)
2-He-3,0.013,1/2,5.74(7),4.7(5),8.8(1.4),E,4.42(10),1.6(4),6.0(4),5333.0(7.0)
2-He-4,99.987,0,3.26(3),,,,1.34(2),0,1.34(2),0
3-Li,,,-1.90(3),,,,0.454(10),0.92(3),1.37(3),70.5(3)
3-Li-6,7.5,1,2.0(1),0.67(14),4.67(17),+/-,0.51(5),0.46(5),0.97(7),940.0(4.0)
3-Li-7,92.5,3/2,-2.22(2),-4.15(6),1.00(8),+/-,0.619(11),0.78(3),1.40(3),0.0454(3)
4-Be-9,100,3/2,7.79(1),,,,7.63(2),0.0018(9),7.63(2),0.0076(8)
5-B,,,5.30(4),,,,3.54(5),1.70(12),5.24(11),767.0(8.0)
5-B-10,19.4,3,-0.2(4),-4.2(4),5.2(4),,0.144(6),3.0(4),3.1(4),3835.0(9.0)
5-B-11,80.2,3/2,6.65(4),5.6(3),8.3(3),,5.56(7),0.21(7),5.77(10),0.0055(33)
6-C,,,6.6484(13),,,,5.551(2),0.001(4),5.551(3),0.00350(7)
6-C-12,98.89,0,6.6535(14),,,,5.559(3),0,5.559(3),0.00353(7)
6-C-13,1.11,1/2,6.19(9),5.6(5),6.2(5),+/-,4.81(14),0.034(11),4.84(14),0.00137(4)
7-N,,,9.36(2),,,,11.01(5),0.50(12),11.51(11),1.90(3)
7-N-14,99.635,1,9.37(2),10.7(2),6.2(3),,11.03(5),0.50(12),11.53(11),1.91(3)
7-N-15,0.365,1/2,6.44(3),6.77(10),6.21(10),,5.21(5),0.00005(10),5.21(5),0.000024(8)
8-O,,,5.805(4),,,,4.232(6),0.000(8),4.232(6),0.00019(2)
8-O-16,99.75,0,5.805(5),,,,4.232(6),0,4.232(6),0.00010(2)
8-O-17,0.039,5/2,5.6(5),5.52(20),5.17(20),,4.20(22),0.004(3),4.20(22),0.236(10)
8-O-18,0.208,0,5.84(7),,,,4.29(10),0,4.29(10),0.00016(1)
9-F-19,100,1/2,5.654(12),5.632(10),5.767(10),+/-,4.017(14),0.0008(2),4.018(14),0.0096(5)
10-Ne,,,4.566(6),,,,2.620(7),0.008(9),2.628(6),0.039(4)
10-Ne-20,90.5,0,4.631(6),,,,2.695(7),0,2.695(7),0.036(4)
10-Ne-21,0.27,3/2,6.66(19),,,,5.6(3),0.05(2),5.7(3),0.67(11)
10-Ne-22,9.2,0,3.87(1),,,,1.88(1),0,1.88(1),0.046(6)
11-Na-23,100,3/2,3.63(2),6.42(4),-1.00(6),+/-,1.66(2),1.62(3),3.28(4),0.530(5)
12-Mg,,,5.375(4),,,,3.631(5),0.08(6),3.71(4),0.063(3)
12-Mg-24,78.99,0,5.49(18),,,,4.03(4),0,4.03(4),0.050(5)
12-Mg-25,10,5/2,3.62(14),4.73(30),1.76(20),+/-,1.65(13),0.28(4),1.93(14),0.19(3)
12-Mg-26,11,0,4.89(15),,,,3.00(18),0,3.00(18),0.0382(8)
13-Al-27,100,5/2,3.449(5),3.67(2),3.15(2),,1.495(4),0.0082(6),1.503(4),0.231(3)
14-Si,,,4.15071(22),,,,2.1633(10),0.004(8),2.167(8),0.171(3)
14-Si-28,92.2,0,4.106(6),,,,2.120(6),0,2.120(6),0.177(3)
14-Si-29,4.7,1/2,4.7(1),4.50(15),4.7(4),+/-,2.78(12),0.001(2),2.78(12),0.101(14)
14-Si-30,3.1,0,4.58(8),,,,2.64(9),0,2.64(9),0.107(2)
15-P-31,100,1/2,5.13(1),,,+/-,3.307(13),0.005(10),3.312(16),0.172(6)
16-S,,,2.847(1),,,,1.0186(7),0.007(5),1.026(5),0.53(1)
16-S-32,95,0,2.804(2),,,,0.9880(14),0,0.9880(14),0.54(4)
16-S-33,0.74,3/2,4.74(19),,,+/-,2.8(2),0.3(6),3.1(6),0.54(4)
16-S-34,4.2,0,3.48(3),,,,1.52(3),0,1.52(3),0.227(5)
16-S-36,0.02,0,3.0(1.0)*,,,,1.1(8),0,1.1(8),0.15(3)
17-Cl,,,9.5792(8),,,,11.528(2),5.3(5),16.8(5),33.5(3)
17-Cl-35,75.77,3/2,11.70(9),16.3(2),4.0(3),+/-,17.06(6),4.7(6),21.8(6),44.1(4)
17-Cl-37,24.23,3/2,3.08(6),3.10(7),3.05(7),+/-,1.19(5),0.001(3),1.19(5),0.433(6)
18-Ar,,,1.909(6),,,,0.458(3),0.225(5),0.683(4),0.675(9)
18-Ar-36,0.34,0,24.9(7),,,,77.9(4),0,77.9(4),5.2(5)
18-Ar-38,0.07,0,3.5(3.5),,,,1.5(3.1),0,1.5(3.1),0.8(5)
18-Ar-40,99.59,0,1.7,,,,0.421(3),0,0.421(3),0.660(9)
19-K,,,3.67(2),,,,1.69(2),0.27(11),1.96(11),2.1(1)
19-K-39,93.3,3/2,3.79(2),5.15,1.51,+/-,1.76(2),0.25(11),2.01(11),2.1(1)
19-K-40,0.012,4,3.1(1.0)*,,,,1.1(6),0.5(5)*,1.6(9),35.0(8.0)
19-K-41,6.7,3/2,2.69(8),,,,0.91(5),0.3(6),1.2(6),1.46(3)
20-Ca,,,4.70(2),,,,2.78(2),0.05(3),2.83(2),0.43(2)
20-Ca-40,96.94,0,4.78(5),,,,2.90(2),0,2.90(2),0.41(2)
20-Ca-42,0.64,0,3.36(10),,,,1.42(8),0,1.42(8),0.68(7)
20-Ca-43,0.13,7/2,-1.56(9),,,,0.31(4),0.5(5),0.8(5),6.2(6)
20-Ca-44,2.13,0,1.42(6),,,,0.25(2),0,0.25(2),0.88(5)
20-Ca-46,0.003,0,3.55(21),,,,1.6(2),0,1.6(2),0.74(7)
20-Ca-48,0.18,0,0.39(9),,,,0.019(9),0,0.019(9),1.09(14)
21-Sc-45,100,7/2,12.1(1),6.91(22),18.99(28),+/-,19.0(3),4.5(3),23.5(6),27.5(2)
22-Ti,,,-3.370(13),,,,1.485(2),2.87(3),4.35(3),6.09(13)
22-Ti-46,8,0,4.72(5),,,,3.05(7),0,3.05(7),0.59(18)
22-Ti-47,7.5,5/2,3.53(7),0.46(23),7.64(13),,1.66(11),1.5(2),3.2(2),1.7(2)
22-Ti-48,73.7,0,-5.86(2),,,,4.65(3),0,4.65(3),7.84(25)
22-Ti-49,5.5,7/2,0.98(5),2.6(3),-1.2(4),,0.14(1),3.3(3),3.4(3),2.2(3)
22-Ti-50,5.3,0,5.88(10),,,,4.80(12),0,4.80(12),0.179(3)
23-V,,,-0.443(14),,,,0.01838(12),5.08(6),5.10(6),5.08(4)
23-V-50,0.25,6,7.6(6)*,,,,7.3(1.1),0.5(5)*,7.8(1.0),60.0(40.0)
23-V-51,99.75,7/2,-0.402(2),4.93(25),-7.58(28),+/-,0.0203(2),5.07(6),5.09(6),4.9(1)
24-Cr,,,3.635(7),,,,1.660(6),1.83(2),3.49(2),3.05(6)
24-Cr-50,4.35,0,-4.50(5),,,,2.54(6),0,2.54(6),15.8(2)
24-Cr-52,83.8,0,4.914(15),,,,3.042(12),0,3.042(12),0.76(6)
24-Cr-53,9.59,3/2,-4.20(3),1.16(10),-13.0(2),,2.22(3),5.93(17),8.15(17),18.1(1.5)
24-Cr-54,2.36,0,4.55(10),,,,2.60(11),0,2.60(11),0.36(4)
25-Mn-55,100,5/2,-3.750(18),-4.93(46),-1.46(33),,1.75(2),0.40(2),2.15(3),13.3(2)
26-Fe,,,9.45(2),,,,11.22(5),0.40(11),11.62(10),2.56(3)
26-Fe-54,5.8,0,4.2(1),,,,2.2(1),0,2.2(1),2.25(18)
26-Fe-56,91.7,0,10.1(2),,,,12.42(7),0,12.42(7),2.59(14)
26-Fe-57,2.19,1/2,2.3(1),,,,0.66(6),0.3(3)*,1.0(3),2.48(30)
26-Fe-58,0.28,0,15(7),,,,28.0(26.0),0,28.0(26.0),1.28(5)
27-Co-59,100,7/2,2.49(2),-9.21(10),3.58(10),+/-,0.779(13),4.8(3),5.6(3),37.18(6)
28-Ni,,,10.3(1),,,,13.3(3),5.2(4),18.5(3),4.49(16)
28-Ni-58,67.88,0,14.4(1),,,,26.1(4),0,26.1(4),4.6(3)
28-Ni-60,26.23,0,2.8(1),,,,0.99(7),0,0.99(7),2.9(2)
28-Ni-61,1.19,3/2,7.60(6),,,,7.26(11),1.9(3),9.2(3),2.5(8)
28-Ni-62,3.66,0,-8.7(2),,,,9.5(4),0,9.5(4),14.5(3)
28-Ni-64,1.08,0,-0.37(7),,,,0.017(7),0,0.017(7),1.52(3)
29-Cu,,,7.718(4),,,,7.485(8),0.55(3),8.03(3),3.78(2)
29-Cu-63,69.1,3/2,6.477(13),,,+/-,5.2(2),0.006(1),5.2(2),4.50(2)
29-Cu-65,30.9,3/2,10.204(20),,,+/-,14.1(5),0.40(4),14.5(5),2.17(3)
30-Zn,,,5.680(5),,,,4.054(7),0.077(7),4.131(10),1.11(2)
30-Zn-64,48.9,0,5.23(4),,,,3.42(5),0,3.42(5),0.93(9)
30-Zn-66,27.8,0,5.98(5),,,,4.48(8),0,4.48(8),0.62(6)
30-Zn-67,4.1,5/2,7.58(8),5.8(5),10.1(7),+/-,7.18(15),0.28(3),7.46(15),6.8(8)
30-Zn-68,18.6,0,6.04(3),,,,4.57(5),0,4.57(5),1.1(1)
30-Zn-70,0.62,0,6.9(1.0)*,,,,4.5(1.5),0,4.5(1.5),0.092(5)
31-Ga,,,7.288(2),,,,6.675(4),0.16(3),6.83(3),2.75(3)
31-Ga-69,60,3/2,8.043(16),6.3(2),10.5(4),+/-,7.80(4),0.091(11),7.89(4),2.18(5)
31-Ga-71,40,3/2,6.170(11),5.5(6),7.8(1),+/-,5.15(5),0.084(8),5.23(5),3.61(10)
32-Ge,,,8.185(20),,,,8.42(4),0.18(7),8.60(6),2.20(4)
32-Ge-70,20.7,0,10.0(1),,,,12.6(3),0,12.6(3),3.0(2)
32-Ge-72,27.5,0,8.51(10),,,,9.1(2),0,9.1(2),0.8(2)
32-Ge-73,7.7,9/2,5.02(4),8.1(4),1.2(4),,3.17(5),1.5(3),4.7(3),15.1(4)
32-Ge-74,36.4,0,7.58(10),,,,7.2(2),0,7.2(2),0.4(2)
32-Ge-76,7.7,0,8.2(1.5),,,,8.0(3.0),0,8.0(3.0),0.16(2)
33-As-75,100,3/2,6.58(1),6.04(5),7.47(8),+/-,5.44(2),0.060(10),5.50(2),4.5(1)
34-Se,,,7.970(9),,,,7.98(2),0.32(6),8.30(6),11.7(2)
34-Se-74,0.9,0,0.8(3.0),,,,0.1(6),0,0.1(6),51.8(1.2)
34-Se-76,9,0,12.2(1),,,,18.7(3),0,18.7(3),85.0(7.0)
34-Se-77,7.5,0,8.25(8),,,,8.6(2),0.05(25),8.65(16),42.0(4.0)
34-Se-78,23.5,0,8.24(9),,,,8.5(2),0,8.5(2),0.43(2)
34-Se-80,50,0,7.48(3),,,,7.03(6),0,7.03(6),0.61(5)
34-Se-82,8.84,0,6.34(8),,,,5.05(13),0,5.05(13),0.044(3)
35-Br,,,6.79(2),,,,5.80(3),0.10(9),5.90(9),6.9(2)
35-Br-79,50.49,3/2,6.79(7),,,+/-,5.81(2),0.15(6),5.96(13),11.0(7)
35-Br-81,49.31,3/2,6.78(7),,,+/-,5.79(12),0.05(2),5.84(12),2.7(2)
36-Kr,,,7.81(2),,,,7.67(4),0.01(14),7.68(13),25.0(1.0)
36-Kr-78,0.35,0,,,,,,0,,6.4(9)
36-Kr-80,2.5,0,,,,,,0,,11.8(5)
36-Kr-82,11.6,0,,,,,,0,,29.0(20.0)
36-Kr-83,11.5,9/2,,,,,,,,185.0(30.0)
36-Kr-84,57,0,,,,,,0,6.6,0.113(15)
36-Kr-86,17.3,0,8.07(26),,,,8.2(4),0,8.2(4),0.003(2)
37-Rb,,,7.08(2),,,,6.32(4),0.5(4),6.8(4),0.38(1)
37-Rb-85,72.17,5/2,7.07(10),,,,6.2(2),0.5(5)*,6.7(5),0.48(1)
37-Rb-87,27.83,3/2,7.27(12),,,,6.6(2),0.5(5)*,7.1(5),0.12(3)
38-Sr,,,7.02(2),,,,6.19(4),0.06(11),6.25(10),1.28(6)
38-Sr-84,0.56,0,5.0(2.0),,,,6.0(2.0),0,6.0(2.0),0.87(7)
38-Sr-86,9.9,0,5.68(5),,,,4.04(7),0,4.04(7),1.04(7)
38-Sr-87,7,9/2,7.41(7),,,,6.88(13),0.5(5)*,7.4(5),16.0(3.0)
38-Sr-88,82.6,0,7.16(6),,,,6.42(11),0,6.42(11),0.058(4)
39-Y-89,100,1/2,7.75(2),8.4(2),5.8(5),+/-,7.55(4),0.15(8),7.70(9),1.28(2)
40-Zr,,,7.16(3),,,,6.44(5),0.02(15),6.46(14),0.185(3)
40-Zr-90,51.48,0,6.5(1),,,,5.1(2),0,5.1(2),0.011(59)
40-Zr-91,11.23,5/2,8.8(1),7.9(2),10.1(2),+/-,9.5(2),0.15(4),9.7(2),1.17(10)
40-Zr-92,17.11,0,7.5(2),,,,6.9(4),0,6.9(4),0.22(6)
40-Zr-94,17.4,0,8.3(2),,,,8.4(4),0,8.4(4),0.0499(24)
40-Zr-96,2.8,0,5.5(1),,,,3.8(1),0,3.8(1),0.0229(10)
41-Nb-93,100,9/2,7.054(3),7.06(4),7.35(4),+/-,6.253(5),0.0024(3),6.255(5),1.15(6)
42-Mo,,,6.715(20),,,,5.67(3),0.04(5),5.71(4),2.48(4)
42-Mo-92,15.48,0,6.93(8),,,,6.00(14),0,6.00(14),0.019(2)
42-Mo-94,9.1,0,6.82(7),,,,5.81(12),0,5.81(12),0.015(2)
42-Mo-95,15.72,5/2,6.93(7),,,,6.00(10),0.5(5)*,6.5(5),13.1(3)
42-Mo-96,16.53,0,6.22(6),,,,4.83(9),0,4.83(9),0.5(2)
42-Mo-97,9.5,5/2,7.26(8),,,,6.59(15),0.5(5)*,7.1(5),2.5(2)
42-Mo-98,23.78,0,6.60(7),,,,5.44(12),0,5.44(12),0.127(6)
42-Mo-100,9.6,0,6.75(7),,,,5.69(12),0,5.69(12),0.4(2)
43-Tc-99,210000 Y,9/2,6.8(3),,,,5.8(5),0.5(5)*,6.3(7),20.0(1.0)
44-Ru,,,7.02(2),,,,6.21(5),0.4(1),6.6(1),2.56(13)
44-Ru-96,5.8,0,,,,,,0,,0.28(2)
44-Ru-98,1.9,0,,,,,,0,,<8.0
44-Ru-99,12.7,5/2,,,,,,,,6.9(1.0)
44-Ru-100,12.6,0,,,,,,0,,4.8(6)
44-Ru-101,17.07,5/2,,,,,,,,3.3(9)
44-Ru-102,31.61,0,,,,,,0,,1.17(7)
44-Ru-104,18.58,0,,,,,,0,,0.31(2)
45-Rh-103,100,1/2,5.90(4),8.15(6),6.74(6),,4.34(6),0.3(3)*,4.6(3),144.8(7)
46-Pd,,,5.91(6),,,,4.39(9),0.093(9),4.48(9),6.9(4)
46-Pd-102,1,0,7.7(7)*,,,,7.5(1.4),0,7.5(1.4),3.4(3)
46-Pd-104,11,0,7.7(7)*,,,,7.5(1.4),0,7.5(1.4),0.6(3)
46-Pd-105,22.33,5/2,5.5(3),,,+/-,3.8(4),0.8(1.0),4.6(1.1),20.0(3.0)
46-Pd-106,27.33,0,6.4(4),,,,5.1(6),0,5.1(6),0.304(29)
46-Pd-108,26.71,0,4.1(3),,,,2.1(3),0,2.1(3),8.5(5)
46-Pd-110,11.8,0,7.7(7)*,,,,7.5(1.4),0,7.5(1.4),0.226(31)
47-Ag,,,5.922(7),,,,4.407(10),0.58(3),4.99(3),63.3(4)
47-Ag-107,51.8,1/2,7.555(11),8.14(9),5.8(3),+/-,7.17(2),0.13(3),7.30(4),37.6(1.2)
47-Ag-109,48.2,1/2,4.165(11),3.24(8),6.9(2),+/-,2.18(1),0.32(5),2.50(5),91.0(1.0)
48-Cd,,,4.83(5),,,E,3.04(6),3.46(13),6.50(12),2520.0(50.0)
48-Cd-106,1.2,0,5.0(2.0)*,,,,3.1(2.5),0,3.1(2.5),1.0(2.0)
48-Cd-108,0.9,0,5.31(24),,,,3.7(1),0,3.7(1),1.1(3)
48-Cd-110,12.39,0,5.78(8),,,,4.4(1),0,4.4(1),11.0(1.0)
48-Cd-111,12.75,1/2,6.47(8),,,,5.3(2),0.3(3)*,5.6(4),24.0(5.0)
48-Cd-112,24.07,0,6.34(6),,,,5.1(2),0,5.1(2),2.2(5)
48-Cd-113,12.36,1/2,-8.0(1),,,E,12.1(4),0.3(3)*,12.4(5),20600.0(400.0)
48-Cd-114,28.86,0,7.48(5),,,,7.1(2),0,7.1(2),0.34(2)
48-Cd-116,7.58,0,6.26(9),,,,5.0(2),0,5.0(2),0.075(13)
49-In,,,4.065(20),,,,2.08(2),0.54(11),2.62(11),193.8(1.5)
49-In-113,4.28,9/2,5.39(6),,,,3.65(8),0.000037(5),3.65(8),12.0(1.1)
49-In-115,95.72,9/2,4.00(3),2.1(1),6.4(4),,2.02(2),0.55(11),2.57(11),202.0(2.0)
50-Sn,,,6.225(2),,,,4.871(3),0.022(5),4.892(6),0.626(9)
50-Sn-112,1,0,6.0(1.0)*,,,,4.5(1.5),0,4.5(1.5),1.00(11)
50-Sn-114,0.66,0,6.0(3),,,,4.8(5),0,4.8(5),0.114(30)
50-Sn-115,0.35,1/2,6.0(1.0)*,,,,4.5(1.5),0.3(3)*,4.8(1.5),30.0(7.0)
50-Sn-116,14.3,0,6.10(1),,,,4.42(7),0,4.42(7),0.14(3)
50-Sn-117,7.61,1/2,6.59(8),0.22(10),-0.23(10),,5.28(8),0.3(3)*,5.6(3),2.3(5)
50-Sn-118,24.03,0,6.23(4),,,,4.63(8),0,4.63(8),0.22(5)
50-Sn-119,8.58,1/2,6.28(3),0.14(10),0.0(1),,4.71(8),0.3(3)*,5.0(3),2.2(5)
50-Sn-120,32.86,0,6.67(4),,,,5.29(8),0,5.29(8),0.14(3)
50-Sn-122,4.72,0,5.93(3),,,,4.14(7),0,4.14(7),0.18(2)
50-Sn-124,5.94,0,6.15(3),,,,4.48(8),0,4.48(8),0.133(5)
51-Sb,,,5.57(3),,,,3.90(4),0.00(7),3.90(6),4.91(5)
51-Sb-121,57.25,5/2,5.71(6),5.7(2),5.8(2),,4.10(9),0.0003(19),4.10(19),5.75(12)
51-Sb-123,42.75,7/2,5.38(7),5.2(2),5.4(2),,3.64(9),0.001(4),3.64(9),3.8(2)
52-Te,,,5.68(2),,,,4.23(4),0.09(6),4.32(5),4.7(1)
52-Te-120,0.09,0,5.3(5),,,,3.5(7),0,3.5(7),2.3(3)
52-Te-122,2.4,0,3.8(2),,,,1.8(2),0,1.8(2),3.4(5)
52-Te-123,0.87,1/2,-0.05(25),-1.2(2),3.5(2),,0.002(3),0.52(5),0.52(5),418.0(30.0)
52-Te-124,4.61,0,7.95(10),,,,8.0(2),0,8.0(2),6.8(1.3)
52-Te-125,6.99,1/2,5.01(8),4.9(2),5.5(2),,3.17(10),0.008(8),3.18(10),1.55(16)
52-Te-126,18.71,0,5.55(7),,,,3.88(10),0,3.88(10),1.04(15)
52-Te-128,31.79,0,5.88(8),,,,4.36(10),0,4.36(10),0.215(8)
52-Te-130,34.48,0,6.01(7),,,,4.55(11),0,4.55(11),0.29(6)
53-I-127,100,5/2,5.28(2),6.6(2),3.4(2),,3.50(3),0.31(6),3.81(7),6.15(6)
54-Xe,,,4.69(4),,,,3.04(4),0,,23.9(1.2)
54-Xe-124,0.1,0,,,,,,0,,165.0(20.0)
54-Xe-126,0.09,0,,,,,,0,,3.5(8)
54-Xe-128,1.9,0,,,,,,0,,<8.0
54-Xe-129,26.14,1/2,,,,,,,,21.0(5.0)
54-Xe-130,3.3,0,,,,,,0,,<26.0
54-Xe-131,21.18,3/2,,,,,,,,85.0(10.0)
54-Xe-132,26.89,0,,,,,,0,,0.45(6)
54-Xe-134,10.4,0,,,,,,0,,0.265(20)
54-Xe-136,8.9,0,,,,,,0,,0.26(2)
55-Cs-133,100,7/2,5.42(2),,,+/-,3.69(15),0.21(5),3.90(6),29.0(1.5)
56-Ba,,,5.07(3),,,,3.23(4),0.15(11),3.38(10),1.1(1)
56-Ba-130,0.1,0,-3.6(6),,,,1.6(5),0,1.6(5),30.0(5.0)
56-Ba-132,0.09,0,7.8(3),,,,7.6(6),0,7.6(6),7.0(8)
56-Ba-134,2.4,0,5.7(1),,,,4.08(14),0,4.08(14),2.0(1.6)
56-Ba-135,6.59,3/2,4.66(10),,,,2.74(12),0.5(5)*,3.2(5),5.8(9)
56-Ba-136,7.81,0,4.90(8),,,,3.03(10),0,3.03(10),0.68(17)
56-Ba-137,11.32,3/2,6.82(10),,,,5.86(17),0.5(5)*,6.4(5),3.6(2)
56-Ba-138,71.66,0,4.83(8),,,,2.94(10),0,2.94(19),0.27(14)
57-La,,,8.24(4),,,,8.53(8),1.13(19),9.66(17),8.97(2)
57-La-138,0.09,5,8.0(2.0)*,,,,8.0(4.0),0.5(5)*,8.5(4.0),57.0(6.0)
57-La-139,99.91,7/2,8.24(4),11.4(3),4.5(4),+/-,8.53(8),1.13(15),9.66(17),8.93(4)
58-Ce,,,4.84(2),,,,2.94(2),0.00(10),2.94(10),0.63(4)
58-Ce-136,0.19,0,5.76(9),,,,4.23(13),0,4.23(13),7.3(1.5)
58-Ce-138,0.26,0,6.65(9),,,,5.64(15),0,5.64(15),1.1(3)
58-Ce-140,88.48,0,4.81(9),,,,2.94(11),0,2.94(11),0.57(4)
58-Ce-142,11.07,0,4.72(9),,,,2.84(11),0,2.84(11),0.95(5)
59-Pr-141,100,5/2,4.58(5),,,+/-,2.64(6),0.015(3),2.66(6),11.5(3)
60-Nd,,,7.69(5),,,,7.43(19),9.2(8),16.6(8),50.5(1.2)
60-Nd-142,27.11,0,7.7(3),,,,7.5(6),0,7.5(6),18.7(7)
60-Nd-143,12.17,7/2,14.0(2.0)*,,,,25.0(7.0),55.0(7.0),80.0(2.0),337.0(10.0)
60-Nd-144,23.85,0,2.8(3),,,,1.0(2),0,1.0(2),3.6(3)
60-Nd-145,8.5,7/2,14.0(2.0)*,,,,25.0(7.0),5.0(5.0)*,30.0(9.0),42.0(2.0)
60-Nd-146,17.22,0,8.7(2),,,,9.5(4),0,9.5(4),1.4(1)
60-Nd-148,5.7,0,5.7(3),,,,4.1(4),0,4.1(4),2.5(2)
60-Nd-150,5.6,0,5.28(20),,,,3.5(3),0,3.5(3),1.2(2)
61-Pm-147,2.62 Y,7/2,12.6(4),,,,20.0(1.3),1.3(2.0),21.3(1.5),168.4(3.5)
62-Sm,,,0.00(5),,,E,0.422(9),39.0(3.0),39.4(3.0),5922.0(56.0)
62-Sm-144,3.1,0,-3.0(4.0)*,,,,1.0(3.0),0,1.0(3.0),0.7(3)
62-Sm-147,15,7/2,14.0(3.0),,,,25.0(11.0),14.0(19.0),39.0(16.0),57.0(3.0)
62-Sm-148,11.2,0,-3.0(4.0)*,,,,1.0(3.0),0,1.0(3.0),2.4(6)
62-Sm-149,13.8,7/2,18.7(28),,,E,63.5(6),137.0(5.0),200.0(5.0),42080.0(400.0)
62-Sm-150,7.4,0,14.0(3.0),,,,25.0(11.0),0,25.0(11.0),104.0(4.0)
62-Sm-152,26.7,0,-5.0(6),,,,3.1(8),0,3.1(8),206.0(6.0)
62-Sm-154,22.8,0,8.0(1.0),,,,11.0(2.0),0,11.0(2.0),8.4(5)
63-Eu,,,5.3(3),,,E,6.57(4),2.5(4),9.2(4),4530.0(40.0)
63-Eu-151,47.8,5/2,,,,E,5.5(2),3.1(4),8.6(4),9100.0(100.0)
63-Eu-153,52.8,5/2,8.22(12),,,,8.5(2),1.3(7),9.8(7),312.0(7.0)
64-Gd,,,9.5(2),,,E,29.3(8),151.0(2.0),180.0(2.0),49700.0(125.0)
64-Gd-152,0.2,0,10.0(3.0)*,,,,13.0(8.0),0,13.0(8.0),735.0(20.0)
64-Gd-154,2.2,0,10.0(3.0)*,,,,13.0(8.0),0,13.0(8.0),85.0(12.0)
64-Gd-155,14.9,3/2,13.8(3),,,E,40.8(4),25.0(6.0),66.0(6.0),61100.0(400.0)
64-Gd-156,20.6,0,6.3(4),,,,5.0(6),0,5.0(6),1.5(1.2)
64-Gd-157,15.7,3/2,4.0(2.0),,,E,650.0(4.0),394.0(7.0),1044.0(8.0),259000.0(700.0)
64-Gd-158,24.7,0,9.0(2.0),,,,10.0(5.0),0,10.0(5.0),2.2(2)
64-Gd-160,21.7,0,9.15(5),,,,10.52(11),0,10.52(11),0.77(2)
65-Tb-159,100,3/2,7.34(2),6.8(2),8.1(2),+/-,6.84(6),0.004(3),6.84(6),23.4(4)
66-Dy,,,16.9(3),,,,35.9(8),54.4(1.2),90.3(9),994.0(13.0)
66-Dy-156,0.06,0,6.1(5),,,,4.7(8),0,4.7(8),33.0(3.0)
66-Dy-158,0.1,0,6.0(4.0)*,,,,5.0(6.0),0,5.0(6.0),43.0(6.0)
66-Dy-160,2.3,0,6.7(4),,,,5.6(7),0,5.6(7),56.0(5.0)
66-Dy-161,18.9,5/2,10.3(4),,,,13.3(1.0),3.0(1.0),16.0(1.0),600.0(25.0)
66-Dy-162,25.5,0,-1.4(5),,,,0.25(18),0,0.25(18),194.0(10.0)
66-Dy-163,24.9,5/2,5.0(4),6.1(5),3.5(5),,3.1(5),0.21(19),3.3(5),124.0(7.0)
66-Dy-164,28.2,0,49.4(5),,,,307.0(3.0),0,307.0(3.0),2840.0(40.0)
67-Ho-165,100,7/2,8.44(3),6.9(2),10.3(2),+/-,8.06(8),0.36(3),8.42(16),64.7(1.2)
68-Er,,,7.79(2),,,,7.63(4),1.1(3),8.7(3),159.0(4.0)
68-Er-162,0.14,0,9.01(11),,,,9.7(4),0,9.7(4),19.0(2.0)
68-Er-164,1.6,0,7.95(14),,,,8.4(4),0,8.4(4),13.0(2.0)
68-Er-166,33.4,0,10.51(19),,,,14.1(5),0,14.1(5),19.6(1.5)
68-Er-167,22.9,7/2,3.06(5),5.3(3),0.0(3),,1.1(2),0.13(6),1.2(2),659.0(16.0)
68-Er-168,27,0,7.43(8),,,,6.9(7),0,6.9(7),2.74(8)
68-Er-170,15,0,9.61(6),,,,11.6(1.2),0,11.6(1.2),5.8(3)
69-Tm-169,100,1/2,7.07(3),,,+/-,6.28(5),0.10(7),6.38(9),100.0(2.0)
70-Yb,,,12.41(3),,,,19.42(9),4.0(2),23.4(2),34.8(8)
70-Yb-168,0.14,0,-4.07(2),,,E,2.13(2),0,2.13(2),2230.0(40.0)
70-Yb-170,3,0,6.8(1),,,,5.8(2),0,5.8(2),11.4(1.0)
70-Yb-171,14.3,1/2,9.7(1),6.5(2),19.4(4),,11.7(2),3.9(2),15.6(3),48.6(2.5)
70-Yb-172,21.9,0,9.5(1),,,,11.2(2),0,11.2(2),0.8(4)
70-Yb-173,16.3,5/2,9.56(10),2.5(2),13.3(3),,11.5(2),3.5,15,17.1(1.3)
70-Yb-174,31.8,0,19.2(1),,,,46.8(5),0,46.8(5),69.4(5.0)
70-Yb-176,12.7,0,8.7(1),,,,9.6(2),0,9.6(2),2.85(5)
71-Lu,,,7.21(3),,,,6.53(5),0.7(4),7.2(4),74.0(2.0)
71-Lu-175,97.4,7/2,7.28(9),,,,6.59(5),0.6(4),7.2(4),21.0(3.0)
71-Lu-176,2.6,7,6.1(2),,,,4.7(2),1.2(3),5.9,2065.(35.)
72-Hf,,,7.77(14),,,,7.6(3),2.6(5),10.2(4),104.1(5)
72-Hf-174,0.184,0,10.9(1.1),,,,15.0(3.0),0,15.0(3.0),561.0(35.0)
72-Hf-176,5.2,0,6.61(18),,,,5.5(3),0,5.5(3),23.5(3.1)
72-Hf-177,18.5,0,0.8(1.0)*,,,,0.1(2),0.1(3),0.2(2),373.0(10.0)
72-Hf-178,27.2,0,5.9(2),,,,4.4(3),0,4.4(3),84.0(4.0)
72-Hf-179,13.8,9/2,7.46(16),,,,7.0(3),0.14(2),7.1(3),41.0(3.0)
72-Hf-180,35.1,0,13.2(3),,,,21.9(1.0),0,21.9(1.0),13.04(7)
73-Ta,,,6.91(7),,,,6.00(12),0.01(17),6.01(12),20.6(5)
73-Ta-180,0.012,9,7.0(2.0)*,,,,6.2(3.5),0.5(5)*,7.0(4.0),563.0(60.0)
73-Ta-181,99.98,7/2,6.91(7),,,+/-,6.00(12),0.011(2),6.01(12),20.5(5)
74-W,,,4.755(18),,,,2.97(2),1.63(6),4.60(6),18.3(2)
74-W-180,0.13,0,5.0(3.0)*,,,,3.0(4.0),0,3.0(4.0),30.0(20.0)
74-W-182,26.3,1/2,7.04(4),,,,6.10(7),0,6.10(7),20.7(5)
74-W-183,14.3,1/2,6.59(4),6.3(4),7.0(4),,5.36(7),0.3(3)*,5.7(3),10.1(3)
74-W-184,30.7,0,7.55(6),,,,7.03(11),0,7.03(11),1.7(1)
74-W-186,28.6,0,-0.73(4),,,,0.065(7),0,0.065(7),37.9(6)
75-Re,,,9.2(2),,,,10.6(5),0.9(6),11.5(3),89.7(1.0)
75-Re-185,37.5,5/2,9.0(3),,,,10.2(7),0.5(9),10.7(6),112.0(2.0)
75-Re-187,62.5,5/2,9.3(3),,,,10.9(7),1.0(6),11.9(4),76.4(1.0)
76-Os,,,10.7(2),,,,14.4(5),0.3(8),14.7(6),16.0(4.0)
76-Os-184,0.02,0,10.0(2.0)*,,,,13.0(5.0),0,13.0(5.0),3000.0(150.0)
76-Os-186,1.6,0,12.0(1.7),,,,17.0(5.0),0,17.0(5.0),80.0(13.0)
76-Os-187,1.6,1/2,10.0(2.0)*,,,,13.0(5.0),0.3(3)*,13.0(5.0),320.0(10.0)
76-Os-188,13.3,0,7.8(3),,,,7.3(6),0,7.3(6),4.7(5)
76-Os-189,16.1,3/2,11.0(3),,,,14.4(8),0.5(5)*,14.9(9),25.0(4.0)
76-Os-190,26.4,0,11.4(3),,,,15.2(8),0,15.2(8),13.1(3)
76-Os-192,41,0,11.9(4),,,,16.6(1.2),0,16.6(1.2),2.0(1)
77-Ir,,,10.6(3),,,,14.1(8),0.0(3.0),14.0(3.0),425.0(2.0)
77-Ir-191,37.4,3/2,,,,,,,,954.0(10.0)
77-Ir-193,62.6,3/2,,,,,,,,111.0(5.0)
78-Pt,,,9.60(1),,,,11.58(2),0.13(11),11.71(11),10.3(3)
78-Pt-190,0.01,0,9.0(1.0),,,,10.0(2.0),0,10.0(2.0),152.0(4.0)
78-Pt-192,1.78,0,9.9(5),,,,12.3(1.2),0,12.3(1.2),10.0(2.5)
78-Pt-194,32.9,0,10.55(8),,,,14.0(2),0,14.0(2),1.44(19)
78-Pt-195,33.8,1/2,8.91(9),9.5(3),7.2(3),+/-,9.8(2),0.13(4),9.9(2),27.5(1.2)
78-Pt-196,25.3,0,9.89(8),,,,12.3(2),0,12.3(2),0.72(4)
78-Pt-198,7.2,0,7.8(1),,,,7.6(2),0,7.6(2),3.66(19)
79-Au-197,100,3/2,7.90(7),6.26(10),9.90(14),+/-,7.32(12),0.43(5),7.75(13),98.65(9)
80-Hg,,,12.595(45),,,,20.24(5),6.6(1),26.8(1),372.3(4.0)
80-Hg-196,0.15,0,30.3(1.0),,,E,115.0(8.0),0,115.0(8.0),3080.0(180.0)
80-Hg-198,10.1,0,,,,,,0,,2.0(3)
80-Hg-199,16.9,0,16.9(4),,,E,36.0(2.0),30.0(3.0),66.0(2.0),2150.0(48.0)
80-Hg-200,23.1,0,,,,,,0,,<60.0
80-Hg-201,13.2,3/2,,,,,,,,7.8(2.0)
80-Hg-202,29.7,0,11.002(43),,,,15.2108(2),0,15.2108(2),4.89(5)
80-Hg-204,6.8,0,,,,,,0,,0.43(10)
81-Tl,,,8.776(5),,,,9.678(11),0.21(15),9.89(15),3.43(6)
81-Tl-203,29.5,1/2,8.51(8),9.08(10),6.62(10),,6.14(28),0.14(4),6.28(28),11.4(2)
81-Tl-205,70.5,1/2,8.87(7),5.15(10),9.43(10),+/-,11.39(17),0.007(1),11.40(17),0.104(17)
82-Pb,,,9.401(2),,,,11.115(7),0.0030(7),11.118(7),0.171(2)
82-Pb-204,1.4,0,10.893(78),,,,12.3(2),0,12.3(2),0.65(7)
82-Pb-206,24.1,0,9.221(78),,,,10.68(12),0,10.68(12),0.0300(8)
82-Pb-207,22.1,1/2,9.286(16),,,+/-,10.82(9),0.002(2),10.82(9),0.699(10)
82-Pb-208,52.4,0,9.494(30),,,,11.34(5),0,11.34(5),0.00048(3)
83-Bi-209,100,9/2,8.532(2),8.26(1),8.74(1),,9.148(4),0.0084(19),9.156(4),0.0338(7)
88-Ra-226,1620 Y,0,10.0(1.0),,,,13.0(3.0),0,13.0(3.0),12.8(1.5)
90-Th-232,100,0,10.31(3),,,,13.36(8),0,13.36(8),7.37(6)
91-Pa-231,32500 Y,3/2,9.1(3),,,,10.4(7),0.1(3.3),10.5(3.2),200.6(2.3)
92-U,,,8.417(5),,,,8.903(11),0.005(16),8.908(11),7.57(2)
92-U-233,159000 Y,5/2,10.1(2),,,,12.8(5),0.1(6),12.9(3),574.7(1.0)
92-U-234,0.005,0,12.4(3),,,,19.3(9),0,19.3(9),100.1(1.3)
92-U-235,0.72,7/2,10.50(3),,,,13.78(11),0.2(2),14.0(2),680.9(1.1)
92-U-238,99.27,0,8.407(7),,,,8.871(11),0,8.871(11),2.68(2)
93-Np-237,2140000 Y,5/2,10.55(10),,,,14.0(3),0.5(5)*,14.5(6),175.9(2.9)
94-Pu-239,24400 Y,1/2,7.7(1),,,,7.5(2),0.2(6),7.7(6),1017.3(2.1)
94-Pu-240,6540 Y,0,3.5(1),,,,1.54(9),0,1.54(9),289.6(1.4)
94-Pu-242,376000 Y,0,8.1(1),,,,8.2(2),0,8.2(2),18.5(5)
95-Am-243,7370 Y,5/2,8.3(2),,,,8.7(4),0.3(2.6),9.0(2.6),75.3(1.8)
96-Cm-244,17.9 Y,0,9.5(3),,,,11.3(7),0,11.3(7),16.2(1.2)
96-Cm-246,4700 Y,0,9.3(2),,,,10.9(5),0,10.9(5),1.36(17)
96-Cm-248,340000 Y,0,7.7(2),,,,7.5(4),0,7.5(4),3.00(26)\
"""
# Imaginary values for select isotopes
# isotope, b_c_i, bp_i, bm_i
nsftableI="""\
2-He-3,-1.48,,-5.925
3-Li-6,-0.26,-0.08(1),-0.62(2)
5-B,-0.21,,
47-Ag-107,-0.01,,
47-Ag-109,-0.025,,
48-Cd,-1.2,,
48-Cd-113,-12,,
49-In,-0.054,,
49-In-115,-0.056,,
52-Te-123,-0.1,,
62-Sm,-1.5,,
62-Sm-149,-11,,
64-Gd,-13.6,,
64-Gd-155,-10.3,,
71-Lu-176,-0.57(2),,
80-Hg-196,-0.8,,\
"""
# Excluding the following because the measurements for the real parts
# were not used in nsftable table.
# 63-Eu-151,-2.46,,
# 64-Gd-157,-47,-75,
def fix_number(str):
"""
Converts strings of the form e.g., 35.24(2)* into numbers without
uncertainty. Also accepts a limited range, e.g., <1e-6, which is
    converted as 1e-6. Missing values are returned as None.
"""
if str == '': return None
idx = str.find('(')
if idx >= 0: str = str[0:idx]
if str[0] == '<': str = str[1:]
return float(str)
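# Hedged illustration (added; not part of the original module): expected
# behaviour of fix_number on the entry formats documented above.
def _fix_number_examples():
    # '7.64(13)' -> 7.64  (the parenthesised uncertainty is dropped)
    # '<8.0'     -> 8.0   (an upper limit is taken at face value)
    # ''         -> None  (missing value)
    return fix_number('7.64(13)'), fix_number('<8.0'), fix_number('')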
def sld_table(wavelength=1, table=None, isotopes=True):
"""
Scattering length density table for wavelength 4.75 |Ang|.
:Parameters:
*table* : PeriodicTable
If *table* is not specified, use the common periodic table.
*isotopes* = True : boolean
Whether to consider isotopes or not.
:Returns: None
Example
>>> sld_table(wavelength=4.75) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Neutron scattering length density table
atom mass density sld imag incoh
H 1.008 0.071 -1.582 0.000 10.690
1-H 1.008 0.071 -1.583 0.000 10.691
D 2.014 0.141 2.823 0.000 1.709
T 3.016 0.212 2.027 0.000 0.446
He 4.003 0.122 0.598 0.000 0.000
3-He 3.016 0.092 1.054 0.272 0.655 *
4-He 4.003 0.122 0.598 0.000 0.000
...
248-Cm 248.072 13.569 2.536 0.000 0.000
* Energy dependent cross sections
"""
table = default_table(table)
# Table for comparison with scattering length density calculators
# b_c for Sc, Te, Xe, Sm, Eu, Gd, W, Au, Hg are different from Neutron News
# The Rauch data have cited references to back up the numbers
# (see doc directory), though it is not clear what criteria are
# used to select amongst the available measurements.
print(" Neutron scattering length density table")
print("%-7s %7s %7s %7s %7s %7s"
%('atom', 'mass', 'density', 'sld', 'imag', 'incoh'))
for el in table:
if el.neutron.has_sld():
coh,jcoh,inc = el.neutron.sld(wavelength=wavelength)
print("%-7s %7.3f %7.3f %7.3f %7.3f %7.3f%s"
%(el,el.mass,el.density,coh,jcoh,inc,
' *' if el.neutron.is_energy_dependent else ''))
if isotopes:
isos = [iso for iso in el if iso.neutron != None and iso.neutron.has_sld()]
else:
isos = []
for iso in isos:
coh,jcoh,inc = iso.neutron.sld(wavelength=wavelength)
print("%-7s %7.3f %7.3f %7.3f %7.3f %7.3f%s"
%(iso,iso.mass,iso.density,coh,jcoh,inc,
' *' if iso.neutron.is_energy_dependent else ''))
print("* Energy dependent cross sections")
def energy_dependent_table(table=None):
"""
    Prints a table of energy dependent elements and isotopes.
:Parameters:
*table* : PeriodicTable
If *table* is not specified, use the common periodic table.
:Returns: None
Example
>>> energy_dependent_table()
Elements and isotopes with energy dependent absorption:
He-3
Cd Cd-113
Sm Sm-149
Eu Eu-151
Gd Gd-155 Gd-157
Yb-168
Hg-196 Hg-199
"""
table = default_table(table)
# List of energy dependent elements and isotopes
print("Elements and isotopes with energy dependent absorption:")
for el in table:
if not hasattr(el,'neutron'): continue
dep = []
if el.neutron.is_energy_dependent:
dep += [str(el)]
dep += [str(el)+'-'+str(iso.isotope)
for iso in el
if iso.neutron != None and iso.neutron.is_energy_dependent]
if len(dep) > 0: print(" "+" ".join(dep))
def _diff(iso,a,b,tol=0.01):
if None in (a,b):
if a is not None or b is not None:
if a is None and b > tol:
print("%10s %8s %8.2f"%(iso, "----", b))
elif b is None and a > tol:
print("%10s %8.2f %8s"%(iso, a, "----"))
elif abs(a - b) > tol:
print("%10s %8.2f %8.2f %5.1f%%"%(iso, a, b, 100*(a-b)/b if b!=0 else inf))
def compare(fn1, fn2, table=None, tol=0.01):
table = default_table(table)
for el in table:
try: res1 = fn1(el)
except: res1 = None
try: res2 = fn2(el)
except: res2 = None
_diff(el, res1, res2, tol=tol)
for iso in el:
try: res1 = fn1(iso)
except: res1 = None
try: res2 = fn2(iso)
except: res2 = None
_diff(iso, res1, res2, tol=tol)
def absorption_comparison_table(table=None, tol=None):
"""
Prints a table comparing absorption to the imaginary bound coherent
    scattering length b_c_i. This is used to check the integrity
of the data and formula.
The relationship between absorption and b_c_i is:
.. math::
\sigma_a = -2 \lambda b_i \cdot 1000
The wavelength $\lambda = 1.798 \AA$ is the neutron wavelength at which
the absorption is tallied. The factor of 1000 transforms from
|Ang|\ |cdot|\ fm to barn.
:Parameters:
*table* : PeriodicTable
The default periodictable unless a specific table has been requested.
*tol* = 0.01 : float | barn
Show differences greater than this amount.
:Returns: None
Example
>>> absorption_comparison_table (tol=0.5) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Comparison of absorption and (-2000 lambda b_c_i)
3-He 5333.00 5322.08 0.2%
Li 70.50 ----
6-Li 940.00 934.96 0.5%
B 767.00 755.16 1.6%
10-B 3835.00 ----
N 1.90 ----
...
"""
print("Comparison of absorption and (-2000 lambda b_c_i)")
compare(lambda el: el.neutron.absorption,
lambda el: -2000*el.neutron.b_c_i*ABSORPTION_WAVELENGTH,
table=table, tol=tol)
return
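# Worked check of the relation above (added note): using the He-3 imaginary
# length from nsftableI, b_c_i = -1.48 fm and lambda = 1.798 Ang give
#     sigma_a = -2 * 1000 * 1.798 * (-1.48) = 5322.08 barn,
# which is the value printed next to the tabulated 5333 barn for 3-He in the
# doctest output.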
def coherent_comparison_table(table=None, tol=None):
"""
Prints a table of $4 \pi b_c^2/100$ and coherent for each isotope.
This is useful for checking the integrity of the data and formula.
The table only prints where b_c exists.
:Parameters:
*table* : PeriodicTable
The default periodictable unless a specific table has been requested.
*tol* = 0.01 : float | barn
Amount of difference to show
:Returns: None
Example
>>> coherent_comparison_table (tol=0.5) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Comparison of (4 pi b_c^2/100) and coherent
n 172.03 43.01 300.0%
1-n 172.03 43.01 300.0%
Sc 18.40 19.00 -3.2%
45-Sc 18.40 19.00 -3.2%
65-Cu 13.08 14.10 -7.2%
70-Zn 5.98 4.50 33.0%
84-Sr 3.14 6.00 -47.6%
...
"""
import numpy
print("Comparison of (4 pi b_c^2/100) and coherent")
compare(lambda el: 4*pi/100*el.neutron.b_c**2,
lambda el: el.neutron.coherent,
table=table, tol=tol)
def total_comparison_table(table=None, tol=None):
"""
Prints a table of neutron.total and sum coh,inc for each
    isotope where these exist. This is used to check the integrity
of the data and formula.
:Parameters:
*table* : PeriodicTable
The default periodictable unless a specific table has been requested.
*tol* = 0.01 : float | barn
Amount of difference to show
:Returns: None
Example
>>> total_comparison_table (tol=0.1)
Comparison of total cross section to (coherent + incoherent)
n 43.01 ----
1-n 43.01 ----
84-Kr 6.60 ----
149-Sm 200.00 200.50 -0.2%
Eu 9.20 9.07 1.4%
Gd 180.00 180.30 -0.2%
155-Gd 66.00 65.80 0.3%
161-Dy 16.00 16.30 -1.8%
180-Ta 7.00 6.70 4.5%
187-Os 13.00 13.30 -2.3%
"""
print("Comparison of total cross section to (coherent + incoherent)")
compare(lambda el: el.neutron.total,
lambda el: el.neutron.coherent+el.neutron.incoherent,
table=table, tol=tol)
def incoherent_comparison_table(table=None, tol=None):
"""
    Prints a table comparing the incoherent cross section computed from total and b_c
    with the tabulated incoherent cross section.
:Parameters:
*table* : PeriodicTable
The default periodictable unless a specific table has been requested.
*tol* = 0.01 : float | barn
Amount of difference to show
:Returns: None
Example
>>> incoherent_comparison_table (tol=0.5) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Comparison of incoherent and (total - 4 pi b_c^2/100)
Sc 4.50 5.10 -11.8%
45-Sc 4.50 5.10 -11.8%
65-Cu 0.40 1.42 -71.7%
70-Zn 0.00 -1.48 -100.0%
84-Sr 0.00 2.86 -100.0%
113-Cd 0.30 4.36 -93.1%
...
"""
print("Comparison of incoherent and (total - 4 pi b_c^2/100)")
compare(lambda el: el.neutron.incoherent,
lambda el: el.neutron.total - 4*pi/100*el.neutron.b_c**2,
table=table, tol=tol)
| rogcg/environment | externals/periodictable/nsf.py | Python | unlicense | 65,497 | [
"Avogadro"
] | 9ae441614a1f5249b129d5d745775e4d94c6f893e733b8cee57557c11fdaeed2 |
"""
Vision-specific analysis functions.
$Id: featureresponses.py 7714 2008-01-24 16:42:21Z antolikjan $
"""
__version__='$Revision: 7714 $'
from math import pi,sin,cos
import numpy
from numpy.oldnumeric import Float
from numpy import zeros, array, size, object_
#import scipy
import param
from param import normalize_path
from topo.misc.distribution import DSF_WeightedAverage
try:
import matplotlib
import pylab
except ImportError:
param.Parameterized(name=__name__).warning("Could not import matplotlib; module will not be useable.")
from topo.command import ImportErrorRaisingFakeModule
pylab = ImportErrorRaisingFakeModule("matplotlib") # pyflakes:ignore (try/except import)
# CEBALERT: commands in here should inherit from the appropriate base
# class (Command or PylabPlotCommand).
import topo
from topo.base.cf import CFSheet
from topo.base.sheetview import SheetView
from topo.plotting.plotgroup import create_plotgroup
from topo.command.analysis import measure_sine_pref
from topo import numbergen
max_value = 0
global_index = ()
def _complexity_rec(x,y,index,depth,fm):
"""
Recurrent helper function for complexity()
"""
global max_value
global global_index
if depth<size(fm.features):
for i in range(size(fm.features[depth].values)):
_complexity_rec(x,y,index + (i,),depth+1,fm)
else:
if max_value < fm.full_matrix[index][x][y]:
global_index = index
max_value = fm.full_matrix[index][x][y]
def complexity(full_matrix):
    """This function expects as an input an object of type FullMatrix which contains
    responses of all neurons in a sheet to stimuli with different varying parameter values.
    One of these parameters (features) has to be phase. In that case it computes the classic
    modulation ratio (see Hawken et al. for definition) for each neuron and returns them as a matrix.
    """
    global global_index
    global max_value
rows,cols = full_matrix.matrix_shape
complex_matrix = zeros(full_matrix.matrix_shape,object_)
fftmeasure = zeros(full_matrix.matrix_shape,Float)
i = 0
for f in full_matrix.features:
if f.name == "phase":
phase_index = i
break
i=i+1
sum = 0.0
res = 0.0
average = 0.0
print "Z"
print size(full_matrix.features)
print full_matrix.features[0].values
for x in range(rows):
for y in range(cols):
complex_matrix[x,y] = []#
max_value=-0.01
global_index = ()
_complexity_rec(x,y,(),0,full_matrix)
#compute the sum of the responses over phases given the found index of highest response
iindex = array(global_index)
sum = 0.0
for i in range(size(full_matrix.features[phase_index].values)):
iindex[phase_index] = i
sum = sum + full_matrix.full_matrix[tuple(iindex.tolist())][x][y]
#average
average = sum / float(size(full_matrix.features[phase_index].values))
res = 0.0
#compute the sum of absolute values of the responses minus average
for i in range(size(full_matrix.features[phase_index].values)):
iindex[phase_index] = i
res = res + abs(full_matrix.full_matrix[tuple(iindex.tolist())][x][y] - average)
complex_matrix[x,y] = complex_matrix[x,y] + [full_matrix.full_matrix[tuple(iindex.tolist())][x][y]]
if x==43 and y==43:
pylab.figure()
ax = pylab.subplot(111)
z = complex_matrix[x,y][:]
z.append(z[0])
pylab.plot(z,linewidth=4)
pylab.axis(xmin=0.0,xmax=numpy.pi)
ax.yaxis.set_major_locator(matplotlib.ticker.MaxNLocator(4))
pylab.xticks([0,len(z)/2,len(z)-1], ['0','pi/2','pi'])
pylab.savefig(normalize_path(str(topo.sim.time()) + str(complex_matrix[x,y][0])+ 'modulation_response[43,43].png'))
if x==45 and y==45:
pylab.figure()
ax = pylab.subplot(111)
z = complex_matrix[x,y][:]
z.append(z[0])
pylab.plot(z,linewidth=4)
pylab.axis(xmin=0.0,xmax=numpy.pi)
ax.yaxis.set_major_locator(matplotlib.ticker.MaxNLocator(4))
pylab.xticks([0,len(z)/2,len(z)-1], ['0','pi/2','pi'])
pylab.savefig(normalize_path(str(topo.sim.time()) + str(complex_matrix[x,y][0])+ 'modulation_response[45,45].png'))
fft = numpy.fft.fft(complex_matrix[x,y]+complex_matrix[x,y]+complex_matrix[x,y]+complex_matrix[x,y],2048)
first_har = 2048/len(complex_matrix[0,0])
if abs(fft[0]) != 0:
fftmeasure[x,y] = 2 *abs(fft[first_har]) /abs(fft[0])
else:
fftmeasure[x,y] = 0
return fftmeasure
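# Added commentary (not in the original analysis): the loop above builds a
# phase-tuning curve for each unit and takes 2*|F1|/|F0| of its FFT, i.e. the
# ratio of the first-harmonic amplitude to the mean response.  After the
# division by two in analyze_complexity below, strongly phase-modulated
# (simple) cells land above 0.5 and phase-invariant (complex) cells below it,
# matching the 0.5 simple/complex boundary noted there.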
def compute_ACDC_orientation_tuning_curves(full_matrix,curve_label,sheet):
""" This function allows and alternative computation of orientation tuning curve where
for each given orientation the response is computed as a maximum of AC or DC component
across the phases instead of the maximum used as a standard in Topographica"""
# this method assumes that only single frequency has been used
i = 0
for f in full_matrix.features:
if f.name == "phase":
phase_index = i
if f.name == "orientation":
orientation_index = i
if f.name == "frequency":
frequency_index = i
i=i+1
print sheet.curve_dict
if not sheet.curve_dict.has_key("orientationACDC"):
sheet.curve_dict["orientationACDC"]={}
sheet.curve_dict["orientationACDC"][curve_label]={}
rows,cols = full_matrix.matrix_shape
for o in xrange(size(full_matrix.features[orientation_index].values)):
s_w = zeros(full_matrix.matrix_shape)
for x in range(rows):
for y in range(cols):
or_response=[]
for p in xrange(size(full_matrix.features[phase_index].values)):
index = [0,0,0]
index[phase_index] = p
index[orientation_index] = o
index[frequency_index] = 0
or_response.append(full_matrix.full_matrix[tuple(index)][x][y])
fft = numpy.fft.fft(or_response+or_response+or_response+or_response,2048)
first_har = 2048/len(or_response)
s_w[x][y] = numpy.maximum(2 *abs(fft[first_har]),abs(fft[0]))
s = SheetView((s_w,sheet.bounds), sheet.name , sheet.precedence, topo.sim.time(),sheet.row_precedence)
sheet.curve_dict["orientationACDC"][curve_label].update({full_matrix.features[orientation_index].values[o]:s})
def phase_preference_scatter_plot(sheet_name,diameter=0.39):
r = numbergen.UniformRandom(seed=1023)
preference_map = topo.sim[sheet_name].sheet_views['PhasePreference']
offset_magnitude = 0.03
datax = []
datay = []
(v,bb) = preference_map.view()
for z in zeros(66):
x = (r() - 0.5)*2*diameter
y = (r() - 0.5)*2*diameter
rand = r()
xoff = sin(rand*2*pi)*offset_magnitude
yoff = cos(rand*2*pi)*offset_magnitude
xx = max(min(x+xoff,diameter),-diameter)
yy = max(min(y+yoff,diameter),-diameter)
x = max(min(x,diameter),-diameter)
y = max(min(y,diameter),-diameter)
[xc1,yc1] = topo.sim[sheet_name].sheet2matrixidx(xx,yy)
[xc2,yc2] = topo.sim[sheet_name].sheet2matrixidx(x,y)
if((xc1==xc2) & (yc1==yc2)): continue
datax = datax + [v[xc1,yc1]]
datay = datay + [v[xc2,yc2]]
for i in range(0,len(datax)):
datax[i] = datax[i] * 360
datay[i] = datay[i] * 360
if(datay[i] > datax[i] + 180): datay[i]= datay[i]- 360
if((datax[i] > 180) & (datay[i]> 180)): datax[i] = datax[i] - 360; datay[i] = datay[i] - 360
if((datax[i] > 180) & (datay[i] < (datax[i]-180))): datax[i] = datax[i] - 360; #datay[i] = datay[i] - 360
f = pylab.figure()
ax = f.add_subplot(111, aspect='equal')
pylab.plot(datax,datay,'ro')
pylab.plot([0,360],[-180,180])
pylab.plot([-180,180],[0,360])
pylab.plot([-180,-180],[360,360])
ax.axis([-180,360,-180,360])
pylab.xticks([-180,0,180,360], [-180,0,180,360])
pylab.yticks([-180,0,180,360], [-180,0,180,360])
pylab.grid()
pylab.savefig(normalize_path(str(topo.sim.timestr()) + sheet_name + "_scatter.png"))
###############################################################################
# JABALERT: Should we move this plot and command to analysis.py or
# pylabplot.py, where all the rest are?
#
# In any case, it requires generalization; it should not be hardcoded
# to any particular map name, and should just do the right thing for
# most networks for which it makes sense. E.g. it already measures
# the ComplexSelectivity for all measured_sheets, but then
# plot_modulation_ratio only accepts two with specific names.
# plot_modulation_ratio should just plot whatever it is given, and
# then analyze_complexity can simply pass in whatever was measured,
# with the user controlling what is measured using the measure_map
# attribute of each Sheet. That way the complexity of any sheet could
# be measured, which is what we want.
#
# Specific changes needed:
# - Make plot_modulation_ratio accept a list of sheets and
# plot their individual modulation ratios and combined ratio.
# - Remove complex_sheet_name argument, which is no longer needed
# - Make sure it still works fine even if V1Simple doesn't exist;
# as this is just for an optional scatter plot, it's fine to skip
# it.
# - Preferably remove the filename argument by default, so that
# plots will show up in the GUI
def analyze_complexity(full_matrix,simple_sheet_name,complex_sheet_name,filename=None):
"""
Compute modulation ratio for each neuron, to distinguish complex from simple cells.
Uses full_matrix data obtained from measure_or_pref().
If there is a sheet named as specified in simple_sheet_name,
also plots its phase preference as a scatter plot.
"""
import topo
measured_sheets = [s for s in topo.sim.objects(CFSheet).values()
if hasattr(s,'measure_maps') and s.measure_maps]
for sheet in measured_sheets:
        # Divide by two to get into 0-1 scale - that means simple/complex boundary is now at 0.5
complx = array(complexity(full_matrix[sheet]))/2.0
# Should this be renamed to ModulationRatio?
sheet.sheet_views['ComplexSelectivity']=SheetView((complx,sheet.bounds), sheet.name , sheet.precedence, topo.sim.time(),sheet.row_precedence)
import topo.command.pylabplot
topo.command.pylabplot.plot_modulation_ratio(full_matrix,simple_sheet_name=simple_sheet_name,complex_sheet_name=complex_sheet_name,filename=filename)
# Avoid error if no simple sheet exists
try:
phase_preference_scatter_plot(simple_sheet_name,diameter=0.24999)
except AttributeError:
print "Skipping phase preference scatter plot; could not analyze region %s." \
% simple_sheet_name
class measure_and_analyze_complexity(measure_sine_pref):
"""Macro for measuring orientation preference and then analyzing its complexity."""
def __call__(self,**params):
fm = super(measure_and_analyze_complexity,self).__call__(**params)
#analyze_complexity(fm,simple_sheet_name="V1Simple",complex_sheet_name="V1Complex",filename="ModulationRatio")
return fm
pg= create_plotgroup(name='Orientation Preference and Complexity',category="Preference Maps",
doc='Measure preference for sine grating orientation.',
pre_plot_hooks=[measure_and_analyze_complexity.instance(
preference_fn=DSF_WeightedAverage( value_scale=(0., 1./pi),selectivity_scale=(0.,17.0)))])
pg.add_plot('Orientation Preference',[('Hue','OrientationPreference')])
pg.add_plot('Orientation Preference&Selectivity',[('Hue','OrientationPreference'),
('Confidence','OrientationSelectivity')])
pg.add_plot('Orientation Selectivity',[('Strength','OrientationSelectivity')])
pg.add_plot('Modulation Ratio',[('Strength','ComplexSelectivity')])
pg.add_plot('Phase Preference',[('Hue','PhasePreference')])
pg.add_static_image('Color Key','command/or_key_white_vert_small.png')
__all__ = [
"measure_and_analyze_complexity",
"complexity",
"compute_ACDC_orientation_tuning_curves",
"phase_preference_scatter_plot",
"analyze_complexity",
]
| ioam/svn-history | topo/analysis/vision.py | Python | bsd-3-clause | 12,727 | [
"NEURON"
] | b9c9682195ecb5c79af7ecd8797a9680a5337f4f29449ac5ab46d550600d88a8 |
"""
=============================================
Integration and ODEs (:mod:`scipy.integrate`)
=============================================
.. currentmodule:: scipy.integrate
Integrating functions, given function object
============================================
.. autosummary::
:toctree: generated/
quad -- General purpose integration
quad_vec -- General purpose integration of vector-valued functions
dblquad -- General purpose double integration
tplquad -- General purpose triple integration
nquad -- General purpose n-dimensional integration
fixed_quad -- Integrate func(x) using Gaussian quadrature of order n
quadrature -- Integrate with given tolerance using Gaussian quadrature
romberg -- Integrate func using Romberg integration
quad_explain -- Print information for use of quad
newton_cotes -- Weights and error coefficient for Newton-Cotes integration
IntegrationWarning -- Warning on issues during integration
Integrating functions, given fixed samples
==========================================
.. autosummary::
:toctree: generated/
trapz -- Use trapezoidal rule to compute integral.
cumtrapz -- Use trapezoidal rule to cumulatively compute integral.
simps -- Use Simpson's rule to compute integral from samples.
romb -- Use Romberg Integration to compute integral from
-- (2**k + 1) evenly-spaced samples.
.. seealso::
:mod:`scipy.special` for orthogonal polynomials (special) for Gaussian
quadrature roots and weights for other weighting factors and regions.
Solving initial value problems for ODE systems
==============================================
The solvers are implemented as individual classes which can be used directly
(low-level usage) or through a convenience function.
.. autosummary::
:toctree: generated/
solve_ivp -- Convenient function for ODE integration.
RK23 -- Explicit Runge-Kutta solver of order 3(2).
RK45 -- Explicit Runge-Kutta solver of order 5(4).
DOP853 -- Explicit Runge-Kutta solver of order 8.
Radau -- Implicit Runge-Kutta solver of order 5.
BDF -- Implicit multi-step variable order (1 to 5) solver.
LSODA -- LSODA solver from ODEPACK Fortran package.
OdeSolver -- Base class for ODE solvers.
DenseOutput -- Local interpolant for computing a dense output.
OdeSolution -- Class which represents a continuous ODE solution.
Old API
-------
These are the routines developed earlier for scipy. They wrap older solvers
implemented in Fortran (mostly ODEPACK). While the interface to them is not
particularly convenient and certain features are missing compared to the new
API, the solvers themselves are of good quality and work fast as compiled
Fortran code. In some cases it might be worth using this old API.
.. autosummary::
:toctree: generated/
odeint -- General integration of ordinary differential equations.
ode -- Integrate ODE using VODE and ZVODE routines.
complex_ode -- Convert a complex-valued ODE to real-valued and integrate.
Solving boundary value problems for ODE systems
===============================================
.. autosummary::
:toctree: generated/
solve_bvp -- Solve a boundary value problem for a system of ODEs.
"""
from __future__ import division, print_function, absolute_import
from .quadrature import *
from .odepack import *
from .quadpack import *
from ._ode import *
from ._bvp import solve_bvp
from ._ivp import (solve_ivp, OdeSolution, DenseOutput,
OdeSolver, RK23, RK45, DOP853, Radau, BDF, LSODA)
from ._quad_vec import quad_vec
__all__ = [s for s in dir() if not s.startswith('_')]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| gertingold/scipy | scipy/integrate/__init__.py | Python | bsd-3-clause | 3,900 | [
"Gaussian"
] | 6352044c636fc853b5aa5a71de9d1d1e30c4e517424f748584e92cbfc12a76a0 |
#! /usr/bin/env python3
# Adapted from http://kitchingroup.cheme.cmu.edu/blog/2013/02/18/Nonlinear-curve-fitting/
import glob
import numpy as np # type: ignore
import pandas as pd # type: ignore
from scipy.optimize import leastsq # type: ignore
import argparse
import warnings
warnings.filterwarnings("ignore", category=UserWarning,
module="pymatgen")
from pymatgen.io.vasp import Vasprun # type: ignore
from pymatgen.io.vasp.outputs import UnconvergedVASPWarning # type: ignore
import matplotlib # type: ignore
matplotlib.use('agg')
import matplotlib.pyplot as plt # type: ignore
from vasppy.poscar import Poscar
from vasppy.summary import find_vasp_calculations
from vasppy.utils import match_filename
def parse_args():
parser = argparse.ArgumentParser(description='Perform a Murnaghan equation of state fit across VASP subdirectories')
parser.add_argument( '-p', '--plot', action='store_true', help='generate murn.pdf plot of fit' )
parser.add_argument( '-v', '--verbose', action='store_true', help='verbose output' )
args = parser.parse_args()
return args
def read_vasprun( filename ):
return Vasprun( filename, parse_potcar_file=False, parse_dos=False, parse_eigen=False )
def read_data( verbose=True ):
dir_list = find_vasp_calculations()
if not dir_list:
raise ValueError( 'Did not find any subdirectories containing vasprun.xml or vasprun.xml.gz files' )
data = []
for d in dir_list:
converged = True
try:
with warnings.catch_warnings(record=True) as w:
vasprun = read_vasprun( match_filename( d + 'vasprun.xml' ) )
for warning in w:
if isinstance( warning.message, UnconvergedVASPWarning ):
converged = False
else:
print( warning.message )
except:
continue
poscar = Poscar.from_file( d + 'POSCAR' )
data.append( [ poscar.scaling,
vasprun.final_structure.volume,
vasprun.final_energy,
converged ] )
column_titles = [ 'scaling', 'volume', 'energy', 'converged' ]
df = pd.DataFrame( data, columns=column_titles ).sort_values( by='scaling' )
df = df.reset_index( drop=True )
df['scaling_factor'] = df.volume / df.scaling**3
scaling_factor_round = 4
if verbose:
print( df.to_string(index=False) )
if len( set( df.scaling_factor.round( scaling_factor_round ) ) ) != 1:
raise ValueError( "POSCAR scaling factors and volumes are inconsistent" )
return df
def murnaghan( vol, e0, b0, bp, v0 ):
"""
Calculate the energy as a function of volume, using the Murnaghan equation of state
[Murnaghan, Proc. Nat. Acad. Sci. 30, 244 (1944)]
https://en.wikipedia.org/wiki/Murnaghan_equation_of_state
cf. Fu and Ho, Phys. Rev. B 28, 5480 (1983).
Args:
vol (float): this volume.
e0 (float): energy at the minimum-energy volume, E0.
b0 (float): bulk modulus at the minimum-energy volume, B0.
bp (float): pressure-derivative of the bulk modulus at the minimum-energy volume, B0'.
v0 (float): volume at the minimum-energy volume, V0.
Returns:
(float): The energy at this volume.
"""
energy = e0 + b0 * vol / bp * (((v0 / vol)**bp) / (bp - 1) + 1) - v0 * b0 / (bp - 1.0)
return energy
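# Added sanity-check note: at vol == v0 the expression above collapses to
#     E(V0) = e0 + b0*v0/bp * (1/(bp - 1) + 1) - v0*b0/(bp - 1) = e0,
# so the fitted e0 is the energy at the equilibrium volume v0.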
def objective( pars, x, y ):
err = y - murnaghan( x, *pars )
return err
def lstsq_fit( volumes, energies ):
e_min = energies.min()
v_min = volumes[ np.argwhere( energies == e_min )[0][0] ]
x0 = [ e_min, 2.0, 10.0, v_min ] #initial guess of parameters
plsq = leastsq( objective, x0, args=( volumes, energies ) )
return plsq
def make_plot( df, fit_params ):
v_min = df.volume.min()*0.99
v_max = df.volume.max()*1.01
v_fitting = np.linspace( v_min, v_max, num=50 )
e_fitting = murnaghan( v_fitting, *fit_params )
plt.figure( figsize=(8.0,6.0) )
# plot converged data points
loc = df.converged
plt.plot( df[loc].volume, df[loc].energy, 'o' )
# plot unconverged data points
loc = [ not b for b in df.converged ]
plt.plot( df[loc].volume, df[loc].energy, 'o', c='grey' )
# plot fitted equation of state curve
plt.plot( v_fitting, e_fitting, '--' )
plt.xlabel( r'volume [$\mathrm{\AA}^3$]' )
plt.ylabel( r'energy [eV]' )
plt.tight_layout()
plt.savefig( 'murn.pdf' )
def fit( verbose=False, plot=False ):
df = read_data( verbose=verbose )
e0, b0, bp, v0 = lstsq_fit( np.array( df.volume ), np.array( df.energy ) )[0]
if plot:
make_plot( df, ( e0, b0, bp, v0 ) )
print( "E0: {:.4f}".format( e0 ) )
print( "V0: {:.4f}".format( v0 ) )
print( "opt. scaling: {:.5f}".format( ( v0 / df.scaling_factor.mean() )**(1/3) ) )
def main():
args = parse_args()
fit( verbose=args.verbose, plot=args.plot )
if __name__ == '__main__':
main()
| bjmorgan/vasppy | vasppy/scripts/murnfit.py | Python | mit | 5,037 | [
"VASP",
"pymatgen"
] | a5cd759c1670edbf29a78e2c70eafc3a4e207cee8981764147dda3f701be2740 |
"""
Dimensions of physical quantities
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2018, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the LICENSE file, distributed with this software.
# -----------------------------------------------------------------------------
from itertools import chain
from sympy import Symbol, sympify, Rational
from functools import wraps
#: mass
mass = Symbol("(mass)", positive=True)
#: length
length = Symbol("(length)", positive=True)
#: time
time = Symbol("(time)", positive=True)
#: temperature
temperature = Symbol("(temperature)", positive=True)
#: angle
angle = Symbol("(angle)", positive=True)
#: current_mks
current_mks = Symbol("(current_mks)", positive=True)
#: luminous_intensity
luminous_intensity = Symbol("(luminous_intensity)", positive=True)
#: dimensionless
dimensionless = sympify(1)
#: logarithmic
logarithmic = Symbol("(logarithmic)", positive=True)
#: A list of all of the base dimensions
base_dimensions = [
mass,
length,
time,
temperature,
angle,
current_mks,
dimensionless,
luminous_intensity,
logarithmic,
]
#
# Derived dimensions
#
# rate
rate = 1 / time
# frequency (alias for rate)
frequency = rate
# spatial frequency
spatial_frequency = 1 / length
#: solid_angle
solid_angle = angle * angle
#: velocity
velocity = length / time
#: acceleration
acceleration = length / time ** 2
#: jerk
jerk = length / time ** 3
#: snap
snap = length / time ** 4
#: crackle
crackle = length / time ** 5
#: pop
pop = length / time ** 6
#: area
area = length * length
#: volume
volume = area * length
#: momentum
momentum = mass * velocity
#: force
force = mass * acceleration
#: pressure
pressure = force / area
#: energy
energy = force * length
#: power
power = energy / time
#: flux
flux = power / area
#: specific_flux
specific_flux = flux / rate
#: number_density
number_density = 1 / (length * length * length)
#: density
density = mass * number_density
#: angular_momentum
angular_momentum = mass * length * velocity
#: specific_angular_momentum
specific_angular_momentum = angular_momentum / mass
#: specific_energy
specific_energy = energy / mass
#: count_flux
count_flux = 1 / (area * time)
#: count_intensity
count_intensity = count_flux / solid_angle
#: luminous_flux
luminous_flux = luminous_intensity * solid_angle
#: luminance
luminance = luminous_intensity / area
# Gaussian electromagnetic units
#: charge_cgs
charge_cgs = (energy * length) ** Rational(1, 2) # proper 1/2 power
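# (Added note) The 1/2 power follows from Coulomb's law in Gaussian units:
# charge**2 / length**2 is a force, so charge**2 ~ energy * length and
# charge ~ (energy * length)**(1/2).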
#: current_cgs
current_cgs = charge_cgs / time
#: electric_field_cgs
electric_field_cgs = charge_cgs / length ** 2
#: magnetic_field_cgs
magnetic_field_cgs = electric_field_cgs
#: electric_potential_cgs
electric_potential_cgs = energy / charge_cgs
#: resistance_cgs
resistance_cgs = electric_potential_cgs / current_cgs
#: magnetic_flux_cgs
magnetic_flux_cgs = magnetic_field_cgs * area
# SI electromagnetic units
#: charge
charge = charge_mks = current_mks * time
#: electric_field
electric_field = electric_field_mks = force / charge_mks
#: magnetic_field
magnetic_field = magnetic_field_mks = electric_field_mks / velocity
#: electric_potential
electric_potential = electric_potential_mks = energy / charge_mks
#: resistance
resistance = resistance_mks = electric_potential_mks / current_mks
#: capacitance
capacitance = capacitance_mks = charge / electric_potential
#: magnetic_flux
magnetic_flux = magnetic_flux_mks = magnetic_field_mks * area
#: inductance
inductance = inductance_mks = magnetic_flux_mks / current_mks
#: a list containing all derived_dimensions
derived_dimensions = [
rate,
velocity,
acceleration,
jerk,
snap,
crackle,
pop,
momentum,
force,
energy,
power,
charge_cgs,
electric_field_cgs,
magnetic_field_cgs,
solid_angle,
flux,
specific_flux,
volume,
luminous_flux,
area,
current_cgs,
charge_mks,
electric_field_mks,
magnetic_field_mks,
electric_potential_cgs,
electric_potential_mks,
resistance_cgs,
resistance_mks,
magnetic_flux_mks,
magnetic_flux_cgs,
luminance,
spatial_frequency,
]
#: a list containing all dimensions
dimensions = base_dimensions + derived_dimensions
#: a dict containing a bidirectional mapping from
#: mks dimension to cgs dimension
em_dimensions = {
magnetic_field_mks: magnetic_field_cgs,
magnetic_flux_mks: magnetic_flux_cgs,
charge_mks: charge_cgs,
current_mks: current_cgs,
electric_potential_mks: electric_potential_cgs,
resistance_mks: resistance_cgs,
}
for k, v in list(em_dimensions.items()):
em_dimensions[v] = k
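# (Added note) After this loop the mapping is symmetric, e.g.
# em_dimensions[charge_mks] is charge_cgs and em_dimensions[charge_cgs] is
# charge_mks, so a dimension from either system can be used as the lookup key.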
def accepts(**arg_units):
"""Decorator for checking units of function arguments.
Parameters
----------
arg_units: dict
Mapping of function arguments to dimensions, of the form 'arg1'=dimension1 etc
where ``'arg1'`` etc are the function arguments and ``dimension1`` etc
are SI base units (or combination of units), eg. length/time.
Notes
-----
    Keyword args are not dimensionally checked, being directly passed to the
decorated function.
    Function arguments that don't have attached units can bypass
dimensionality checking by not being passed to the decorator. See ``baz`` in
the examples, where ``a`` has no units.
Examples
--------
>>> import unyt as u
>>> from unyt.dimensions import length, time
>>> @accepts(a=time, v=length/time)
... def foo(a, v):
... return a * v
...
>>> res = foo(a= 2 * u.s, v = 3 * u.m/u.s)
>>> print(res)
6 m
>>> @accepts(a=length, v=length/time)
... def bar(a, v):
... return a * v
...
>>> bar(a= 2 * u.s, v = 3 * u.m/u.s)
Traceback (most recent call last):
...
TypeError: arg 'a=2 s' does not match (length)
>>> @accepts(v=length/time)
... def baz(a, v):
... return a * v
...
>>> res = baz(a= 2, v = 3 * u.m/u.s)
>>> print(res)
6 m/s
"""
def check_accepts(f):
"""Decorates original function.
Parameters
----------
f : function
Function being decorated.
Returns
-------
new_f: function
Decorated function.
"""
names_of_args = f.__code__.co_varnames
@wraps(f)
def new_f(*args, **kwargs):
"""The new function being returned from the decorator.
Check units of `args` and `kwargs`, then run original function.
Raises
------
TypeError
If the units do not match.
"""
for arg_name, arg_value in chain(zip(names_of_args, args), kwargs.items()):
if arg_name in arg_units: # function argument needs to be checked
dimension = arg_units[arg_name]
if not _has_dimensions(arg_value, dimension):
raise TypeError(
"arg '%s=%s' does not match %s"
% (arg_name, arg_value, dimension)
)
return f(*args, **kwargs)
return new_f
return check_accepts
def returns(r_unit):
"""Decorator for checking function return units.
Parameters
----------
r_unit: :py:class:`sympy.core.symbol.Symbol`
SI base unit (or combination of units), eg. length/time
of the value returned by the original function
Examples
--------
>>> import unyt as u
>>> from unyt.dimensions import length, time
>>> @returns(length)
... def f(a, v):
... return a * v
...
>>> res = f(a= 2 * u.s, v = 3 * u.m/u.s)
>>> print(res)
6 m
>>> @returns(length/time)
... def f(a, v):
... return a * v
...
>>> f(a= 2 * u.s, v = 3 * u.m/u.s)
Traceback (most recent call last):
...
TypeError: result '6 m' does not match (length)/(time)
"""
def check_returns(f):
"""Decorates original function.
Parameters
----------
f : function
Function being decorated.
Returns
-------
new_f: function
Decorated function.
"""
@wraps(f)
def new_f(*args, **kwargs):
"""The decorated function, which checks the return unit.
Raises
------
TypeError
If the units do not match.
"""
result = f(*args, **kwargs)
if not _has_dimensions(result, r_unit):
raise TypeError("result '%s' does not match %s" % (result, r_unit))
return result
return new_f
return check_returns
def _has_dimensions(quant, dim):
"""Checks the argument has the right dimensionality.
Parameters
----------
quant : :py:class:`unyt.array.unyt_quantity`
Quantity whose dimensionality we want to check.
dim : :py:class:`sympy.core.symbol.Symbol`
SI base unit (or combination of units), eg. length/time
Returns
-------
bool
True if check successful.
Examples
--------
>>> import unyt as u
>>> from unyt.dimensions import length, time
>>> _has_dimensions(3 * u.m/u.s, length/time)
True
>>> _has_dimensions(3, length)
False
"""
try:
arg_dim = quant.units.dimensions
except AttributeError:
arg_dim = dimensionless
return arg_dim == dim
| yt-project/unyt | unyt/dimensions.py | Python | bsd-3-clause | 9,592 | [
"Gaussian"
] | 814088e95204e83b3c26ecaf8f68eff7d4c82150f3ce12650e1612ab78702126 |
#
# Pythics is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pythics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pythics. If not, see <http://www.gnu.org/licenses/>.
#
# Last Updated: 5-1-2011 Updated By: Brian D'Urso
import visa
import numpy as npy
import pythics.libinstrument
class PowerSupply(visa.GpibInstrument):
#initialization
def __init__(self,*args,**kwargs):
visa.GpibInstrument.__init__(self,*args, **kwargs)
self.write('*CLS;*RST')
def read_volt(self):
#returns both programmed value and measured value
p=self.ask('MEAS:VOLT?')
return p
def read_current(self):
p=self.ask('MEAS:CURR?')
return p
def set_volts(self,x):
self.write('VOLT '+ x +' V')
def set_current(self,y):
self.write('CURR '+ y +' A')
def set_overvolt(self,z):
#sets overvolt and overcurrent protection
self.write('PROT' + z +';PROT:STAT ON')
def output_on(self):
self.write("OUTP 1")
def output_off(self):
self.write("OUTP 0")
| LunarLanding/Pythics | pythics/instruments/Agilent_6652A.py | Python | gpl-3.0 | 1,576 | [
"Brian"
] | 015f1cbd69d75693b95bc85159dfbb823a58d39b8aa8255fffba6202b3565b58 |
#Author : Lewis Mervin [email protected]
#Supervisor : Dr. A. Bender
#All rights reserved 2016
#Protein Target Prediction Tool trained on SARs from PubChem (Mined 21/06/16) and ChEMBL21
#Molecular Descriptors : 2048bit Morgan Binary Fingerprints (Rdkit) - ECFP4
#Dependencies : rdkit, sklearn, numpy
#libraries
from rdkit import Chem
from rdkit.Chem import AllChem
import cPickle
import zipfile
import glob
import os
import sys
import math
import numpy as np
from multiprocessing import Pool
import multiprocessing
def introMessage():
print '=============================================================================================='
print ' Author: Lewis Mervin\n Email: [email protected]\n Supervisor: Dr. A. Bender'
print ' Address: Centre For Molecular Informatics, Dept. Chemistry, Lensfield Road, Cambridge CB2 1EW'
print '==============================================================================================\n'
return
#calculate 2048bit morgan fingerprints, radius 2
def calcFingerprints(smiles):
m1 = Chem.MolFromSmiles(smiles)
fp = AllChem.GetMorganFingerprintAsBitVect(m1,2, nBits=2048)
binary = fp.ToBitString()
return list(binary)
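#added sketch (not used by the pipeline): descriptor for a single molecule,
#'CCO' (ethanol) is just an illustrative SMILES
def _example_fingerprint():
    bits = calcFingerprints('CCO') # list of 2048 '0'/'1' characters
    assert len(bits) == 2048
    return bits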
#calculate fingerprints for chunked array of smiles
def arrayFP(inp):
outfp = []
outsmi = []
for i in inp:
try:
outfp.append(calcFingerprints(i))
outsmi.append(i)
except:
print 'SMILES Parse Error: ' + i
return outfp,outsmi
#import user query
def importQuery(in_file):
query = open(in_file).read().splitlines()
#collect IDs, if present
if len(query[0].split()) > 1:
ids = [line.split()[1] for line in query]
query = [line.split()[0] for line in query]
else:
ids = None
matrix = np.empty((len(query), 2048), dtype=np.uint8)
smiles_per_core = int(math.ceil(len(query) / N_cores)+1)
chunked_smiles = [query[x:x+smiles_per_core] for x in xrange(0, len(query), smiles_per_core)]
pool = Pool(processes=N_cores) # set up resources
jobs = pool.imap(arrayFP, chunked_smiles)
current_end = 0
processed_smi = []
for i, result in enumerate(jobs):
matrix[current_end:current_end+len(result[0]), :] = result[0]
current_end += len(result[0])
processed_smi += result[1]
pool.close()
pool.join()
#remove IDs of SMILES parsing errors
if ids:
processed_ids = []
for idx, smi in enumerate(query):
if smi in processed_smi:
processed_ids.append(ids[idx])
ids = processed_ids
#if IDs weren't present, use SMILES as IDs
else:
ids = processed_smi
return matrix[:current_end], processed_smi, ids
#get info for uniprots
def getUniprotInfo():
if os.name == 'nt': sep = '\\'
else: sep = '/'
model_info = [l.split('\t') for l in open(os.path.dirname(os.path.abspath(__file__)) + sep + 'classes_in_model.txt').read().splitlines()]
return_dict = {l[0] : l[0:8] for l in model_info}
return return_dict
#unzip a pkl model
def open_Model(mod):
if os.name == 'nt': sep = '\\'
else: sep = '/'
with zipfile.ZipFile(os.path.dirname(os.path.abspath(__file__)) + sep + 'models' + sep + mod + '.pkl.zip', 'r') as zfile:
with zfile.open(mod + '.pkl', 'r') as fid:
clf = cPickle.load(fid)
return clf
#prediction worker
def doTargetPrediction(pickled_model_name):
if os.name == 'nt': sep = '\\'
else: sep = '/'
mod = pickled_model_name.split(sep)[-1].split('.')[0]
clf = open_Model(mod)
preds = clf.predict_proba(querymatrix)[:,1]
if sum(preds) > 0:
return mod,preds
else: return None
#prediction runner
def performTargetPrediction(models):
prediction_results = []
pool = Pool(processes=N_cores, initializer=initPool, initargs=(querymatrix,)) # set up resources
jobs = pool.imap_unordered(doTargetPrediction, models)
for i, result in enumerate(jobs):
percent = (float(i)/float(len(models)))*100 + 1
sys.stdout.write(' Performing Classification on Query Molecules: %3d%%\r' % percent)
sys.stdout.flush()
if result is not None: prediction_results.append(result)
pool.close()
pool.join()
return prediction_results
#initializer for the pool
def initPool(querymatrix_):
global querymatrix
querymatrix = querymatrix_
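#added note: passing querymatrix through the Pool initializer makes it a
#module-level global in each worker, so the fingerprint matrix is shipped once
#per process rather than being pickled with every doTargetPrediction call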
#main
if __name__ == '__main__':
if os.name == 'nt': sep = '\\'
else: sep = '/'
multiprocessing.freeze_support()
input_name = sys.argv[1]
N_cores = int(sys.argv[2])
introMessage()
print ' Predicting Targets for ' + input_name
print ' Using ' + str(N_cores) + ' Cores'
try:
desired_organism = sys.argv[3]
except IndexError:
desired_organism = None
models = [modelfile for modelfile in glob.glob(os.path.dirname(os.path.abspath(__file__)) + sep + 'models' + sep + '*.zip')]
model_info = getUniprotInfo()
if desired_organism is not None:
models = [mod for mod in models if model_info[mod.split(sep)[-1].split('.')[0]][4] == desired_organism]
print ' Predicting for organism : ' + desired_organism
out_name = input_name + '_out_raw_' + desired_organism[:3] + '.txt'
out_file = open(out_name, 'w')
else:
out_name = input_name + '_out_raw.txt'
out_file = open(out_name, 'w')
print ' Total Number of Classes : ' + str(len(models))
querymatrix,smiles,ids = importQuery(input_name)
print ' Total Number of Query Molecules : ' + str(len(querymatrix))
prediction_results = performTargetPrediction(models)
out_file.write('Uniprot\tPref_Name\tGene ID\tTarget_Class\tOrganism\tPDB_ID\tDisGeNET_Diseases_0.06\tChEMBL_First_Published\t' + '\t'.join(map(str,ids)) + '\n')
for row in sorted(prediction_results):
out_file.write('\t'.join(map(str,model_info[row[0]])) + '\t' + '\t'.join(map(str,row[1])) + '\n')
print '\n Wrote Results to: ' + out_name
out_file.close()
| lhm30/PIDGINv2 | predict_raw.py | Python | mit | 5,535 | [
"RDKit"
] | f3a115a008f6730545ec8fd98c96c79fe3adf73fefaae5b2796e404e5baf080b |
"""Setup script for ``dms_tools2``.
Written by Jesse Bloom.
"""
import sys
import os
import subprocess
import re
import glob
try:
from setuptools import setup
from setuptools import Extension
except ImportError:
raise ImportError("You must install `setuptools`")
if not (sys.version_info[0] == 3 and sys.version_info[1] >= 6):
raise RuntimeError('dms_tools2 requires Python 3.6 or higher.\n'
'You are using Python {0}.{1}'.format(
sys.version_info[0], sys.version_info[1]))
# get metadata, which is specified in another file
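# each line of interest in dms_tools2/_metadata.py is expected to look like, e.g.,
# __version__ = '2.6.0' (a single '=' with a quoted value); the parsing below splits on
# '=' and strips the surrounding quotes with the [1 : -1] slice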
metadata = {}
with open('dms_tools2/_metadata.py') as f:
lines = f.readlines()
for dataname in ['version', 'author', 'author_email', 'url']:
for line in lines:
entries = line.split('=')
assert len(entries) == 2, "Failed parsing metadata:\n{0}".format(line)
if entries[0].strip() == '__{0}__'.format(dataname):
if dataname in metadata:
raise ValueError("Duplicate metadata for {0}".format(dataname))
else:
metadata[dataname] = entries[1].strip()[1 : -1]
assert dataname in metadata, "Failed to find metadata {0}".format(dataname)
with open('README.rst') as f:
readme = f.read()
# main setup command
setup(
name = 'dms_tools2',
version = metadata['version'],
author = metadata['author'],
author_email = metadata['author_email'],
url = metadata['url'],
download_url = 'https://github.com/jbloomlab/dms_tools2/tarball/{0}'.format(
metadata['version']), # assumes tagged version is on GitHub
description = 'Deep mutational scanning (DMS) analysis tools.',
long_description = readme,
license = 'GPLv3',
install_requires = [
'attrs>=17.4.0',
'biopython>=1.68',
'pysam>=0.13',
'pandas>=0.23,<1.0',
'numpy>=1.16,<1.20', # https://github.com/numpy/numpy/issues/18355
'IPython>=5.1',
'jupyter>=1.0.0',
'matplotlib>=2.1.1,<3.3',
'plotnine>=0.3,<0.7',
'mizani<0.7',
'natsort>=5.0.3',
'pystan>=2.19,<3',
'scipy>=1.0',
'seaborn>=0.8',
'phydms>=2.4.1',
'statsmodels>=0.8',
'regex>=2.4.153',
'packaging',
'umi_tools>=1.0.0',
],
extras_require = {
'rplot':[
'rpy2>=2.9.1',
'tzlocal', # required by rpy2 but not auto installed in 2.9.3
]
},
platforms = 'Linux and Mac OS X.',
packages = ['dms_tools2'],
package_dir = {'dms_tools2':'dms_tools2'},
package_data = {'dms_tools2':['rplot_Rcode.R']},
scripts = [
'scripts/dms2_bcsubamp',
'scripts/dms2_batch_bcsubamp',
'scripts/dms2_prefs',
'scripts/dms2_batch_prefs',
'scripts/dms2_diffsel',
'scripts/dms2_batch_diffsel',
'scripts/dms2_fracsurvive',
'scripts/dms2_batch_fracsurvive',
'scripts/dms2_logoplot',
],
ext_modules = [
Extension('dms_tools2._cutils', ['dms_tools2/_cutils.c'],
extra_compile_args=["-Wno-error=declaration-after-statement"])
],
)
| jbloomlab/dms_tools2 | setup.py | Python | gpl-3.0 | 3,208 | [
"Biopython",
"pysam"
] | a5ba5c987931dfdcd1ce39af4725ddbadae2ac1639161ad578963c4aaae40563 |
"""
Copyright (C) 2014, Jaguar Land Rover
This program is licensed under the terms and conditions of the
Mozilla Public License, version 2.0. The full text of the
Mozilla Public License is at https://www.mozilla.org/MPL/2.0/
Rudolf Streif ([email protected])
"""
from django.contrib import admin
from vehicles.models import Vehicle
from security.models import JSONWebKey
import tracking.tasks
location_channels = ['location', 'speed', 'odometer']
class VehicleAdmin(admin.ModelAdmin):
"""
Administration view for Vehicles.
"""
fieldsets = [
(None, {'fields': ['veh_name']}),
('Vehicle Information', {'fields': ['veh_make', 'veh_model', 'veh_vin', 'veh_year', ('veh_picture', 'detail_picture')]}),
('Account Information', {'fields': ['account']}),
('RVI Information', {'fields': ['veh_rvibasename']}),
('Security Information', {'fields': ['veh_key']}),
]
def subscribe_location(self, request, vehicles):
for vehicle in vehicles:
tracking.tasks.subscribe(vehicle, location_channels)
self.message_user(request, "Location subscriptions sent to selected vehicles.")
subscribe_location.short_description = "Subscribe to Location"
def unsubscribe_location(self, request, vehicles):
for vehicle in vehicles:
tracking.tasks.unsubscribe(vehicle, location_channels)
self.message_user(request, "Location subscription cancellations sent to selected vehicles.")
unsubscribe_location.short_description = "Unsubscribe from Location"
actions = [subscribe_location, unsubscribe_location]
list_display = ('veh_name', 'veh_make', 'veh_model', 'veh_vin', 'veh_rvistatus', 'list_account', 'list_picture')
readonly_fields = ('detail_picture',)
admin.site.register(Vehicle, VehicleAdmin)
| rstreif/rvi_backend | web/vehicles/admin.py | Python | mpl-2.0 | 1,859 | [
"Jaguar"
] | 9bed83d3c509c90e3804ae01e451e539a65fd135507b2fc6a2984fe1874b541e |
"""Installers for programming language specific libraries.
"""
import os
from fabric.api import env, cd, settings
from cloudbio import fabutils
from cloudbio.custom import shared
def r_library_installer(config):
"""Install R libraries using CRAN and Bioconductor.
"""
with shared._make_tmp_dir() as tmp_dir:
with cd(tmp_dir):
# Create an Rscript file with install details.
out_file = os.path.join(tmp_dir, "install_packages.R")
_make_install_script(out_file, config)
# run the script and then get rid of it
            # try the conda-provided Rscript first, then fall back to any Rscript on the PATH
rlib_installed = False
rscripts = []
conda_bin = shared._conda_cmd(env)
if conda_bin:
rscripts.append(fabutils.find_cmd(env, os.path.join(os.path.dirname(conda_bin), "Rscript"),
"--version"))
rscripts.append(fabutils.find_cmd(env, "Rscript", "--version"))
for rscript in rscripts:
if rscript:
env.safe_run("%s %s" % (rscript, out_file))
rlib_installed = True
break
if not rlib_installed:
env.logger.warn("Rscript not found; skipping install of R libraries.")
env.safe_run("rm -f %s" % out_file)
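# For reference, the config dictionary consumed here and in _make_install_script uses the
# keys below; the values are purely illustrative:
#   {"cranrepo": "http://cran.rstudio.com/", "biocrepo": "http://bioconductor.org/biocLite.R",
#    "update_packages": True, "cran": ["ggplot2"], "bioc": ["Biobase"],
#    "cran-after-bioc": [], "github": ["user/pkg;1.0"]}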
def _make_install_script(out_file, config):
if env.safe_exists(out_file):
env.safe_run("rm -f %s" % out_file)
env.safe_run("touch %s" % out_file)
lib_loc = os.path.join(env.system_install, "lib", "R", "site-library")
env.safe_sudo("mkdir -p %s" % lib_loc)
with settings(warn_only=True):
env.safe_sudo("chown -R %s %s" % (env.user, lib_loc))
repo_info = """
.libPaths(c("%s"))
library(methods)
cran.repos <- getOption("repos")
cran.repos["CRAN" ] <- "%s"
options(repos=cran.repos)
source("%s")
""" % (lib_loc, config["cranrepo"], config["biocrepo"])
env.safe_append(out_file, repo_info)
install_fn = """
repo.installer <- function(repos, install.fn, pkg_name_fn) {
%s
maybe.install <- function(pname) {
if (!is.null(pkg_name_fn)) {
pinfo <- pkg_name_fn(pname)
ipkgs <- installed.packages()[,3][pinfo["pkg"]]
if (is.na(ipkgs[pinfo["pkg"]]) || pinfo["version"] != ipkgs[pinfo["pkg"]])
install.fn(pinfo["pname"])
}
else if (!(is.element(pname, installed.packages()[,1])))
install.fn(pname)
}
}
"""
if config.get("update_packages", True):
update_str = """
update.packages(lib.loc="%s", repos=repos, ask=FALSE)
""" % lib_loc
else:
update_str = "\n"
env.safe_append(out_file, install_fn % update_str)
std_install = """
std.pkgs <- c(%s)
std.installer = repo.installer(cran.repos, install.packages, NULL)
lapply(std.pkgs, std.installer)
""" % (", ".join('"%s"' % p for p in config['cran']))
env.safe_append(out_file, std_install)
if len(config.get("bioc", [])) > 0:
bioc_install = """
bioc.pkgs <- c(%s)
bioc.installer = repo.installer(biocinstallRepos(), biocLite, NULL)
lapply(bioc.pkgs, bioc.installer)
""" % (", ".join('"%s"' % p for p in config['bioc']))
env.safe_append(out_file, bioc_install)
if config.get("cran-after-bioc"):
std2_install = """
std2.pkgs <- c(%s)
lapply(std2.pkgs, std.installer)
""" % (", ".join('"%s"' % p for p in config['cran-after-bioc']))
env.safe_append(out_file, std2_install)
if config.get("github"):
dev_install = """
library(devtools)
github.pkgs <- c(%s)
get_pkg_name <- function(orig) {
c(pkg=unlist(strsplit(unlist(strsplit(orig, "/"))[2], "@"))[1],
version=unlist(strsplit(orig, ";"))[2],
pname=unlist(strsplit(orig, ";"))[1])
}
github_installer = repo.installer(NULL, install_github, get_pkg_name)
lapply(github.pkgs, github_installer)
""" % (", ".join('"%s"' % p for p in config['github']))
env.safe_append(out_file, dev_install) | elkingtonmcb/cloudbiolinux | cloudbio/libraries.py | Python | mit | 4,183 | [
"Bioconductor"
] | 6486e3e6279a2be1084d51728d29548f5cd9fee76efcb66c3f5f79f8c86d6ecb |
#from spectral_stack import stacking #need to make sure Erik's code is installed
from spectral_cube import SpectralCube, Projection
import numpy as np
import numpy.ma as ma
import astropy.units as u
import matplotlib.pyplot as plt
from astropy.io import fits
from matplotlib import colors, cm
import aplpy
from astropy.table import Table, Column, vstack, QTable, MaskedColumn
import re
import glob
from astropy.wcs import WCS
import os
import ipdb
def makeSampleTable(regridDir, outDir, scriptDir, vtype='mom1', outname='test', release='DR1', sourceList=None):
'''
Calculate stacking results for each galaxy and make a fits table
of the results.
Inputs:
regridDir: directory containing regridded data
scriptDir: directory with DEGAS data base
outDir: directory for output results.
vtype: velocity type. Options are 'mom1' and 'peakVelocity'. Default: 'mom1'
outname: basename for output fits table.
Date Programmer Description of Changes
----------------------------------------------------------------------
10/29/2020 Yiqing Song Original Code
10/30/2020 A.A. Kepley Added comments and clarified inputs
12/3/2020 A.A. Kepley Modified to pull list of galaxies from
degas base table.
'''
from datetime import datetime
# get list of dr1 galaxies
degas = Table.read(os.path.join(scriptDir,'degas_base.fits'))
idx = degas[release] == 1
if not sourceList:
sourceList = degas[idx]['NAME']
    # go through each file, create a table, and append it to the list of tables.
tablelist=[]
for galaxy in degas[idx]:
if galaxy['NAME'] in sourceList:
full_tab = makeGalaxyTable(galaxy, vtype, regridDir, outDir)
tablelist.append(full_tab)
# stack all the tables together
table=vstack(tablelist)
table.meta['DATE'] = str(datetime.now())
table.meta['RELEASE'] = release
# Write out the table
table.write(os.path.join(outDir,outname+'_'+vtype+'.fits'),overwrite=True)
return table
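# illustrative call of makeSampleTable (directory and file names are placeholders):
#   makeSampleTable('/path/to/regrid', '/path/to/output', '/path/to/scripts',
#                   vtype='mom1', outname='stack', release='DR1')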
def makeGalaxyTable(galaxy, vtype, regridDir, outDir):
'''
make fitstable containing all lines from stacking results for each galaxy
galaxy: data for galaxy we are processing from degas_base.fits
vtype: velocity type that we are stacking on
scriptDir: script directory -- AAK: if I read in degas_base.fits early,
so I still need to pass this down.?
regridDir: regrid directory
outDir: output directory
Date Programmer Description of Changes
----------------------------------------------------------------------
10/29/2020 Yiqing Song Original Code
12/3/2020 A.A. Kepley Added comments
4/29/2021 A.A. Kepley Moved all calculations for galaxy here
instead of repeating per line.
5/6/2021 A.A. Kepley Modified so that all lines are calculated at once.
'''
print("Processing " + galaxy['NAME'] + "\n")
# Create associated maps needed for analysis.
# TODO -- double-check on mom0cut -- Is this really what i want to be doing??
## FIX UP THIS TO READ IN TO MOM0 AND ASSOCIATED ERROR PRODUCED VIA ANOTHER ROUTE.
cubeCO, comap = mapCO(galaxy, regridDir, outDir)
stellarmap = mapStellar(galaxy, regridDir, outDir) # to apply CO mask mask=mom0cut
sfrmap = mapSFR(galaxy, regridDir, outDir)
ltirmap = mapLTIR(galaxy, regridDir,outDir)
R_arcsec, R_kpc, R_r25 = mapGCR(galaxy, comap) # comap is just used to get coordinates
velocity_file = galaxy['NAME']+'_12CO_'+vtype+'_regrid.fits'
vhdu = fits.open(os.path.join(regridDir,velocity_file))
velocity = Projection.from_hdu(vhdu)
# read in HCN
linefile = glob.glob(os.path.join(regridDir,galaxy['NAME']+'_HCN_*_hanning1_maxnchan_smooth.fits'))[0]
cubeHCN = SpectralCube.read(os.path.join(regridDir,linefile))
# read in HCO+
linefile = glob.glob(os.path.join(regridDir,galaxy['NAME']+'_HCOp_*_hanning1_smooth_regrid.fits'))[0]
cubeHCOp = SpectralCube.read(os.path.join(regridDir,linefile))
# For NGC6946, skip 13CO and C18O since we don't have that data.
# For NGC4569, we are temporarily missing data.
if (galaxy['NAME'] != 'NGC6946') & (galaxy['NAME'] != 'NGC4569'):
# read in 13CO
linefile = glob.glob(os.path.join(regridDir,galaxy['NAME']+'_13CO_*_hanning1_smooth_regrid.fits'))[0]
cube13CO = SpectralCube.read(os.path.join(regridDir,linefile))
# read in C18O
linefile = glob.glob(os.path.join(regridDir,galaxy['NAME']+'_C18O_*_hanning1_smooth_regrid.fits'))[0]
cubeC18O = SpectralCube.read(os.path.join(regridDir,linefile))
else:
cube13CO = None
cubeC18O = None
#get the full stack result for each line
full_stack = makeStack(galaxy, regridDir, outDir,
cubeCO = cubeCO,
cubeHCN = cubeHCN, cubeHCOp=cubeHCOp,
cube13CO = cube13CO, cubeC18O = cubeC18O,
velocity = velocity,
comap = comap,
sfrmap = sfrmap, ltirmap = ltirmap,
stellarmap=stellarmap, R_arcsec=R_arcsec,
R_r25=R_r25)
# remove stacks that don't have CO spectra
nstack = len(full_stack)
keepstack = np.full(nstack,True)
for i in range(nstack):
if np.all(full_stack['stack_profile_CO'][i] == 0):
keepstack[i] = False
full_stack = full_stack[keepstack]
# create directory for spectral line fits
fit_plot_dir = os.path.join(outDir,'spec_fits')
if not os.path.exists(fit_plot_dir):
os.mkdir(fit_plot_dir)
# calculate integrated intensity
full_stack = addIntegratedIntensity(full_stack, fit_plot_dir)
# return the table and the stack.
return full_stack
def makeStack(galaxy, regridDir, outDir,
cubeCO = None,
cubeHCN = None, cubeHCOp=None,
cube13CO = None, cubeC18O = None,
velocity = None, comap = None,
sfrmap = None, ltirmap = None,
stellarmap = None, R_arcsec = None,
R_r25 = None):
'''
make stacks for all lines and ancillary data for one galaxy
output python dictionaries
galaxy: degas_base.fits entry for galaxy we are processing
line: line we are stacking on
regridDir: directory with all the regridded data
outDir: directory with all the output data.
ASSUMPTIONS:
We are assuming that all linecubes have been smoothed
and regridded at this point, so the coordinate systems match.
Date Programmer Description of Changes
----------------------------------------------------------------------
10/29/2020 Yiqing Song Original Code
12/3/2020 A.A. Kepley Added more comments plus minor changes to
use the degas table
12/10/2020 A.A. Kepley Added LTIR
4/29/2021 A.A. Kepley Moved up map calculations to makeStack to
avoid doing multiple times.
'''
## create radius bins
binmap, binedge, binlabels = makeRadiusBins(galaxy, R_arcsec, outDir)
plotBins(galaxy, binmap, binedge, binlabels, 'radius', outDir)
# stack on radius
stack_radius = stackLines(galaxy, velocity,
binmap, binedge, 'radius',
cubeCO = cubeCO,
cubeHCN = cubeHCN, cubeHCOp = cubeHCOp,
cube13CO = cube13CO, cubeC18O = cubeC18O,
sfrmap = sfrmap, ltirmap = ltirmap,
stellarmap = stellarmap)
stack_radius.add_column(Column(np.tile('radius',len(stack_radius))),name='bin_type',index=0)
# create r25 bins
binmap, binedge, binlabels = makeRadiusBins(galaxy, R_r25, outDir,r25=True)
plotBins(galaxy, binmap, binedge, binlabels, 'r25', outDir)
# stack on R25
stack_r25 = stackLines(galaxy, velocity,
binmap, binedge, 'r25',
cubeCO = cubeCO,
cubeHCN = cubeHCN, cubeHCOp = cubeHCOp,
cube13CO = cube13CO, cubeC18O = cubeC18O,
sfrmap = sfrmap, ltirmap = ltirmap,
stellarmap = stellarmap)
stack_r25.add_column(Column(np.tile('r25',len(stack_r25))),name='bin_type',index=0)
## create stellar mass bin
binmap, binedge, binlabels = makeStellarmassBins(galaxy, stellarmap, 'stellarmass', outDir)
plotBins(galaxy, binmap, binedge, binlabels, 'stellarmass', outDir)
## do stellar mass stack
stack_stellarmass=stackLines(galaxy, velocity,
binmap, binedge, 'stellarmass',
cubeCO = cubeCO,
cubeHCN = cubeHCN, cubeHCOp = cubeHCOp,
cube13CO = cube13CO, cubeC18O = cubeC18O,
sfrmap = sfrmap, ltirmap = ltirmap,
stellarmap = stellarmap)
stack_stellarmass.add_column(Column(np.tile('stellarmass',len(stack_stellarmass))),name='bin_type',index=0)
# remove excess bins by requiring that the number of spectra in the stack
# keep increasing.
stack_stellarmass.sort('bin_mean',reverse=True)
print(stack_stellarmass['bin_type','bin_mean','stack_weights'])
delta_pix = stack_stellarmass['stack_weights'][1:] - stack_stellarmass['stack_weights'][0:-1]
# if difference is very small ignore.
delta_pix[np.where( (delta_pix <0 ) & (delta_pix >-5))] = 1
lastidx = np.argmax(delta_pix < 0)+1
if lastidx == 1:
lastidx = len(stack_stellarmass)
orig_len = len(stack_stellarmass)
stack_stellarmass = stack_stellarmass[0:lastidx]
new_len = len(stack_stellarmass)
print("Removed " + str(orig_len - new_len) + " of " + str(orig_len) + " spectra from stellarmass stack")
## create intensity bins
#binmap, binedge, binlabels = makeBins(galaxy, comap, 'intensity', outDir)
#plotBins(galaxy, binmap, binedge, binlabels, 'intensity', outDir)
# do intensity stack
#stack_intensity=stackLines(galaxy, velocity,
# binmap, binedge, 'intensity',
# cubeCO = cubeCO,
# cubeHCN = cubeHCN, cubeHCOp = cubeHCOp,
# cube13CO = cube13CO, cubeC18O = cubeC18O,
# sfrmap = sfrmap, ltirmap = ltirmap,
# stellarmap = stellarmap)
# stack_intensity.add_column(Column(np.tile('intensity',len(stack_intensity))),name='bin_type',index=0)
# # remove excess bins by requiring that the number of spectra in the stack
# # keeping increasing.
# stack_intensity.sort('bin_mean',reverse=True)
# print(stack_intensity['bin_type','bin_mean','stack_weights'])
# delta_pix = stack_intensity['stack_weights'][1:] - stack_intensity['stack_weights'][0:-1]
# # if difference is very small skip.
# delta_pix[np.where( (delta_pix <0 ) & (delta_pix >-5))] = 1
# lastidx = np.argmax(delta_pix < 0)+1
# if lastidx == 0:
# lastidx = len(stack_intensity)
# orig_len = len(stack_intensity)
# stack_intensity = stack_intensity[0:lastidx]
# new_len = len(stack_intensity)
# print("Removed " + str(orig_len - new_len) + " of " + str(orig_len) + " spectra from intensity stack")
#full_stack = vstack([stack_radius,stack_r25, stack_stellarmass, stack_intensity])
full_stack = vstack([stack_radius,stack_r25, stack_stellarmass])
full_stack.add_column(Column(np.tile(galaxy['NAME'],len(full_stack))),name='galaxy',index=0)
return full_stack
def stackLines(galaxy, velocity,
binmap, binedge, bintype,
cubeCO = None,
cubeHCN = None,
cubeHCOp = None,
cube13CO = None,
cubeC18O = None,
sfrmap = None,
ltirmap = None,
stellarmap = None,
maxAbsVel = 250.0):
'''
Actually do the stacking.
cube: data we are stacking (SpectralCube)
galaxy: line from degas_base.fits table with galaxy information.
velocity: velocity map to stack data using
binmap: bin map
binedge: bin edges
binlabel: bin labels
bintype: type of bin
cubeCO: CO cube
cubeHCN: HCN cube
cubeHCOp: HCO+ cube
cube13CO: 13CO cube
cubeC18O: C18O cube
sfrmap: star formation rate map
ltirmap: LTIR map
stellarmap: stellar mass map
maxAbsVel: absolute maximum velocity to include in spectral in km/s.
Assumes line is centered at zero (which should be true for stacking).
Date Programmer Description of Changes
----------------------------------------------------------------------
10/29/2020 Yiqing Song Original Code
12/03/2020 A.A. Kepley Added comments and moved bin creation up
a level to simplify code.
12/10/2020 A.A. Kepley added LTIR calculation
5/6/2021 A.A. Kepley modified to fit all lines for one galaxy at
once so I can use CO FWHM to calculate upper
limits for other lines
9/2/2021 A.A. Kepley Added code to calculate the stellar mass
surface density. Cleaned up units.
'''
from signal_id.stacking import bin_by_label as BinByLabel
# get the relevant info on the galaxy and cube
D = galaxy['DIST_MPC'] * u.Mpc
pix_area = (np.radians(np.abs(cubeCO.header['CDELT1']))*D.to(u.pc))**2 #pc^2
# do the individual line stacks.
# -----------------------------
stack={}
if cubeCO:
stack['CO'], labelvals = BinByLabel(cubeCO,
binmap.value, velocity,
weight_map=None,
return_weights=True)
if cubeHCN:
stack['HCN'], labelvals = BinByLabel(cubeHCN,
binmap.value, velocity,
weight_map=None,
return_weights=True)
if cubeHCOp:
stack['HCOp'], labelvals = BinByLabel(cubeHCOp,
binmap.value, velocity,
weight_map=None,
return_weights=True)
if cube13CO:
stack['13CO'], labelvals = BinByLabel(cube13CO,
binmap.value, velocity,
weight_map=None,
return_weights=True)
if cubeC18O:
stack['C18O'], labelvals = BinByLabel(cubeC18O,
binmap.value, velocity,
weight_map=None,
return_weights=True)
# putting the table together
# -------------------------
# first add bins
t = {'bin_lower': binedge[0:-1].value,
'bin_upper': binedge[1:].value,
'bin_mean': (binedge[0:-1].value + binedge[1:].value)/2.0}
total_stack = QTable(t,masked=True)
nstack = len(labelvals[labelvals != 99])
# Then set up the structures for the bin-based profiles, etc.
spectral_profile = {}
    stack_weights = np.zeros(nstack)
for line in ['CO','HCN','HCOp','13CO','C18O']:
spectral_profile[line] = np.zeros((nstack,len(stack['CO'][0]['spectral_axis'])))
bin_label = np.zeros(nstack)
bin_area = np.zeros(nstack) * pix_area.unit
bin_unit = np.full(nstack, "",dtype="S15")
sfr_mean = np.zeros(nstack) * sfrmap.unit
ltir_mean = np.zeros(nstack)* ltirmap.unit
mstar_mean = np.zeros(nstack) * stellarmap.unit
spectral_axis = np.zeros((nstack,len(stack['CO'][0]['spectral_axis']))) * stack['CO'][0]['spectral_axis'].unit
for i in range(nstack):
spectral_axis[i,:] = stack['CO'][i]['spectral_axis']
bin_label[i] = stack['CO'][i]['label']
        bin_area[i] = float(np.sum(binmap.value == bin_label[i])) * pix_area # number of pixels in the bin times the pixel area
bin_unit[i] = binedge[0].unit.to_string()
stack_weights[i] = stack['CO'][i]['weights']
# calculating the mean SFR as requested
if sfrmap is not None:
sfr_mean[i] = np.nanmean(sfrmap[binmap==bin_label[i]])
# calculate the mean LTIR as requested
if ltirmap is not None:
ltir_mean[i] = np.nanmean(ltirmap[binmap==bin_label[i]])
# calculate mean stellar mass as requested
if stellarmap is not None:
mstar_mean[i] = np.nanmean(stellarmap[binmap==bin_label[i]])
# get the spectral profiles
for line in ['CO','HCN','HCOp','13CO','C18O']:
if line in stack.keys():
spectral_profile[line][i,:] = stack[line][i]['spectrum']
else:
spectral_profile[line][i,:] = np.full(len(stack['CO'][0]['spectral_axis']),np.nan)
# add above items to the table
total_stack.add_column(Column(spectral_axis),name='spectral_axis')
for line in ['CO','HCN','HCOp','13CO','C18O']:
total_stack.add_column(Column(spectral_profile[line], name='stack_profile_'+line,unit=cubeCO.unit))
total_stack.add_column(Column(bin_unit,name='bin_unit'),index=3)
total_stack.add_column(Column(bin_area,name='bin_area'),index=4) # pc^2
total_stack.add_column(Column(stack_weights,name='stack_weights'),index=5)
if sfrmap is not None:
total_stack.add_column(Column(sfr_mean.to('Msun/(yr pc2)'),name='sfr_mean')) # Msun/yr/(kpc -> pc)^2
total_stack.add_column(Column(sfr_mean.to('Msun/(yr pc2)') * bin_area),name='sfr_total') # Msun/yr/pc^2 * (pc)^2 = Msun/Yr
if ltirmap is not None:
total_stack.add_column(Column(ltir_mean) , name='ltir_mean') # Lsun/pc^2
total_stack.add_column(Column(ltir_mean * bin_area),name='ltir_total') # Lsun/pc^2 * pc^2 = Lsun
if stellarmap is not None:
total_stack.add_column(Column(mstar_mean), name='mstar_mean') # Msun/pc^2
total_stack.add_column(Column(mstar_mean * bin_area), name='mstar_total') # Msun
return total_stack
def addIntegratedIntensity(full_stack, outDir, alpha_co = 4.3*(u.Msun / u.pc**2) / (u.K * u.km / u.s)):
'''
calculate the integrated line flux and the associated noise
Input:
full_stack: astropy table with one row containing the stacks for
each bin.
outDir: directory for diagnostics plots
alpha_co: alpha_co value to use to calculate molecular gas mass and mass surface density.
Output:
stack with added columns for the integrated intensity.
Date Programmer Description of Changes
----------------------------------------------------------------------
5/13/2021 A.A. Kepley Original Code
9/2/2021 A.A. Kepley Added CO mass calculation
'''
# get number of stacks
nstack = len(full_stack)
# initialize output arrays
int_intensity_fit = {}
int_intensity_fit_err = {}
int_intensity_fit_uplim = {}
int_intensity_sum = {}
int_intensity_sum_err = {}
int_intensity_sum_uplim = {}
fwhm_fit = np.zeros(nstack)* full_stack['spectral_axis'].unit
int_intensity_fit['CO'] = ma.zeros(nstack) * full_stack['stack_profile_CO'].unit * full_stack['spectral_axis'].unit
int_intensity_fit_err['CO'] = ma.zeros(nstack)* full_stack['stack_profile_CO'].unit * full_stack['spectral_axis'].unit
int_intensity_fit_uplim['CO'] = np.full(nstack,False)
for line in ['CO','HCN','HCOp','13CO','C18O']:
int_intensity_sum[line] = ma.zeros(nstack) * full_stack['stack_profile_CO'].unit * full_stack['spectral_axis'].unit
int_intensity_sum_err[line] = ma.zeros(nstack) * full_stack['stack_profile_CO'].unit * full_stack['spectral_axis'].unit
int_intensity_sum_uplim[line] = np.full(nstack,False)
# calculate the integrated intensity from a fit and from a simple sum.
for i in range(nstack):
# fit CO line to get integration region
line = 'CO'
(stack_int, stack_int_err, stack_fit, uplim) = fitIntegratedIntensity(full_stack[i], line, outDir)
# skip the CO if you can't fit anything to it.
if stack_fit.nvarys == 1:
int_intensity_fit['CO'][i] = np.nan
int_intensity_fit_err['CO'][i] = np.nan
            int_intensity_fit_uplim['CO'][i] = True
fwhm_fit[i] = np.nan
for line in ['CO','HCN', 'HCOp', '13CO','C18O']:
int_intensity_sum[line][i] = np.nan
int_intensity_sum_err[line][i] = np.nan
int_intensity_sum_uplim[line][i] = True
else:
fwhm_velrange = full_stack[i]['spectral_axis'][stack_fit.eval() > 0.5 * np.max(stack_fit.eval())]
fwhm = fwhm_velrange[-1] - fwhm_velrange[0]
int_intensity_fit['CO'][i] = stack_int
int_intensity_fit_err['CO'][i] = stack_int_err
            int_intensity_fit_uplim['CO'][i] = uplim
fwhm_fit[i] = fwhm
int_axis = full_stack[i]['spectral_axis'][stack_fit.eval() > 0.01 * np.max(stack_fit.eval())]
velrange = [np.min(int_axis), np.max(int_axis)]
# straight sum
for line in ['CO','HCN', 'HCOp', '13CO','C18O']:
if ( ( full_stack[i]['galaxy'] == 'NGC6946') &
( ( line == '13CO') | (line == 'C18O'))):
int_intensity_sum[line][i] = np.nan
int_intensity_sum_err[line][i] = np.nan
int_intensity_sum_uplim[line][i] = True
else:
(stack_sum, stack_sum_err, uplim) = sumIntegratedIntensity(full_stack[i],
line,
outDir,
fwhm=fwhm,
velrange=velrange)
int_intensity_sum[line][i] = stack_sum
int_intensity_sum_err[line][i] = stack_sum_err
int_intensity_sum_uplim[line][i] = uplim
# add CO measurements to table
full_stack.add_column(Column(int_intensity_fit['CO'],name='int_intensity_fit_CO'))
full_stack.add_column(Column(int_intensity_fit_err['CO'],name='int_intensity_fit_err_CO'))
full_stack.add_column(Column(int_intensity_fit_uplim['CO'],name='int_intensity_fit_uplim_CO'))
# calculate CO mass and add to table
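    # alpha_co has units of (Msun/pc^2) per (K km/s), so comass_mean below is a molecular
    # gas mass surface density and comass_total = comass_mean * bin_area is the mass per bin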
full_stack.add_column(Column(int_intensity_sum['CO']*alpha_co,name='comass_mean'))
full_stack.add_column(full_stack['comass_mean'] * full_stack['bin_area'],name='comass_total')
full_stack.meta['alpha_co'] = alpha_co.to_string()
# add integrated line intensity measurements to the data table.
for line in ['CO','HCN','HCOp','13CO','C18O']:
full_stack.add_column(Column(int_intensity_sum[line],name='int_intensity_sum_'+line))
full_stack.add_column(Column(int_intensity_sum_err[line],name='int_intensity_sum_err_'+line))
full_stack.add_column(Column(int_intensity_sum_uplim[line],name='int_intensity_sum_uplim_'+line))
full_stack.add_column(Column(fwhm_fit),name='FWHM_fit')
# Calculate line ratios and add to data table
full_stack = calcLineRatio(full_stack,'HCN','CO')
full_stack = calcLineRatio(full_stack,'HCOp','CO')
full_stack = calcLineRatio(full_stack,'13CO','CO')
full_stack = calcLineRatio(full_stack,'C18O','CO')
full_stack = calcLineRatio(full_stack,'13CO','C18O')
full_stack = calcLineRatio(full_stack,'HCOp','HCN')
full_stack = calcOtherRatio(full_stack,'ltir_mean','HCN')
full_stack = calcOtherRatio(full_stack,'ltir_mean','CO')
return full_stack
def calcLineRatio(full_stack,line1, line2):
'''
calculate arbitrary line ratios and add to stack
Date Programmer Description of Changes
----------------------------------------------------------------------
9/9/2021 A.A. Kepley Original Code
'''
if 'int_intensity_sum_'+line1 not in full_stack.columns:
print('line '+line1+' not in stack')
return
if 'int_intensity_sum_'+line2 not in full_stack.columns:
print('line '+line2+' not in stack')
return
ratio = full_stack['int_intensity_sum_'+line1] / full_stack['int_intensity_sum_'+line2]
error = ratio * \
np.sqrt( (full_stack['int_intensity_sum_err_'+line1]/full_stack['int_intensity_sum_'+line1])**2 + \
(full_stack['int_intensity_sum_err_'+line2]/full_stack['int_intensity_sum_'+line2])**2)
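    # a ratio is masked when both lines are upper limits, flagged as an upper limit when
    # only the numerator is a limit, and as a lower limit when only the denominator is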
valid = full_stack['int_intensity_sum_uplim_'+line1] & full_stack['int_intensity_sum_uplim_'+line2]
uplim = full_stack['int_intensity_sum_uplim_'+line1] & \
np.invert(full_stack['int_intensity_sum_uplim_'+line2])
lolim = np.invert(full_stack['int_intensity_sum_uplim_'+line1]) & \
full_stack['int_intensity_sum_uplim_'+line2]
full_stack.add_column(MaskedColumn(ratio.value,name='ratio_'+line1+'_'+line2,mask=valid))
full_stack.add_column(MaskedColumn(error.value,name='ratio_'+line1+'_'+line2+'_err',mask=valid))
full_stack.add_column(MaskedColumn(lolim,name='ratio_'+line1+'_'+line2+'_lolim',mask=valid))
full_stack.add_column(MaskedColumn(uplim,name='ratio_'+line1+'_'+line2+'_uplim',mask=valid))
return full_stack
def calcOtherRatio(full_stack,quant,line):
'''
calculate ratio of arbitrary input to line
Date Programmer Description of Changes
--------------------------------------------------
9/9/2021 A.A. Kepley Original Code
'''
if quant not in full_stack.columns:
print(quant+' not in stack')
return
if 'int_intensity_sum_'+line not in full_stack.columns:
print('line ' + line + ' not in stack')
return
ratio = full_stack[quant] / full_stack['int_intensity_sum_'+line]
error = ratio * (full_stack['int_intensity_sum_err_'+line]/full_stack['int_intensity_sum_'+line])
lolim = full_stack['int_intensity_sum_uplim_'+line]
full_stack.add_column(Column(ratio,name='ratio_'+quant+'_'+line))
full_stack.add_column(Column(error,name='ratio_'+quant+'_'+line+'_err'))
full_stack.add_column(Column(lolim),name='ratio_'+quant+'_'+line+'_lolim')
return full_stack
def sumIntegratedIntensity(stack, line, outDir, fwhm=None, velrange=[-250,250]*(u.km/u.s), snThreshold=3.0):
'''
calculate the straight sum of the integrated intensity.
Date Programmer Description of Changes
----------------------------------------------------------------------
5/13/2021 A.A. Kepley Original Code
'''
from scipy.ndimage import label
# default is to scale to normal distribution
from scipy.stats import median_absolute_deviation as mad
spectral_axis = stack['spectral_axis']
stack_profile = stack['stack_profile_'+line]
chanwidth = spectral_axis[1] - spectral_axis[0]
lineFreeChans = ((spectral_axis > velrange[1]) | (spectral_axis < velrange[0])) & (stack_profile != 0)
# mad is already scaled to gaussian distribution
noisePerChan = mad(stack_profile[lineFreeChans]) * stack_profile.unit
lineChans = (spectral_axis < velrange[1] ) & (spectral_axis > velrange[0])
stack_sum = np.sum(stack_profile[lineChans]*chanwidth)
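    # the uncertainty assumes ~FWHM/chanwidth independent channels each with noise
    # noisePerChan, so the error on the sum is sqrt(Nchan) * chanwidth * noisePerChan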
stack_sum_err = np.sqrt(fwhm/chanwidth) * chanwidth * noisePerChan
if stack_sum > (snThreshold * stack_sum_err):
uplim = False
else:
stack_sum = stack_sum_err * snThreshold
uplim = True
# make plot
plt.clf()
fig, myax = plt.subplots(nrows=1, ncols=1, figsize=(8,6))
plt.axhline(0,color='gray',linestyle=':')
plt.plot(spectral_axis, stack_profile, label='data',color='orange')
plt.xlabel('Velocity - ' + spectral_axis.unit.to_string())
plt.ylabel('Average Intensity - ' + stack_profile.unit.to_string())
plt.title(stack['galaxy'] + ' ' + line + ' ' + stack['bin_type'] + ' ' + str(stack['bin_mean']))
plt.text(0.07, 0.95, "Noise="+noisePerChan.to_string(),transform=myax.transAxes)
plt.axhspan(-3.0*noisePerChan.value, 3.0*noisePerChan.value, color='gray', alpha=0.2)
plt.axvspan(spectral_axis[lineChans][0].value,spectral_axis[lineChans][-1].value, color='blue',alpha=0.2)
lineFreeRegs, nregs = label(lineFreeChans)
for i in range(0, nregs+1):
if np.all(lineFreeChans[lineFreeRegs == i]):
plt.axvspan(spectral_axis[lineFreeRegs == i][0].value,
spectral_axis[lineFreeRegs == i][-1].value,
color='green',alpha=0.2)
plt.legend(loc='upper right')
plotname = stack['galaxy'] + '_' + line + '_' + stack['bin_type'] + '_'+str(stack['bin_mean'])+'_sum.png'
plt.savefig(os.path.join(outDir,plotname))
plt.close()
return stack_sum, stack_sum_err, uplim
def fitIntegratedIntensity(stack, line, outDir, fwhm=None, maxAbsVel=250 * u.km/u.s, snThreshold=3.0):
'''
calculate integrated intensity via a gaussian fit
input:
stack: single stack
outDir: output directory for plots and diagnostics
fwhm: fwhm to use for upper limit estimate
maxAbsVel: maximum velocity at which we expect emission.
snThreshold: S/N threshold for peak finding
Date Programmer Description of Changes
----------------------------------------------------------------------
5/13/2021 A.A. Kepley Original Code
'''
from matplotlib import gridspec
# default is to scale to normal distribution
from scipy.stats import median_absolute_deviation as mad
#from astropy.modeling import models, fitting
from scipy import integrate
#from scipy.stats import f
from lmfit.models import GaussianModel, ConstantModel
spectral_axis = stack['spectral_axis']
stack_profile = stack['stack_profile_'+line]
chanwidth = spectral_axis[1] - spectral_axis[0]
lineFreeChans = ((spectral_axis > maxAbsVel ) | (spectral_axis < - maxAbsVel)) & \
(stack_profile.value != 0)
lineChans = (spectral_axis < maxAbsVel ) & (spectral_axis > - maxAbsVel)
noisePerChan = mad(stack_profile[lineFreeChans]) * stack_profile.unit # mad is already scaled to gaussian distribution
weights = np.ones(len(stack_profile)) / noisePerChan.value
# setup plot
plt.clf()
fig = plt.figure(figsize=(8,6))
gs = gridspec.GridSpec(2,1,height_ratios=[4,1])
ax0 = plt.subplot(gs[0])
ax1 = plt.subplot(gs[1])
#fig, myax = plt.subplots(nrows=2, ncols=1,figsize=(8,6))
ax0.axhline(0,color='gray',linestyle=':')
ax0.plot(spectral_axis, stack_profile,label='data')
ax0.set_ylabel('Average Intensity - ' + stack_profile.unit.to_string())
ax0.set_title(stack['galaxy'] + ' ' + line + ' ' + stack['bin_type'] + ' ' + str(stack['bin_mean']))
ax0.text(0.07,0.95,"Noise="+noisePerChan.to_string(),transform=ax0.transAxes)
ax0.axhspan(-3.0*noisePerChan.value, 3.0*noisePerChan.value, color='gray', alpha=0.2)
ax1.axhline(0,color='gray',linestyle=':')
ax1.set_xlabel('Velocity - ' + spectral_axis.unit.to_string())
ax1.set_ylabel('Fit Residuals - ' + stack_profile.unit.to_string())
# Start with simple DC model
dcOffset = ConstantModel()
pars_dc = dcOffset.make_params(c=0)
dcOffsetFit = dcOffset.fit(stack_profile.value,pars_dc, x=spectral_axis.value, weights=weights)
#print(dcOffsetFit.fit_report())
ax0.axhline(dcOffsetFit.best_fit,label='DC Offset',color='gray')
ax0.text(0.07,0.9,'DC BIC='+str(dcOffsetFit.bic), transform=ax0.transAxes)
# Fit Single Gaussian
g1 = GaussianModel()
pars_g1 = g1.guess(stack_profile.value,x=spectral_axis.value)
pars_g1['sigma'].max = 200.0
#pars_g1['amplitude'].min = 0.0
pars_g1['amplitude'].min = noisePerChan.value
g1Fit = g1.fit(stack_profile.value,pars_g1,x=spectral_axis.value,weights=weights)
#print(g1Fit.fit_report())
ax0.plot(spectral_axis,g1Fit.best_fit,label='1 Gauss')
ax0.text(0.07,0.85,'1 Gauss BIC='+str(g1Fit.bic), transform=ax0.transAxes)
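    # model selection below uses the BIC reported by lmfit: a lower BIC is preferred, so a
    # Gaussian is only accepted if it improves on the flat (DC offset) fit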
if (dcOffsetFit.bic > g1Fit.bic):
# single gaussian is better than line, so try a 2 gaussian fit.
# Fit 2 gaussians to compare to single gauss fit
g2 = GaussianModel(prefix='g1_') + GaussianModel(prefix='g2_')
pars_g2 = g2.make_params(g1_amplitude = g1Fit.params['amplitude'].value/2.0,
g1_sigma = g1Fit.params['sigma'].value/2.0,
g1_center = g1Fit.params['center']-g1Fit.params['sigma'],
g2_amplitude = g1Fit.params['amplitude'].value/2.0,
g2_sigma = g1Fit.params['sigma'].value/2.0,
g2_center = g1Fit.params['center']+g1Fit.params['sigma'])
#pars_g2['g1_center'].min = -maxAbsVel.value
#pars_g2['g1_center'].max = maxAbsVel.value
pars_g2['g1_sigma'].max = 200.0
#pars_g2['g1_amplitude'].min = 0.0
pars_g2['g1_amplitude'].min = noisePerChan.value
#pars_g2['g2_center'].min = -maxAbsVel.value
#pars_g2['g2_center'].max = maxAbsVel.value
pars_g2['g2_sigma'].max = 200.0
#pars_g2['g2_amplitude'].min = 0.0
pars_g2['g2_amplitude'].min = noisePerChan.value
g2Fit = g2.fit(stack_profile.value, pars_g2, x=spectral_axis.value,weights=weights)
# print(g2Fit.fit_report())
ax0.plot(spectral_axis,g2Fit.best_fit,label='2 Gauss')
ax0.text(0.07,0.8,'2 Gauss BIC='+str(g2Fit.bic), transform=ax0.transAxes)
if g2Fit.bic > g1Fit.bic:
# single gaussian fit best -- revert to single gaussian
stack_int = integrate.trapezoid(g1Fit.best_fit*stack_profile.unit,
x=spectral_axis)
# get from fit
stack_fit = g1Fit
fwhm = g1Fit.values['fwhm'] * spectral_axis.unit
stack_int_err = np.sqrt(fwhm/chanwidth) * chanwidth * noisePerChan
ax0.text(0.07,0.75,'Best: 1 Gauss', transform=ax0.transAxes)
ax1.plot(spectral_axis,g1Fit.residual)
else:
# two gaussian fit best
stack_int = integrate.trapezoid(g2Fit.best_fit*stack_profile.unit,
x=spectral_axis)
# calculate from fit
stack_fit = g2Fit
fwhm_velrange = spectral_axis[g1Fit.eval() > 0.5 * np.max(g1Fit.eval())]
fwhm = (fwhm_velrange[-1] - fwhm_velrange[0])
stack_int_err = np.sqrt(fwhm/chanwidth) * chanwidth * noisePerChan
ax0.text(0.07,0.75,'Best: 2 Gauss', transform=ax0.transAxes)
ax1.plot(spectral_axis,g2Fit.residual)
uplim = False
elif fwhm:
# dc offset is best fit. Estimate upper limit based on FWHM and S/N threshold
stack_int_err = np.sqrt(fwhm/chanwidth) * chanwidth * noisePerChan
stack_int = snThreshold * stack_int_err
uplim = True
stack_fit = dcOffsetFit
        ax0.text(0.07,0.8,'Best: DC', transform=ax0.transAxes)
ax1.plot(spectral_axis,dcOffsetFit.residual)
else:
stack_int = np.nan * spectral_axis.unit * stack_profile.unit
stack_int_err = np.nan* spectral_axis.unit * stack_profile.unit
fwhm = np.nan* spectral_axis.unit
uplim = True
stack_fit = dcOffsetFit
ax0.legend(loc='upper right')
plotname = stack['galaxy'] + '_' + line + '_' + stack['bin_type'] + '_'+str(stack['bin_mean'])+'_fit.png'
plt.savefig(os.path.join(outDir,plotname))
plt.close()
return stack_int, stack_int_err, stack_fit, uplim
def makeStellarmassBins(galaxy, basemap, bintype, outDir, binspace=0.25):
'''
Create bins for the data
basemap: map used to create bins (SpectralCube Projection)
bintype: type of bins ('intensity','stellarmass', 'radius')
outDir: directory to write output bin image
Date Programmer Description of Changes
----------------------------------------------------------------------
10/29/2020 Yiqing Song Original Code
12/3/2020 A.A. Kepley Added more comments plus moved GCR map
calculate up to other code.
9/16/2021 A.A. Kepley Fixing up bins
10/21/2021 A.A. Kepley More fixing up of bins.
'''
#Bin the basemap by brightness
# go a little lower and a little higher to make sure that you get
# the whole range and avoid rounding issues
minval = np.nanmin(basemap.value)*0.999
logminval = np.log10(minval*0.999)
logmaxval = np.log10(np.nanmax(basemap.value)*1.001)
nbins = int(np.round((logmaxval - logminval)/binspace))
binedge = np.logspace(logminval, logmaxval, num=nbins, base=10) * basemap.unit
bins = np.digitize(basemap.value, binedge.value) #this will automatically add an extra bin in the end for nan values
binlabels = ['{0:1.2f}'.format(i)+basemap.unit.to_string() for i in binedge] #need to add units to stellarmass map!!
## Set nan values to nonsense
bins[np.isnan(basemap.value)] = 99
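    # np.digitize returns 0 for values below the lowest edge and len(binedge) for values
    # at or above the highest edge; NaNs were sent to the 99 sentinel above, so the checks
    # below flag any real data falling outside the intended bin range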
    # warn if valid values fall outside the binning range
if 0 in np.unique(bins):
print('WARNING: Value below map minimum\n')
if len(binedge) in np.unique(bins):
print('WARNING: Value above map maximum\n')
# make bins map
binmap = Projection(bins,wcs=basemap.wcs,header=basemap.header)
binmap.write(os.path.join(outDir,galaxy['NAME'].upper()+'_binsby'+bintype+'.fits'), overwrite=True)
return binmap, binedge, binlabels
def makeRadiusBins(galaxy, basemap, outDir, beam=15.0, r25=False):
'''
Create bins for the data
basemap: map used to create bins (SpectralCube Projection)
outDir: directory to write output bin image
Date Programmer Description of Changes
----------------------------------------------------------------------
10/29/2020 Yiqing Song Original Code
12/3/2020 A.A. Kepley Added more comments plus moved GCR map
calculate up to other code.
4/15/2021 A.A. Kepley Changes bins to be beam width apart in radius.
'''
if r25:
minrad = 0.05
maxrad = (90.0/3600.0) / galaxy['R25_DEG'] # go out to edge of the field.
binedge = np.arange(minrad, maxrad, 0.05)
binedge = np.insert(binedge,0,0) # insert the center of the galaxy
binedge = binedge * basemap.unit
else:
minrad = 0.0+beam/2.0
#maxrad = np.max(basemap).value + beam # want to add one bin beyond to capture max.
maxrad = 90.0 # go out to edge of field. radius ~ 60arcsec
binedge = np.arange(minrad, maxrad, beam)
binedge = np.insert(binedge,0,0) # insert the center of the galaxy
binedge = binedge * basemap.unit
bins = np.digitize(basemap.value,binedge.value)
# setting the outer edges to nonsense value
bins[bins==np.max(bins)] = 99
if 0 in np.unique(bins):
print('WARNING: Value below map minimum\n')
if len(binedge) in np.unique(bins):
print('WARNING: Value above map maximum\n')
binlabels = ['{0:1.2f} '.format(i) for i in binedge]
# make bins map
binmap=Projection(bins, wcs=basemap.wcs, header=basemap.header)
if r25:
binmap.write(os.path.join(outDir,galaxy['NAME'].upper()+'_binsbyr25.fits'), overwrite=True)
else:
binmap.write(os.path.join(outDir,galaxy['NAME'].upper()+'_binsbyradius.fits'), overwrite=True)
return binmap, binedge, binlabels
def plotBins(galaxy, binmap, binedge, binlabels, bintype, outDir):
'''
visualize binning
Date Programmer Description of Changes
----------------------------------------------------------------------
10/29/2020 Yiqing Song Original Code
12/3/2020 A.A. Kepley Added comments
4/29/2021 A.A. Kepley Simplified plotting code
'''
## TODO: can I fix the plots here with an explicit color map and normalize:
## norm = mpl.colors.Normalize(vmin=min(degas[dr1]['LOGMSTAR']), vmax=max(degas[dr1]['LOGMSTAR']))
## cscale = mpl.cm.ScalarMappable(norm=norm,cmap='viridis')
## for cscale, I want to probably set_extremes(under=None,over=None) so I can set the 99 values to gray or red and the masked values to gray or red as well.
## plt.imshow(c=cscale)
cmap = cm.get_cmap('viridis').copy()
cmap.set_over(color='gray')
cmap.set_under(color='gray')
maxval = np.max(binmap[binmap.value != 99].value)
mapvals = np.arange(1.0,maxval+1)
plt.subplot(projection=binmap.wcs)
heatmap = plt.imshow(binmap.value,origin='lower', vmin=1,vmax=maxval,cmap=cmap)
plt.xlabel('RA (J2000)')
plt.ylabel('Dec (J2000)')
plt.colorbar(heatmap, boundaries=binedge, values=mapvals,drawedges=True)
plt.savefig(os.path.join(outDir,galaxy['NAME'].upper()+'_binsby'+bintype+'.png'))
plt.clf()
plt.close()
def mapCO(galaxy, regridDir, outDir, mask=None ):
'''
get CO map for galaxy
galaxy: line from degas_base.fits with galaxy properties
regridDir: input directory with regridded data
outDir: output directory
Date Programmer Description of Changes
----------------------------------------------------------------------
10/29/2020 Yiqing Song Original Code
12/3/2020 A.A. Kepley Added comments and clarified inputs
10/7/2021 A.A. Kepley Removing sncut parameter and instead reading
in mom0 and error produced earlier.
'''
# read in CO cube
cofile = os.path.join(regridDir, galaxy['NAME']+'_12CO10_r21_simple_regrid.fits')
if not os.path.exists(cofile):
cofile = os.path.join(regridDir, galaxy['NAME']+'_12CO10_regrid.fits')
cube = SpectralCube.read(cofile).with_spectral_unit(u.km / u.s)
# read in CO moment map
mom0file = os.path.join(regridDir,galaxy['NAME']+'_12CO10_r21_simple_mom0_regrid.fits')
if not os.path.exists(mom0file):
mom0file = os.path.join(regridDir,galaxy['NAME']+'_12CO10_mom0_regrid.fits')
hdu = fits.open(mom0file)[0]
data = hdu.data
## is this what I want below?
if mask:
data[np.isnan(mask)]=np.nan #apply SN mask (SN >3)
comap = Projection(data,header=hdu.header,wcs=WCS(hdu.header),unit=hdu.header['BUNIT'])
comap.quicklook()
plt.savefig(os.path.join(outDir,galaxy['NAME']+'_CO.png'))
plt.clf()
plt.close()
return cube, comap
# sncut=3.0
# # read in cube
# cube = SpectralCube.read(os.path.join(regridDir, galaxy['NAME']+'_12CO_regrid.fits'))
# cube = cube.with_spectral_unit(u.km / u.s)
# # read in mask
# mask = SpectralCube.read(os.path.join(regridDir, galaxy['NAME']+'_12CO_mask_regrid.fits'))
# mask = mask.with_spectral_unit(u.km / u.s)
# # calculate noise
# madstd = cube.mad_std(how='cube') #K #raw cube
# chanwidth = np.abs(cube.spectral_axis[0]-cube.spectral_axis[1]) #channel width is same for all channels, km/s
# masksum = mask.sum(axis=0) #map number of unmasked pixels
# noise = np.sqrt(masksum)*(madstd*chanwidth) #moment0 error map, in K km/s ## TODO: CHECK MATH HERE
# #mask datacube
# masked_cube = cube.with_mask(mask==1.0*u.dimensionless_unscaled)
# mom0 = masked_cube.moment(order=0)
# snmap = mom0/noise #should be unitless #mom0 is from masked cube
# snmap[snmap==np.inf]=np.nan #convert division by zero to nan
# # write the resulting map to fits
# snmap.write(os.path.join(outDir, galaxy['NAME'].upper()+'_SNmap.fits'), overwrite=True)
# snmap.quicklook()
# plt.savefig(os.path.join(outDir, galaxy['NAME'].upper()+'_SNmap.png'))
# plt.close()
# plt.clf()
# #get rid of parts of mom0 where S/N < S/N cut
# mom0cut = mom0.copy()
# sn = np.nan_to_num(snmap.value)
# mom0cut[sn<sncut] = np.nan #blank out low sn regions
# #use sigma-clipped mom0 as new 2D mask for the original cube to preserve noise in signal-free channel
# mom0mask = ~np.isnan(mom0cut)
# masked_cube = cube.with_mask(mom0mask) #use for stacking later
# return mom0cut, masked_cube, mom0mask
def mapStellar(galaxy, regridDir, outDir, mask=None):
'''
make stellarmass map
galaxy: line from degas_base.fits table with galaxy information
mom0cut: S/N cut on mom0
regridDir: input data directory with all regridded data
outDir: output data directory
Date Programmer Description of Changes
----------------------------------------------------------------------
10/29/2020 Yiqing Song Original Code
12/3/2020 A.A. Kepley Added comments and clarified inputs
10/07/2021 A.A. Kepley made mask optional
'''
# open the stellar mass
stellarhdu = fits.open(os.path.join(regridDir,galaxy['NAME']+'_mstar_gauss15_regrid.fits'))[0]
stellar = stellarhdu.data
#stellar[starmask==1.0]=np.nan #apply star mask ## AAK: I think I can skip this.
if mask:
stellar[np.isnan(mask)] = np.nan #apply SN mask (SN >3)
w = WCS(stellarhdu.header)
hdrunit = stellarhdu.header['BUNIT'].replace('MSUN','Msun').replace('PC','pc')
stellarmap = Projection(stellar,header=stellarhdu.header,wcs=w, unit=hdrunit)
stellarmap.quicklook()
plt.savefig(os.path.join(outDir,galaxy['NAME']+'_stellarmass.png'))
plt.clf()
plt.close()
return stellarmap
def mapLTIR(galaxy, regridDir, outDir, mask=None):
'''
make LTIR map
galaxy: line from degas_base.fits table with galaxy information
mom0cut: S/N cut on mom0
regridDir: input data directory with all regridded data
outDir: output data directory
Date Programmer Description of Changes
----------------------------------------------------------------------
12/10/2020 A.A. Kepley Original code based on mapStellar
'''
hdu=fits.open(os.path.join(regridDir,galaxy['NAME']+'_LTIR_gauss15_regrid.fits'))[0]
data=hdu.data
if mask:
data[np.isnan(mask)]=np.nan #apply SN mask (SN >3)
LTIRmap=Projection(data,header=hdu.header,wcs=WCS(hdu.header),unit=hdu.header['BUNIT'])
LTIRmap.quicklook()
plt.savefig(os.path.join(outDir,galaxy['NAME']+'_LTIR.png'))
plt.clf()
plt.close()
return LTIRmap
def mapSFR(galaxy, regridDir, outDir, mask=None):
'''
import sfr map from W4+FUV
Date Programmer Description of Changes
----------------------------------------------------------------------
10/29/2020 Yiqing Song Original Code
12/3/2020 A.A. Kepley Added comments and clarified inputs
10/7/2021 A.A. Kepley Made mask optional
'''
sfrhdu = fits.open(os.path.join(regridDir,galaxy['NAME']+'_sfr_fuvw4_gauss15_regrid.fits'))[0]
sfr = sfrhdu.data
if mask:
sfr[np.isnan(mask)] = np.nan
w = WCS(sfrhdu.header)
hdrunit = sfrhdu.header['BUNIT'].replace('MSUN','Msun')
hdrunit = hdrunit.replace("KPC","kpc")
hdrunit = hdrunit.replace('YR','yr')
sfrmap=Projection(sfr,header=sfrhdu.header,wcs=w, unit=hdrunit)
sfrmap.quicklook()
# save plot of map
plt.savefig(os.path.join(outDir,galaxy['NAME']+'_sfr.png'))
plt.clf()
plt.close()
return sfrmap
def mapGCR(galaxy, basemap):
'''
Create map of galactic radii
Date Programmer Description of Changes
----------------------------------------------------------------------
10/29/2020 Yiqing Song Original Code
12/03/2020 A.A. Kepley Tweak to how input data is handled and
added comments
'''
# get the data we need for the galaxy
ra = galaxy['RA_DEG']
dec = galaxy['DEC_DEG']
inc = np.radians(galaxy['INCL_DEG'])
pa = np.radians(galaxy['POSANG_DEG'])
r25 = galaxy['R25_DEG'] * u.deg
Dmpc = galaxy['DIST_MPC']
# get wcs
w = basemap.wcs
# get center
x0,y0 = w.all_world2pix(ra,dec,0,ra_dec_order=True)
# get coordinates
y = np.arange(0,np.shape(basemap)[0],1)
x = np.arange(0,np.shape(basemap)[1],1)
# create a 2d image of coordinates
xx,yy = np.meshgrid(x,y)
# calculate the radius in pixels
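    # rotate the pixel offsets by the position angle into the galaxy frame, then stretch the
    # axis foreshortened by the inclination by 1/cos(i) to recover the deprojected (in-plane)
    # galactocentric radius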
xx_new = x0+(xx-x0)*np.cos(pa)+(yy-y0)*np.sin(pa)
yy_new = y0-(xx-x0)*np.sin(pa)+(yy-y0)*np.cos(pa)
R = np.sqrt((xx_new-x0)**2/np.cos(inc)**2+(yy_new-y0)**2) #pixels
# now convert from pixels to actual units
head = basemap.header
pxscale = np.radians(np.abs(head['CDELT1'])) #radian/pixel
R_arcsec = np.degrees(R*pxscale)*3600.0 * u.arcsec# map of GCR in arcsec
R_kpc = R*pxscale*Dmpc*1000 * u.kpc# map of GCR in kpc
R_r25 = R_arcsec/r25.to(u.arcsec) # map of GCR in units of R25
Rarcsec_map = Projection(R_arcsec, header=head, wcs=w, unit=R_arcsec.unit)
Rkpc_map = Projection(R_kpc, header=head, wcs=w, unit=R_kpc.unit)
Rr25_map = Projection(R_r25, header=head, wcs=w)
# map of galactocentric radius in unit of kpc, arcmin, in r25
return Rarcsec_map, Rkpc_map, Rr25_map
| low-sky/degas | degas/analysis_stack.py | Python | gpl-3.0 | 50,584 | [
"Galaxy",
"Gaussian"
] | e169484a1f0f31b9cd2f3a88d19ecd2a3d20f79371f6a9291660594b6d3eb7f0 |
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from datetime import date, timedelta
from workalendar.core import WesternCalendar, ChristianMixin
from workalendar.core import SUN, MON, TUE, WED, FRI, SAT
class Brazil(WesternCalendar, ChristianMixin):
"Brazil"
FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
(4, 21, "Tiradentes' Day"),
(5, 1, "Labour Day"),
(9, 7, "Independence Day"),
(10, 12, "Our Lady of Aparecida"),
(11, 2, "All Souls' Day"),
(11, 15, "Republic Day"),
)
class BrazilSaoPauloState(Brazil):
"Brazil São Paulo State"
FIXED_HOLIDAYS = Brazil.FIXED_HOLIDAYS + (
(7, 9, "Constitutional Revolution of 1932"),
)
class BrazilSaoPauloCity(BrazilSaoPauloState):
"Brazil São Paulo City"
FIXED_HOLIDAYS = BrazilSaoPauloState.FIXED_HOLIDAYS + (
(1, 25, "Anniversary of the city of São Paulo"),
(11, 20, "Dia da Consciência Negra")
)
include_easter_sunday = True
include_corpus_christi = True
def get_carnaval(self, year):
return self.get_easter_sunday(year) - timedelta(days=47)
def get_variable_days(self, year):
days = super(BrazilSaoPauloCity, self).get_variable_days(year)
days.append((self.get_carnaval(year), "Carnaval"))
days.append((self.get_good_friday(year), "Sexta-feira da Paixão"))
return days
class Chile(WesternCalendar, ChristianMixin):
"Chile"
FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
(5, 1, "Labour Day"),
(5, 21, "Navy Day"),
(6, 29, "Saint Peter and Saint Paul"),
(7, 16, "Our Lady of Mount Carmel"),
(9, 18, "National holiday"),
(9, 19, "Army holiday"),
(10, 12, "Columbus Day"),
(12, 31, "Banking Holiday"),
)
include_good_friday = True
include_easter_saturday = True
include_assumption = True
include_all_saints = True
include_immaculate_conception = True
def get_variable_days(self, year):
days = super(Chile, self).get_variable_days(year)
september_17 = date(year, 9, 17)
if september_17.weekday() == MON:
days.append((september_17, '"Bridge" holiday'))
september_20 = date(year, 9, 20)
if september_20.weekday() == FRI:
days.append((september_20, '"Bridge" holiday'))
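        # when 31 October falls midweek, Reformation Day is observed on the nearest Friday
        # (2 November if the 31st is a Wednesday, 27 October if it is a Tuesday)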
reformation_day = date(year, 10, 31)
if reformation_day.weekday() == WED:
reformation_day = date(year, 11, 2)
elif reformation_day.weekday() == TUE:
reformation_day = date(year, 10, 27)
days.append((reformation_day, "Reformation Day"))
return days
class Colombia(WesternCalendar, ChristianMixin):
"Colombia"
FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
(5, 1, "Labour Day"),
(7, 20, "Independence Day"),
(8, 7, "Boyacá Battle"),
)
include_palm_sunday = True
include_holy_thursday = True
include_good_friday = True
include_easter_sunday = True
include_corpus_christi = True
include_immaculate_conception = True
def get_epiphany(self, year):
base_day = date(year, 1, 6)
return Colombia.get_first_weekday_after(base_day, 0)
def get_saint_joseph(self, year):
base_day = date(year, 3, 19)
return Colombia.get_first_weekday_after(base_day, 0)
def get_ascension(self, year):
return self.get_easter_sunday(year) + timedelta(days=43)
def get_corpus_christi(self, year):
return self.get_easter_sunday(year) + timedelta(days=64)
def get_sacred_heart(self, year):
return self.get_easter_sunday(year) + timedelta(days=71)
def get_saint_peter_and_saint_paul(self, year):
base_day = date(year, 6, 29)
return Colombia.get_first_weekday_after(base_day, 0)
def get_assumption(self, year):
base_day = date(year, 8, 15)
return Colombia.get_first_weekday_after(base_day, 0)
def get_race_day(self, year):
base_day = date(year, 10, 12)
return Colombia.get_first_weekday_after(base_day, 0)
def get_all_saints(self, year):
base_day = date(year, 11, 1)
return Colombia.get_first_weekday_after(base_day, 0)
def get_cartagena_independence(self, year):
base_day = date(year, 11, 11)
return Colombia.get_first_weekday_after(base_day, 0)
def get_variable_days(self, year):
days = super(Colombia, self).get_variable_days(year)
days.extend([
(self.get_epiphany(year), "Epiphany"),
(self.get_saint_joseph(year), "Saint Joseph"),
(self.get_ascension(year), "Ascension"),
(self.get_sacred_heart(year), "Sacred Heart"),
(self.get_saint_peter_and_saint_paul(year),
"Saint Peter and Saint Paul"),
(self.get_assumption(year), "Assumption of Mary to Heaven"),
(self.get_race_day(year), "Race Day"),
(self.get_all_saints(year), "All Saints"),
(self.get_cartagena_independence(year),
"Cartagena's Independence"),
])
return days
class Mexico(WesternCalendar, ChristianMixin):
"Mexico"
FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
(5, 1, "Labour Day"),
(9, 16, "Independence Day"),
)
def get_variable_days(self, year):
days = super(Mexico, self).get_variable_days(year)
days.append(
(Mexico.get_nth_weekday_in_month(year, 2, MON),
"Constitution Day"))
days.append(
(Mexico.get_nth_weekday_in_month(year, 3, MON, 3),
"Benito Juárez's birthday"))
days.append(
(Mexico.get_nth_weekday_in_month(year, 11, MON, 3),
"Revolution Day"))
return days
def get_calendar_holidays(self, year):
days = super(Mexico, self).get_calendar_holidays(year)
# If any statutory day is on Sunday, the monday is off
# If it's on a Saturday, the Friday is off
for day, label in days:
if day.weekday() == SAT:
days.append((day - timedelta(days=1), "%s substitute" % label))
elif day.weekday() == SUN:
days.append((day + timedelta(days=1), "%s substitute" % label))
# Extra: if new year's day is a saturday, the friday before is off
next_new_year = date(year + 1, 1, 1)
        if next_new_year.weekday() == SAT:
days.append((date(year, 12, 31), "New Year Day substitute"))
return days
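    # illustrative use, assuming the standard workalendar Calendar API:
    #   Mexico().holidays(2014) returns (date, label) pairs, including the
    #   Saturday/Sunday substitute days added above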
class Panama(WesternCalendar, ChristianMixin):
"Panama"
include_good_friday = True
include_easter_saturday = True
#include_easter_sunday = True
FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
(1, 9, "Martyrs' Day"),
(5, 1, "Labour Day"),
(11, 3, "Independence Day"),
(11, 4, "Flag Day"),
(11, 5, "Colón Day"),
(11, 10, "Shout in Villa de los Santos"),
(12, 8, "Mothers' Day"),
)
def get_presidential_inauguration(self, year):
"""Might return empty"""
days = []
if year % 5 == 4:
days.append((date(year, 7, 1), "Presidential Inauguration"))
return days
def get_variable_days(self, year):
days = super(Panama, self).get_variable_days(year)
days.append(
(self.get_ash_wednesday(year) - timedelta(days=2), "Carnival Monday")
)
days.append(
(self.get_ash_wednesday(year) - timedelta(days=1), "Carnival Tuesday")
)
# http://web.archive.org/web/20140220013335/http://panama.usembassy.gov/holidays.html
days.append(
(self.get_first_weekday_after(date(year, 11, 28), 0), "Independence from Spain")
)
days.extend(self.get_presidential_inauguration(year))
return days
| gregn610/workalendar | workalendar/america.py | Python | mit | 7,958 | [
"COLUMBUS"
] | cdea4e6a1ab814d4e2e6f4a64a5497ccd20af3ee44224aeb897c8e4343d47a2a |
########################################################################
# File : ModuleFactory.py
# Author : Stuart Paterson
########################################################################
""" The Module Factory instantiates a given Module based on a given input
string and set of arguments to be passed. This allows for VO specific
module utilities to be used in various contexts.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
from DIRAC import S_OK, S_ERROR, gLogger
class ModuleFactory(object):
#############################################################################
def __init__(self):
""" Standard constructor
"""
self.log = gLogger
#############################################################################
def getModule(self, importString, argumentsDict):
"""This method returns the Module instance given the import string and
arguments dictionary.
"""
try:
moduleName = importString.split('.')[-1]
modulePath = importString.replace('.%s' % (moduleName), '')
importModule = __import__('%s.%s' % (modulePath, moduleName), globals(), locals(), [moduleName])
except Exception as x:
msg = 'ModuleFactory could not import %s.%s' % (modulePath, moduleName)
self.log.warn(x)
self.log.warn(msg)
return S_ERROR(msg)
try:
# FIXME: should we use imp module?
moduleStr = 'importModule.%s(argumentsDict)' % (moduleName)
moduleInstance = eval(moduleStr)
except Exception as x:
msg = 'ModuleFactory could not instantiate %s()' % (moduleName)
self.log.warn(x)
self.log.warn(msg)
return S_ERROR(msg)
return S_OK(moduleInstance)
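# Illustrative sketch (not part of the original DIRAC source): how a caller
# might use ModuleFactory. The import string and arguments dict below are
# hypothetical placeholders; the referenced module must expose a class of the
# same name that takes a single argumentsDict argument.
#
#   factory = ModuleFactory()
#   result = factory.getModule('DIRAC.SomePackage.SomeModule', {'JobID': 123})
#   if result['OK']:
#       instance = result['Value']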
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
| yujikato/DIRAC | src/DIRAC/Core/Utilities/ModuleFactory.py | Python | gpl-3.0 | 1,864 | [
"DIRAC"
] | 0600ce5763a0c326522286d292551588116d3b286adeaa05e4daa37ff3f336cd |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PerlMoose(PerlPackage):
"""A postmodern object system for Perl 5"""
homepage = "http://search.cpan.org/~ether/Moose-2.2006/lib/Moose.pm"
url = "http://search.cpan.org/CPAN/authors/id/E/ET/ETHER/Moose-2.2006.tar.gz"
version('2.2010', '636238ac384818ee1e92eff6b9ecc50a')
version('2.2009', '5527b1a5abc29b5c57fc488447e76ccd')
version('2.2007', 'de487ae226003f7e7f22c0fd8f0074e6')
version('2.2006', '929c6b3877a6054ef617cf7ef1e220b5')
depends_on('perl-cpan-meta-check', type=('build', 'run'))
depends_on('perl-test-cleannamespaces', type=('build', 'run'))
depends_on('perl-devel-overloadinfo', type=('build', 'run'))
depends_on('perl-class-load-xs', type=('build', 'run'))
depends_on('perl-devel-stacktrace', type=('build', 'run'))
depends_on('perl-eval-closure', type=('build', 'run'))
depends_on('perl-sub-name', type=('build', 'run'))
depends_on('perl-module-runtime-conflicts', type=('build', 'run'))
depends_on('perl-devel-globaldestruction', type=('build', 'run'))
depends_on('perl-package-deprecationmanager', type=('build', 'run'))
depends_on('perl-package-stash-xs', type=('build', 'run'))
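    # Illustrative note (not part of the original recipe): once this package
    # file is available in a Spack repository, the module is typically built
    # and installed with the standard Spack CLI:
    #
    #   spack install perl-moose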
| krafczyk/spack | var/spack/repos/builtin/packages/perl-moose/package.py | Python | lgpl-2.1 | 2,438 | [
"MOOSE"
] | 3b1ffb2d5651eb0e49a3c8c382145ad072d4f85315536f6888ac87c10b093cff |
"""Acceptance tests for LMS-hosted Programs pages"""
from nose.plugins.attrib import attr
from common.test.acceptance.fixtures.catalog import CatalogFixture, CatalogConfigMixin
from common.test.acceptance.fixtures.programs import ProgramsFixture, ProgramsConfigMixin
from common.test.acceptance.fixtures.course import CourseFixture
from common.test.acceptance.tests.helpers import UniqueCourseTest
from common.test.acceptance.pages.lms.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.programs import ProgramListingPage, ProgramDetailsPage
from openedx.core.djangoapps.catalog.tests import factories as catalog_factories
from openedx.core.djangoapps.programs.tests import factories as program_factories
class ProgramPageBase(ProgramsConfigMixin, CatalogConfigMixin, UniqueCourseTest):
"""Base class used for program listing page tests."""
def setUp(self):
super(ProgramPageBase, self).setUp()
self.set_programs_api_configuration(is_enabled=True)
self.programs = [catalog_factories.Program() for __ in range(3)]
self.course_run = catalog_factories.CourseRun(key=self.course_id)
self.stub_catalog_api()
def create_program(self, program_id=None, course_id=None):
"""DRY helper for creating test program data."""
course_id = course_id if course_id else self.course_id
run_mode = program_factories.RunMode(course_key=course_id)
course_code = program_factories.CourseCode(run_modes=[run_mode])
org = program_factories.Organization(key=self.course_info['org'])
if program_id:
program = program_factories.Program(
id=program_id,
status='active',
organizations=[org],
course_codes=[course_code]
)
else:
program = program_factories.Program(
status='active',
organizations=[org],
course_codes=[course_code]
)
return program
def stub_programs_api(self, programs, is_list=True):
"""Stub out the programs API with fake data."""
ProgramsFixture().install_programs(programs, is_list=is_list)
def stub_catalog_api(self):
"""Stub out the catalog API's program and course run endpoints."""
self.set_catalog_configuration(is_enabled=True)
CatalogFixture().install_programs(self.programs)
CatalogFixture().install_course_run(self.course_run)
def auth(self, enroll=True):
"""Authenticate, enrolling the user in the configured course if requested."""
CourseFixture(**self.course_info).install()
course_id = self.course_id if enroll else None
AutoAuthPage(self.browser, course_id=course_id).visit()
class ProgramListingPageTest(ProgramPageBase):
"""Verify user-facing behavior of the program listing page."""
def setUp(self):
super(ProgramListingPageTest, self).setUp()
self.listing_page = ProgramListingPage(self.browser)
def test_no_enrollments(self):
"""Verify that no cards appear when the user has no enrollments."""
program = self.create_program()
self.stub_programs_api([program])
self.auth(enroll=False)
self.listing_page.visit()
self.assertTrue(self.listing_page.is_sidebar_present)
self.assertFalse(self.listing_page.are_cards_present)
def test_no_programs(self):
"""
Verify that no cards appear when the user has enrollments
but none are included in an active program.
"""
course_id = self.course_id.replace(
self.course_info['run'],
'other_run'
)
program = self.create_program(course_id=course_id)
self.stub_programs_api([program])
self.auth()
self.listing_page.visit()
self.assertTrue(self.listing_page.is_sidebar_present)
self.assertFalse(self.listing_page.are_cards_present)
def test_enrollments_and_programs(self):
"""
Verify that cards appear when the user has enrollments
which are included in at least one active program.
"""
program = self.create_program()
self.stub_programs_api([program])
self.auth()
self.listing_page.visit()
self.assertTrue(self.listing_page.is_sidebar_present)
self.assertTrue(self.listing_page.are_cards_present)
@attr('a11y')
class ProgramListingPageA11yTest(ProgramPageBase):
"""Test program listing page accessibility."""
def setUp(self):
super(ProgramListingPageA11yTest, self).setUp()
self.listing_page = ProgramListingPage(self.browser)
program = self.create_program()
self.stub_programs_api([program])
def test_empty_a11y(self):
"""Test a11y of the page's empty state."""
self.auth(enroll=False)
self.listing_page.visit()
self.assertTrue(self.listing_page.is_sidebar_present)
self.assertFalse(self.listing_page.are_cards_present)
self.listing_page.a11y_audit.check_for_accessibility_errors()
def test_cards_a11y(self):
"""Test a11y when program cards are present."""
self.auth()
self.listing_page.visit()
self.assertTrue(self.listing_page.is_sidebar_present)
self.assertTrue(self.listing_page.are_cards_present)
self.listing_page.a11y_audit.check_for_accessibility_errors()
@attr('a11y')
class ProgramDetailsPageA11yTest(ProgramPageBase):
"""Test program details page accessibility."""
def setUp(self):
super(ProgramDetailsPageA11yTest, self).setUp()
self.details_page = ProgramDetailsPage(self.browser)
program = self.create_program(program_id=self.details_page.program_id)
self.stub_programs_api([program], is_list=False)
def test_a11y(self):
"""Test the page's a11y compliance."""
self.auth()
self.details_page.visit()
self.details_page.a11y_audit.check_for_accessibility_errors()
| caesar2164/edx-platform | common/test/acceptance/tests/lms/test_programs.py | Python | agpl-3.0 | 6,048 | [
"VisIt"
] | e6bcee73e9db5959980149f0b75a448e3fe745de032b9dbf83e49bf5b34dc82a |
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
***************************************
**espressopp.interaction.Zero**
***************************************
This class provides methods for a zero potential (no interactions between
particles), mainly used for debugging and testing.
.. function:: espressopp.interaction.Zero()
.. function:: espressopp.interaction.VerletListZero(vl)
:param vl:
:type vl:
.. function:: espressopp.interaction.VerletListZero.getPotential(type1, type2)
:param type1:
:param type2:
:type type1:
:type type2:
:rtype:
.. function:: espressopp.interaction.VerletListZero.setFixedTupleList(ftpl)
:param ftpl:
:type ftpl:
.. function:: espressopp.interaction.VerletListZero.setPotential(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.VerletListAdressZero(vl)
:param vl:
:type vl:
.. function:: espressopp.interaction.VerletListAdressZero.setFixedTupleList(ftpl)
:param ftpl:
:type ftpl:
.. function:: espressopp.interaction.VerletListAdressZero.setPotentialAT(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.VerletListAdressZero.setPotentialCG(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.VerletListHadressZero(vl, fixedtupleList)
:param vl:
:param fixedtupleList:
:type vl:
:type fixedtupleList:
.. function:: espressopp.interaction.VerletListHadressZero.setFixedTupleList(ftpl)
:param ftpl:
:type ftpl:
.. function:: espressopp.interaction.VerletListHadressZero.setPotentialAT(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.VerletListHadressZero.setPotentialCG(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.CellListZero(stor)
:param stor:
:type stor:
.. function:: espressopp.interaction.CellListZero.setPotential(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.FixedPairListZero(system, vl, potential)
:param system:
:param vl:
:param potential:
:type system:
:type vl:
:type potential:
.. function:: espressopp.interaction.FixedPairListZero.setPotential(potential)
:param potential:
:type potential:
"""
from espressopp import pmi
from espressopp.esutil import *
from espressopp.interaction.Potential import *
from espressopp.interaction.Interaction import *
from _espressopp import interaction_Zero, \
interaction_VerletListZero, \
interaction_VerletListAdressZero, \
interaction_VerletListHadressZero, \
interaction_CellListZero, \
interaction_FixedPairListZero
class ZeroLocal(PotentialLocal, interaction_Zero):
def __init__(self):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_Zero)
class VerletListZeroLocal(InteractionLocal, interaction_VerletListZero):
def __init__(self, vl):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_VerletListZero, vl)
def setPotential(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, type1, type2, potential)
def getPotential(self, type1, type2):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getPotential(self, type1, type2)
def setFixedTupleList(self, ftpl):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setFixedTupleList(self, ftpl)
class VerletListAdressZeroLocal(InteractionLocal, interaction_VerletListAdressZero):
def __init__(self, vl):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_VerletListAdressZero, vl)
def setPotentialAT(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotentialAT(self, type1, type2, potential)
def setPotentialCG(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotentialAT(self, type1, type2, potential)
def setFixedTupleList(self, ftpl):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setFixedTupleList(self, ftpl)
class VerletListHadressZeroLocal(InteractionLocal, interaction_VerletListHadressZero):
def __init__(self, vl, fixedtupleList):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_VerletListHadressZero, vl, fixedtupleList)
def setPotentialAT(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotentialAT(self, type1, type2, potential)
def setPotentialCG(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotentialAT(self, type1, type2, potential)
def setFixedTupleList(self, ftpl):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setFixedTupleList(self, ftpl)
class CellListZeroLocal(InteractionLocal, interaction_CellListZero):
def __init__(self, stor):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_CellListZero, stor)
def setPotential(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, type1, type2, potential)
class FixedPairListZeroLocal(InteractionLocal, interaction_FixedPairListZero):
def __init__(self, system, vl, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_FixedPairListZero, system, vl, potential)
def setPotential(self, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, potential)
if pmi.isController:
class Zero(Potential):
'The Zero potential.'
pmiproxydefs = dict(
cls = 'espressopp.interaction.ZeroLocal'
)
class VerletListZero(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.interaction.VerletListZeroLocal',
pmicall = ['setPotential', 'getPotential', 'setFixedTupleList']
)
class VerletListAdressZero(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.interaction.VerletListAdressZeroLocal',
pmicall = ['setPotentialAT', 'setPotentialCG']
)
class VerletListHadressZero(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.interaction.VerletListHadressZeroLocal',
pmicall = ['setPotentialAT', 'setPotentialCG']
)
class CellListZero(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.interaction.CellListZeroLocal',
pmicall = ['setPotential']
)
class FixedPairListZero(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.interaction.FixedPairListZeroLocal',
pmicall = ['setPotential']
)
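# Illustrative sketch (not part of the original module): wiring the Zero
# potential into a Verlet-list interaction. The System/VerletList construction
# shown here is an assumption for the example only; the exact constructor
# arguments follow whatever the rest of the espressopp setup uses.
#
#   import espressopp
#   system = espressopp.System(box_l=[10, 10, 10])
#   vl = espressopp.VerletList(system, cutoff=1.0)
#   interaction = espressopp.interaction.VerletListZero(vl)
#   interaction.setPotential(type1=0, type2=0,
#                            potential=espressopp.interaction.Zero())
#   system.addInteraction(interaction)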
| capoe/espressopp.soap | src/interaction/Zero.py | Python | gpl-3.0 | 9,828 | [
"ESPResSo"
] | 486383b7ffea2f8d1c21d370b415a80dc4553144fccc603e860d70a817e34153 |
# Copyright 2012 Jose Blanca, Peio Ziarsolo, COMAV-Univ. Politecnica Valencia
# This file is part of ngs_crumbs.
# ngs_crumbs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# ngs_crumbs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with ngs_crumbs. If not, see <http://www.gnu.org/licenses/>.
from itertools import compress
import os.path
from crumbs.seq.annotation import (PolyaAnnotator, EstscanOrfAnnotator,
BlastAnnotator)
from crumbs.seq.utils.seq_utils import append_to_description
from crumbs.utils.tags import SEQRECORD
from crumbs.seq.seq import SeqWrapper
class TranscriptOrientator(object):
'''This class orientates the transcripts
It can take into account: poly-A, ORFs and blast matches'''
def __init__(self, polya_params=None, estscan_params=None,
blast_params=None):
self._polya_params = polya_params
self._estscan_params = estscan_params
self._blast_params = blast_params
self._annotators = self._create_pipeline()
def _create_pipeline(self):
'It creates the annotation pipeline'
# pylint: disable=W0142
annotators = []
if self._polya_params:
annotators.append({'name': 'polyA'})
if self._estscan_params:
annotators.append({'name': 'estscan_orf'})
if self._blast_params:
for blast_param in self._blast_params:
annotators.append({'name': 'blast',
'blastdb': blast_param['blastdb']})
return annotators
@staticmethod
def _select_features_by_type(features, kind):
'it selects features by type'
return [feat for feat in features if feat.type == kind]
def _polya_selector(self, features):
'It selects the polya'
# pylint: disable=W0613
feats = self._select_features_by_type(features, 'polyA_sequence')
if feats:
return feats[0]
def _orf_selector(self, features):
'it returns the longest feature'
# pylint: disable=W0613
features = self._select_features_by_type(features, 'ORF')
if not features:
return None
lengths = [len(feat) for feat in features]
return features[lengths.index(max(lengths))]
def _match_part_selector(self, features, blastdb):
'it return the match_part with the best e-value'
blastdb = os.path.basename(blastdb)
features = self._select_features_by_type(features, 'match_part')
features = [f for f in features if f.qualifiers['blastdb'] == blastdb]
if not features:
return None
scores = [feat.qualifiers['score'] for feat in features]
return features[scores.index(min(scores))]
def _guess_orientations(self, seqs, annotator_name, blastdb):
'''It returns the orientation of the annotated transcripts.'''
orientations = []
for seq in seqs:
if annotator_name == 'polyA':
feature = self._polya_selector(seq.object.features)
elif annotator_name == 'estscan_orf':
feature = self._orf_selector(seq.object.features)
elif annotator_name == 'blast':
feature = self._match_part_selector(seq.object.features,
blastdb=blastdb)
else:
raise NotImplementedError('This annotator type not supported')
orientation = None if feature is None else feature.strand
orientations.append(orientation)
return orientations
def _get_annotator(self, annotator_name, blastdb):
'It prepares and returns the annotator'
if annotator_name == 'polyA':
annotator = PolyaAnnotator(**self._polya_params)
elif annotator_name == 'estscan_orf':
annotator = EstscanOrfAnnotator(**self._estscan_params)
elif annotator_name == 'blast':
blast_param = None
for blast_param_ in self._blast_params:
if blastdb == blast_param_['blastdb']:
blast_param = blast_param_
break
annotator = BlastAnnotator(**blast_param)
else:
raise NotImplementedError('This annotator type not supported')
return annotator
def __call__(self, seqs):
'It orientates seqs, that should have a SeqRecord in it'
orientations = None
orientation_log = [None] * len(seqs)
for annotator in self._annotators:
if orientations:
to_annalyze = [not o for o in orientations]
seqs_to_analyze = list(compress(seqs, to_annalyze))
else:
orientations = [None] * len(seqs)
seqs_to_analyze = seqs
annotator_name = annotator['name']
blastdb = annotator.get('blastdb', None)
annotator = self._get_annotator(annotator_name, blastdb)
annot_seqrecords = annotator(seqs_to_analyze)
annot_strands = self._guess_orientations(annot_seqrecords,
annotator_name,
blastdb=blastdb)
if blastdb:
annotator_name += ' ' + os.path.basename(blastdb)
analyzed_seqs_index = 0
for index, orientation in enumerate(orientations):
if orientation is None:
orientations[index] = annot_strands[analyzed_seqs_index]
if annot_strands[analyzed_seqs_index] == -1: # reverse
orientation_log[index] = annotator_name
analyzed_seqs_index += 1
# Now we reverse the seqs that we have guess that are reversed
reorientated_seqrecords = []
for orientation, seq, reason in zip(orientations, seqs,
orientation_log):
if orientation == -1:
rev_seqrecord = seq.object.reverse_complement(id=True,
description=True,
annotations=True,
features=True,
dbxrefs=True,
name=True)
seq = SeqWrapper(SEQRECORD, rev_seqrecord, None)
# we mark the reason why it has been reversed
text = '(reversed because of: {})'.format(reason)
append_to_description(seq, text)
reorientated_seqrecords.append(seq)
return reorientated_seqrecords
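# Illustrative sketch (not part of the original ngs_crumbs source): typical use
# of TranscriptOrientator. The parameter dictionaries are placeholders; the
# real keys are whatever PolyaAnnotator, EstscanOrfAnnotator and BlastAnnotator
# accept, and each blast_params entry must carry a 'blastdb' key as used above.
#
#   orientator = TranscriptOrientator(
#       polya_params={...},
#       blast_params=[{'blastdb': '/path/to/blastdb', ...}])
#   oriented_seqs = orientator(seqs)   # seqs: list of SeqWrapper(SEQRECORD, ...)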
| JoseBlanca/ngs_crumbs | crumbs/seq/transcript_orientations.py | Python | gpl-3.0 | 7,261 | [
"BLAST"
] | 3592a1fde244596b3fd26b27233f4fb7da8774ca46c4878c06b6c3a1a1e40af2 |
# proxy module
from __future__ import absolute_import
from mayavi.tools.probe_data import *
| enthought/etsproxy | enthought/mayavi/tools/probe_data.py | Python | bsd-3-clause | 92 | [
"Mayavi"
] | 96af538fa0c1cf0ef0070ac2fbb2ba1fd7e7dc22e95d802d25b2291745833e49 |
#
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
import espressomd
system = espressomd.System(box_l=[100, 100, 100])
system.time_step = 0.01
system.cell_system.skin = 0.1
for i in range(100):
system.part.add(pos=np.random.random() * system.box_l)
while True:
system.integrator.run(1000)
| espressomd/espresso | testsuite/python/sigint_child.py | Python | gpl-3.0 | 977 | [
"ESPResSo"
] | 6e2b57da3d9635dcf4b8e101ba8a6461e887881c1695547bb4fe000dd51792d6 |
"""Common settings and globals."""
from os import environ
from os.path import abspath, basename, dirname, join, normpath, exists
from sys import path
import json
from datetime import timedelta
# PATH CONFIGURATION
# Absolute filesystem path to the Django nyc_trees/nyc_trees directory:
DJANGO_ROOT = dirname(dirname(abspath(__file__)))
# Absolute filesystem path to the top-level nyc_trees/ folder:
SITE_ROOT = dirname(DJANGO_ROOT)
# Site name:
SITE_NAME = basename(DJANGO_ROOT)
SITE_ID = 1 # Needed by django.contrib.sites (used by Django flatpages)
# Add our project to our pythonpath, this way we don't need to type our project
# name in our dotted import paths:
path.append(DJANGO_ROOT)
# END PATH CONFIGURATION
# DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
# END DEBUG CONFIGURATION
# STATSD CONFIGURATION
STATSD_CLIENT = 'django_statsd.clients.normal'
STATSD_PREFIX = 'django'
STATSD_HOST = environ.get('NYC_TREES_STATSD_HOST', 'localhost')
# END STATSD CONFIGURATION
# EMAIL CONFIGURATION
DEFAULT_FROM_EMAIL = '[email protected]'
DEFAULT_HELP_EMAIL = '[email protected]'
# END EMAIL CONFIGURATION
# FILE STORAGE CONFIGURATION
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# END FILE STORAGE CONFIGURATION
# CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
# The Redis database at index 0 is used by Logstash/Beaver
'LOCATION': 'redis://{0}:{1}/1'.format(
environ.get('NYC_TREES_CACHE_HOST', 'localhost'),
environ.get('NYC_TREES_CACHE_PORT', 6379)),
'OPTIONS': {
'PARSER_CLASS': 'redis.connection.HiredisParser',
'SOCKET_TIMEOUT': 3,
}
}
}
# Don't throw exceptions if Redis is down.
DJANGO_REDIS_IGNORE_EXCEPTIONS = True
# END CACHE CONFIGURATION
# CELERY CONFIGURATION
BROKER_URL = 'redis://{0}:{1}/2'.format(
environ.get('NYC_TREES_CACHE_HOST', 'localhost'),
environ.get('NYC_TREES_CACHE_PORT', 6379))
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
STATSD_CELERY_SIGNALS = True
# END CELERY CONFIGURATION
# DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': environ.get('NYC_TREES_DB_NAME', 'nyc_trees'),
'USER': environ.get('NYC_TREES_DB_USER', 'nyc_trees'),
'PASSWORD': environ.get('NYC_TREES_DB_PASSWORD', 'nyc_trees'),
'HOST': environ.get('NYC_TREES_DB_HOST', 'localhost'),
'PORT': environ.get('NYC_TREES_DB_PORT', 5432),
'TEST_NAME': environ.get('NYC_TREES_TEST_DB_NAME', 'test_nyc_trees')
}
}
POSTGIS_VERSION = tuple(
map(int, environ.get('DJANGO_POSTGIS_VERSION', '2.1.3').split("."))
)
# END DATABASE CONFIGURATION
# LOGGING CONFIGURATION
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': 'INFO',
},
}
}
# END LOGGING CONFIGURATION
# GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'America/New_York'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = False
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = False
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# This generates false positives and is being removed
# (https://code.djangoproject.com/ticket/23469)
SILENCED_SYSTEM_CHECKS = ['1_6.W001', '1_6.W002']
# END GENERAL CONFIGURATION
# MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = environ['DJANGO_MEDIA_ROOT']
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# END MEDIA CONFIGURATION
# STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = environ['DJANGO_STATIC_ROOT']
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS # NOQA
STATICFILES_DIR = '/var/cache/nyc-trees/static/'
STATICFILES_DIRS = (
STATICFILES_DIR,
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders # NOQA
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# parse manifest created by gulp-rev-all
_STATIC_FILES_MAP = join(STATICFILES_DIR, 'rev-manifest.json')
if exists(_STATIC_FILES_MAP):
with open(_STATIC_FILES_MAP) as json_file:
STATIC_FILES_MAPPING = json.load(json_file)
else:
STATIC_FILES_MAPPING = {}
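# For reference (not part of the original settings): the rev-manifest.json
# produced by gulp-rev-all is, roughly, a flat JSON object mapping original
# asset paths to their revisioned file names, e.g. (made-up values):
#   {"js/main.js": "js/main.6f8c1a2b.js", "css/app.css": "css/app.9d0e4f11.css"}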
# END STATIC FILE CONFIGURATION
# SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key should only be used for development and testing.
SECRET_KEY = r"nyixt#@$+hra95q3_x96#erfzf0@*fc&q!u!aqs*xlls3ddd!w"
# END SECRET CONFIGURATION
# SITE CONFIGURATION
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# END SITE CONFIGURATION
# FIXTURE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS # NOQA
FIXTURE_DIRS = (
normpath(join(SITE_ROOT, 'fixtures')),
)
# END FIXTURE CONFIGURATION
# TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors # NOQA
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
'nyc_trees.context_processors.user_settings_privacy_url',
'nyc_trees.context_processors.config',
'nyc_trees.context_processors.my_events_now',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
normpath(join(SITE_ROOT, 'templates')),
)
# END TEMPLATE CONFIGURATION
# MIDDLEWARE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#middleware-classes
MIDDLEWARE_CLASSES = (
# Default Django middleware.
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django_statsd.middleware.GraphiteRequestTimingMiddleware',
'django_statsd.middleware.GraphiteMiddleware',
'waffle.middleware.WaffleMiddleware',
'nyc_trees.middleware.SoftLaunchMiddleware',
)
# END MIDDLEWARE CONFIGURATION
# URL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = '%s.urls' % SITE_NAME
# END URL CONFIGURATION
AUTH_USER_MODEL = 'core.User'
# APP CONFIGURATION
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.flatpages',
'django.contrib.gis',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'registration',
'django_statsd',
'floppyforms',
'widget_tweaks',
'waffle',
'watchman',
)
# THIRD-PARTY CONFIGURATION
# django-registration-redux
ACCOUNT_ACTIVATION_DAYS = 7
REGISTRATION_AUTO_LOGIN = True
# django-watchman
# Disable Storage checking, to avoid creating files on S3 on every health check
WATCHMAN_CHECKS = (
'watchman.checks.caches',
'watchman.checks.databases',
)
# END THIRD-PARTY CONFIGURATION
# Apps specific for this project go here.
LOCAL_APPS = (
'apps.core',
'apps.census_admin',
'apps.event',
'apps.home',
'apps.login',
'apps.survey',
'apps.users',
'apps.geocode',
'apps.mail'
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# END APP CONFIGURATION
# WSGI CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = '%s.wsgi.application' % SITE_NAME
# END WSGI CONFIGURATION
LOGIN_REDIRECT_URL = 'user_detail_redirect'
OMGEO_SETTINGS = [[
'omgeo.services.EsriWGS', {}
]]
# New York City bounds from http://www.mapdevelopers.com/geocode_bounding_box.php # NOQA
NYC_BOUNDS = (-74.259088, 40.495996, -73.700272, 40.915256)
# If geocoding a string produces no results, this string will be
# appended for a second attempt.
GEOCODE_FALLBACK_SUFFIX = ', New York, NY'
# The maximum number of blockface reservations per user
RESERVATIONS_LIMIT = 12
# How long blockface reservations will last
RESERVATION_TIME_PERIOD = timedelta(days=14)
TILER_URL = '//%s' % environ.get('TILER_HOST', 'localhost')
MAX_GROUP_IMAGE_SIZE_IN_BYTES = 102400 # 100 KB
SOFT_LAUNCH_REDIRECT_URL = "/"
SOFT_LAUNCH_REGEXES = [
r'^/user/',
r'^/accounts/',
r'^/login/',
r'^/faq/',
r'^/admin/',
r'^/health-check/',
]
RESERVATION_REMINDER_WINDOW = 4
| RickMohr/nyc-trees | src/nyc_trees/nyc_trees/settings/base.py | Python | apache-2.0 | 10,244 | [
"GULP"
] | 3b91e90bfe4a807d69e5da16e730c3316defbc9912f90875de640e2199354e00 |
__author__ = 'Mikhail Pedrosa <[email protected]> and Arthur Costa <[email protected]>'
__description__ = 'Filtering methods - Moving Average, Gaussian, Median'
__version__ = '0.1'
__date__ = '13/04/2015'
import numpy as np
import scipy.ndimage as sp
import scipy.signal as sg
from astropy.convolution import Gaussian1DKernel
import matplotlib.pyplot as plt
from memory_profiler import profile
#@profile()
def vap_moving_average(values, window_size):
"""
:param values:
:param window_size:
:return:
"""
matrix = np.zeros((10, 360, 253))
for elevation in np.arange(10):
for rang in np.arange(253):
for azimuth in np.arange(360):
matrix[elevation, azimuth, rang] = np.nansum(values[elevation, azimuth:(azimuth + window_size), rang])
return matrix / float(window_size)
#@profile()
def vap_median(values, window_size):
"""
:param values:
:param window_size:
:return:
"""
matrix = np.zeros((10, 360, 253))
for elevation in np.arange(10):
for rang in np.arange(253):
for azimuth in np.arange(360):
matrix[elevation, azimuth, rang] = np.nanmedian(values[elevation, azimuth:(azimuth + window_size), rang])
return matrix / float(window_size)
#@profile()
def vap_gaussian(values, window_size):
"""
:param values:
:param window_size:
:return:
"""
return
#@profile()
def moving_average(values, window_size):
"""
:param values:
:param window_size:
:return:
"""
#window = np.ones(window_size)/float(window_size)
#np.convolve(values, window, 'same')
matrix = np.zeros((len(values),))
for index in np.arange(len(values)):
matrix[index] = np.nansum(values[index:(index + window_size)])
return matrix / float(window_size)
#@profile()
def median(values, window_size):
"""
:param values:
:param window_size:
:return:
"""
matrix = np.zeros((len(values),))
for index in np.arange(len(values)):
matrix[index] = np.nanmedian(values[index:(index + window_size)])
return matrix
#@profile()
def gaussian(values):
"""
:param values:
:param sigma:
:return:
"""
windows = [0.15, 0.75, 0.15]
array = np.ma.masked_array(values, np.isnan(values))
values = array.filled(np.nan)
filter_gauss = np.convolve(values, windows, mode='same')
return filter_gauss
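# Illustrative usage sketch (not part of the original module): smoothing a 1-D
# series that may contain NaNs with the helpers above. The array values are
# made up for the example.
#
#   data = np.array([1.0, 2.0, np.nan, 4.0, 5.0])
#   smoothed_ma = moving_average(data, window_size=3)
#   smoothed_med = median(data, window_size=3)
#   smoothed_gauss = gaussian(data)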
#@profile()
def moving_triangle():
return
def gauss(n,sigma):
r = range(-int(n/2),int(n/2)+1)
    return [1 / (sigma * np.sqrt(2*np.pi)) * np.exp(-float(x)**2/(2*sigma**2)) for x in r]
| mikhailpedrosa/radar_wind-field | filters.py | Python | gpl-2.0 | 2,642 | [
"Gaussian"
] | 0d94aaa99bf05d91e9dc6765869d8a2466a48893e594134e70fcbdcf99e13ad4 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
from functools import wraps
from collections import defaultdict, OrderedDict
import numpy as np
from compliance_checker.base import BaseCheck, BaseNCCheck, check_has, score_group, Result
from compliance_checker.cf.appendix_d import dimless_vertical_coordinates
from compliance_checker.cf.util import NCGraph, StandardNameTable, units_known, units_convertible, units_temporal, map_axes, find_coord_vars, is_time_variable, is_vertical_coordinate, _possiblet, _possiblez, _possiblex, _possibley, _possibleaxis, _possiblexunits, _possibleyunits, _possibletunits, _possibleaxisunits
from netCDF4 import Dimension, Variable
from sets import Set
def print_exceptions(f):
@wraps(f)
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
from traceback import print_exc
print_exc()
return wrapper
__stdname_table__="v29"
def guess_dim_type(dimension):
"""
    Guesses the dimension type (X/Y/Z/T) of a variable's dimension.
    If the type can't be determined, None is returned.
"""
dimclasses = {u'T':_possiblet,
u'Z':_possiblez,
u'Y':_possibley,
u'X':_possiblex}
for dcname, dcvals in dimclasses.iteritems():
if dimension in dcvals:
return dcname
return None
def guess_coord_type(units, positive=None):
"""
Guesses coordinate variable type (X/Y/Z/T) from units and positive attrs
"""
coord_types = {u'X':[u'degrees_east', u'degree_east', u'degrees_E', u'degree_E', u'degreesE', u'degreeE'],
u'Y':[u'degrees_north', u'degree_north', u'degrees_N', u'degree_N', u'degreesN', u'degreeN']}
deprecated = [u'level', u'layer', u'sigma_level']
if not units or not isinstance(units, basestring) or not (units_known(units) or units in deprecated):
return None
if isinstance(positive, basestring) and positive.lower() in [u'up', u'down']:
        # only Z if units without positive does not result in something else
if guess_coord_type(units, None) in [None, u'Z']:
return u'Z'
else:
# they differ, then we can't conclude
return None
if units in deprecated or units_convertible(units, u'hPa', reftimeistime=True):
return u'Z'
if positive:
return None
for ctype, unitsposs in coord_types.iteritems():
if units in unitsposs:
return ctype
if units_convertible(units, u'days since 1970-01-01', reftimeistime=False):
return u'T'
return None
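# Illustrative examples (not part of the original module) of what the two
# guessing helpers above are expected to return; the dimension-name case
# assumes 'time' is listed in _possiblet:
#
#   guess_dim_type(u'time')              # -> u'T'
#   guess_coord_type(u'degrees_north')   # -> u'Y'
#   guess_coord_type(u'dbar', u'down')   # -> u'Z'
#   guess_coord_type(u'kg')              # -> None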
def is_variable(name, var):
dims = var.dimensions
if (name,) == dims:
# Coordinate Type
return False
# Probably a variable
return True
# helper to see if we should do DSG tests
def is_likely_dsg(func):
@wraps(func)
def _dec(s, ds):
if hasattr(ds.dataset, u'featureType'):
return func(s, ds)
# @TODO: skips if we have formalized skips
return None
return _dec
class CFBaseCheck(BaseCheck):
register_checker = True
name = u'cf'
@classmethod
def beliefs(cls): # @TODO
return {}
"""
CF Convention Checker (1.6)
    These checks are translations of the following documents:
http://cf-pcmdi.llnl.gov/documents/cf-conventions/1.6/cf-conventions.html
http://cf-pcmdi.llnl.gov/conformance/requirements-and-recommendations/1.6/
"""
def __init__(self):
self._coord_vars = defaultdict(list)
self._ancillary_vars = defaultdict(list)
self._clim_vars = defaultdict(list)
self._boundary_vars = defaultdict(dict)
self._std_names = StandardNameTable(u'cf-standard-name-table.xml')
################################################################################
#
# Helper Methods - var classifications, etc
#
################################################################################
def setup(self, ds):
self._find_coord_vars(ds)
self._find_ancillary_vars(ds)
self._find_clim_vars(ds)
self._find_boundary_vars(ds)
def _find_coord_vars(self, ds, refresh=False):
"""
Finds all coordinate variables in a dataset.
A variable with the same name as a dimension is called a coordinate variable.
The result is cached by the passed in dataset object inside of this checker. Pass refresh=True
to redo the cached value.
"""
if ds in self._coord_vars and not refresh:
return self._coord_vars[ds]
self._coord_vars[ds] = find_coord_vars(ds.dataset)
return self._coord_vars[ds]
def _find_ancillary_vars(self, ds, refresh=False):
"""
Finds all ancillary variables in a dataset.
TODO: fully define
An ancillary variable generally is a metadata container and referenced from
other variables via a string reference in an attribute.
- via ancillary_variables (3.4)
- "grid mapping var" (5.6)
- TODO: more?
The result is cached by the passed in dataset object inside of this checker. Pass refresh=True
to redo the cached value.
"""
if ds in self._ancillary_vars and not refresh:
return self._ancillary_vars[ds]
for name, var in ds.dataset.variables.iteritems():
if hasattr(var, u'ancillary_variables'):
for anc_name in var.ancillary_variables.split(" "):
if anc_name in ds.dataset.variables:
self._ancillary_vars[ds].append(ds.dataset.variables[anc_name])
if hasattr(var, u'grid_mapping'):
gm_name = var.grid_mapping
if gm_name in ds.dataset.variables:
self._ancillary_vars[ds].append(ds.dataset.variables[gm_name])
return self._ancillary_vars
def _find_data_vars(self, ds):
"""
Finds all variables that could be considered Data variables.
Returns a dictionary mapping name -> variable.
Excludes variables that are:
- coordinate variables
- ancillary variables
- no dimensions
Results are NOT CACHED.
"""
return {k:v for k, v in ds.dataset.variables.iteritems() if v not in self._find_coord_vars(ds) \
and v not in self._find_ancillary_vars(ds) \
and v.dimensions}
def _find_clim_vars(self, ds, refresh=False):
"""
Finds all climatology variables in a dataset.
Climatology variables (7.4)
Cached.
"""
if ds in self._clim_vars and not refresh:
return self._clim_vars[ds]
c_time = set() # set of climatological time axes
c_vars = set() # set of climatological variables
coord_vars = self._find_coord_vars(ds)
# find all time dimension variables
time_vars = [v for v in coord_vars if guess_dim_type(v.dimensions[0]) == u'T']
for k, v in ds.dataset.variables.iteritems():
is_cvar = False
if v in coord_vars:
if hasattr(v, u'climatology') and not hasattr(v, u'bounds'):
is_cvar = True
if k in time_vars and hasattr(v, u'units'):
units_split = v.units.split()
if len(units_split) == 3 and units_split[1:] == [u'since', u'0-1-1']:
is_cvar = True
if is_cvar:
c_time.add(v)
else:
# check cell_methods
if hasattr(v, u'cell_methods'):
try:
cell_methods = parse_cell_methods(v.cell_methods)
except:
pass
dvars = set(ds.dataset.variables.itervalues()) - set(coord_vars)
for dim in c_time:
for v in dvars:
if dim in v.dimensions:
c_vars.add(v)
self._clim_vars[ds] = c_vars
return c_vars
def _find_boundary_vars(self, ds, refresh=False):
"""
        Returns a dict of coordinate vars -> associated boundary vars
Cached.
"""
if ds in self._boundary_vars and not refresh:
return self._boundary_vars[ds]
b_vars = {}
for k, v in ds.dataset.variables.iteritems():
bounds = getattr(v, u'bounds', None)
if bounds is not None and isinstance(bounds, basestring) and bounds in ds.dataset.variables:
b_vars[v] = ds.dataset.variables[bounds]
self._boundary_vars[ds] = b_vars
return b_vars
###############################################################################
#
# CHAPTER 2: NetCDF Files and Components
#
###############################################################################
def check_data_types(self, ds):
"""
2.2 The netCDF data types char, byte, short, int, float or real, and double are all acceptable
"""
fails = []
total = len(ds.dataset.variables)
for k, v in ds.dataset.variables.iteritems():
if v.dtype not in [np.character,
np.dtype('c'),
np.dtype('b'),
np.dtype('i4'),
np.int32,
np.float32,
np.double,
'int16',
'float32'
]:
fails.append((u'The variable %s failed because the datatype is %s' %(k, v.datatype)))
return Result(BaseCheck.HIGH, (total - len(fails), total), u'§2.2 Valid netCDF data types', msgs=fails)
def check_naming_conventions(self, ds):
"""
2.3 Variable, dimension and attribute names should begin with a letter and be composed of letters, digits, and underscores.
"""
fails = []
total = len(ds.dataset.variables)
        rname = re.compile("^[A-Za-z][A-Za-z0-9_]*$")
        for k, v in ds.dataset.variables.iteritems():
            if not rname.match(k):
                fails.append(u'Variable %s is an invalid name: it must begin with a letter and contain only letters, digits, and underscores' % k)
return Result(BaseCheck.HIGH, (total - len(fails), total), u'§2.3 Legal variable names', fails)
def check_names_unique(self, ds):
"""
2.3 names should not be distinguished purely by case, i.e., if case is disregarded, no two names should be the same.
"""
fails = []
total = len(ds.dataset.variables)
names = defaultdict(int)
for k in ds.dataset.variables:
names[k.lower()] += 1
        fails = [u'Variable names should not be distinguished only by case; duplicate name (ignoring case): %s' % k for k, v in names.iteritems() if v > 1]
return Result(BaseCheck.LOW, (total - len(fails), total), u'§2.3 Unique variable names', msgs=fails)
def check_dimension_names(self, ds):
"""
2.4 A variable may have any number of dimensions, including zero, and the dimensions must all have different names.
"""
fails = []
total = len(ds.dataset.variables)
for k, v in ds.dataset.variables.iteritems():
dims = defaultdict(int)
for d in v.dimensions:
dims[d] += 1
for dimension, count in dims.iteritems():
if count > 1:
fails.append("%s has two or more dimensions named %s" % (k, dimension))
return Result(BaseCheck.HIGH, (total - len(fails), total), u'§2.4 Unique dimensions', msgs=fails)
def check_dimension_order(self, ds):
"""
2.4 If any or all of the dimensions of a variable have the interpretations of "date or time" (T), "height or depth" (Z),
"latitude" (Y), or "longitude" (X) then we recommend, those dimensions to appear in the relative order T, then Z, then Y,
then X in the CDL definition corresponding to the file. All other dimensions should, whenever possible, be placed to the
left of the spatiotemporal dimensions.
"""
fails = []
total = len(ds.dataset.variables)
expected = [u'T', u'Z', u'Y', u'X']
for k, v in ds.dataset.variables.iteritems():
dclass = map(guess_dim_type, v.dimensions)
# any nones should be before classified ones
nones = [i for i, x in enumerate(dclass) if x is None]
nonnones = [i for i, x in enumerate(dclass) if x is not None]
if len(nones) and len(nonnones) and max(nones) > min(nonnones):
fails.append("Variable %s has a non-space-time dimension after space-time-dimensions"%k)
# classified ones should be in correct order
nonnones = [expected.index(x) for x in dclass if x is not None]
nonnones_sorted = sorted(nonnones)
if nonnones != nonnones_sorted:
fails.append("The dimensions for %s are not in T Z Y X order"%k)
# there are two checks here per variable so totals must be doubled
return Result(BaseCheck.LOW, (total*2 - len(fails), total*2), u'§2.4 Dimension order', msgs=fails)
#def check_dimension_single_value_applicable(self, ds):
"""
2.4 When a single value of some coordinate applies to all the values in a variable, the recommended means of attaching this
information to the variable is by use of a dimension of size unity with a one-element coordinate variable. It is also
acceptable to use a scalar coordinate variable which eliminates the need for an associated size one dimension in the data
variable.
"""
#TODO: We need to identify a non-compliant example of this that can be verified, but I believe
# that if the file is netCDF then this requirement may be met. When we do we can reinsert this check
#pass
def check_fill_value_outside_valid_range(self, ds):
"""
2.5.1 The _FillValue should be outside the range specified by valid_range (if used) for a variable.
"""
ret = []
for k, v in ds.dataset.variables.iteritems():
if hasattr(v, u'_FillValue'):
attrs = v.ncattrs()
if u'valid_range' in attrs:
rmin, rmax = v.valid_range
spec_by = u'valid_range'
elif u'valid_min' in attrs and u'valid_max' in attrs:
rmin = v.valid_min
rmax = v.valid_max
spec_by = u'valid_min/valid_max'
else:
continue
valid = not (v._FillValue >= rmin and v._FillValue <= rmax)
reasoning = []
if not valid:
reasoning = ["%s must not be in valid range (%s to %s) as specified by %s" % (v._FillValue, rmin, rmax, spec_by)]
ret.append(Result(BaseCheck.HIGH, valid, (u'§2.5.1 _FillValue outside of valid range', k), msgs=reasoning))
return ret
def check_conventions_are_cf_16(self, ds):
"""
2.6.1 the NUG defined global attribute Conventions to the string value "CF-1.6"
"""
valid_conventions = [u'CF-1.0', u'CF-1.1', u'CF-1.2', u'CF-1.3',
u'CF-1.4', u'CF-1.5', u'CF-1.6']
if hasattr(ds.dataset, u'Conventions'):
conventions = re.split(',|\s+', getattr(ds.dataset, 'Conventions', ''))
if any((c.strip() in valid_conventions for c in conventions)):
valid = True
reasoning = [u'Conventions field is "CF-1.x (x in 0-6)"']
else:
valid = False
reasoning = [u'Conventions field is not "CF-1.x (x in 0-6)"']
else:
valid = False
reasoning = [u'Conventions field is not present']
return Result(BaseCheck.HIGH, valid, u'§2.6.1 Global Attribute Conventions includes CF-1.6', msgs=reasoning)
@score_group(u'§2.6.2 Convention Attributes')
def check_convention_globals(self, ds):
"""
2.6.2 title/history global attributes, must be strings. Do not need to exist.
"""
attrs = [u'title', u'history']
ret = []
for a in attrs:
if hasattr(ds.dataset, a):
ret.append(Result(BaseCheck.HIGH, isinstance(getattr(ds.dataset, a), basestring), (u'§2.6.2 Title/history global attributes', a)))
return ret
@score_group(u'§2.6.2 Convention Attributes')
def check_convention_possibly_var_attrs(self, ds):
"""
2.6.2 institution, source, references, and comment, either global or assigned to individual variables.
When an attribute appears both globally and as a variable attribute, the variable's version has precedence.
Must be strings.
"""
attrs = [u'institution', u'source', u'references', u'comment']
ret = []
# check attrs on global ds
# can't predetermine total - we only report attrs we find
for k, v in ds.dataset.variables.iteritems():
vattrs = v.ncattrs()
for a in attrs:
if a in vattrs:
ret.append(Result(BaseCheck.HIGH, isinstance(getattr(v, a), basestring), (u'§2.6.2 Description of file contents', a)))
return ret
###############################################################################
#
# CHAPTER 3: Description of the Data
#
###############################################################################
def check_units(self, ds):
"""
3.1 The units attribute is required for all variables that represent dimensional quantities
(except for boundary variables defined in Section 7.1, "Cell Boundaries" and climatology variables
defined in Section 7.4, "Climatological Statistics").
Units are not required for dimensionless quantities. A variable with no units attribute is assumed
to be dimensionless. However, a units attribute specifying a dimensionless unit may optionally be
included.
- units required
- type must be recognized by udunits
- if std name specified, must be consistent with standard name table, must also be consistent with a
specified cell_methods attribute if present
"""
ret_val = []
deprecated = [u'level', u'layer', u'sigma_level']
clim_vars = self._find_clim_vars(ds)
boundary_vars = self._find_boundary_vars(ds).itervalues()
container_vars = self._find_container_variables(ds)
for k, v in ds.dataset.variables.iteritems():
# skip climatological vars, boundary vars
if v in clim_vars or \
v in boundary_vars or \
k in container_vars:
continue
# skip string type vars
if (isinstance(v.dtype, type) and issubclass(v.dtype, basestring)) or v.dtype.char == 'S':
continue
# skip quality control vars
if hasattr(v, u'flag_meanings'):
continue
if hasattr(v, u'standard_name') and u'status_flag' in v.standard_name:
continue
# skip DSG cf_role
if hasattr(v, "cf_role"):
continue
units = getattr(v, u'units', None)
# 1) "units" attribute must be present
presence = Result(BaseCheck.HIGH, units is not None, u'§3.1 Variables contain units')
if not presence.value:
presence.msgs = [u'units attribute is required for %s' % k]
ret_val.append(presence)
continue
# 2) units attribute must be a string
astring = Result(BaseCheck.HIGH, isinstance(units, basestring), u'§3.1 units attribute is a string')
if not astring.value:
astring.msgs = ["units not a string (%s) for %s" % (type(units), k)]
ret_val.append(astring)
continue
# now, units are present and string
# 3) units are not deprecated
resdeprecated = Result(BaseCheck.LOW, not units in deprecated, u'§3.1 Variables contain valid units')
if not resdeprecated.value:
resdeprecated.msgs = [u'units (%s) is deprecated for %s' % (units, k)]
ret_val.append(resdeprecated)
continue
# 4) units are known
knownu = Result(BaseCheck.HIGH, units_known(units), u'§3.1 Variables contain valid CF Units')
if not knownu.value:
knownu.msgs = [u'unknown units type (%s) for %s' % (units, k)]
ret_val.append(knownu)
#continue
# units look ok so far, check against standard name / cell methods
std_name = getattr(v, u'standard_name', None)
std_name_modifier = None
if isinstance(std_name, basestring):
if u' ' in std_name:
std_name, std_name_modifier = std_name.split(u' ', 1)
# if no standard name or cell_methods, nothing left to do
if std_name is None and not hasattr(v, u'cell_methods'):
#ret_val.append(Result(BaseCheck.HIGH, True, ('units', k, 'ok')))
continue
# 5) if a known std_name, use the units provided
if std_name is not None and std_name in self._std_names:
std_units = self._std_names[std_name].canonical_units
#@TODO modifiers changes units
msgs = []
valid = True
if units is not None:
if std_name == 'time' and units.split(" ")[0] in [u'day', u'days', u'd', u'hour', u'hours', u'hr', u'hrs', u'h', u'year', u'years', u'minute', u'minutes', u'm', u'min', u'mins', u'second', u'seconds', u's', u'sec', u'secs']:
if len(units.split(" "))>1:
if units.split(" ")[1] == u'since':
std_units = units
else:
std_units = units
if std_units == u'm' and units in [u'meter', u'meters']:
std_units = units
if units != std_units and units not in [u'degrees_north', u'degree_N', u'degreeN', u'degreesN', u'degrees_east', u'degree_E', u'degreeE', u'degreesE'] and not units_convertible(units, std_units):
msgs = [u'units are %s, standard_name units should be %s' % (units, std_units)]
valid = False
else:
valid = False
                    msgs = [u'The units for variable %s are of type None.' % k]
ret_val.append(Result(BaseCheck.HIGH, valid, u'§3.1 Variables contain valid units for the standard_name', msgs))
# 6) cell methods @TODO -> Isnt this in the check_cell_methods section?
#if hasattr(v, 'cell_methods'):
# cell_methods = v.cell_methods
#
# # placemarker for future check
# ret_val.append(Result(BaseCheck.HIGH, False, ('units', k, 'cell_methods'), ['TODO: implement cell_methods check']))
return ret_val
def check_standard_name(self, ds):
"""
3.3 A standard name is associated with a variable via the attribute standard_name which takes a
string value comprised of a standard name optionally followed by one or more blanks and a
standard name modifier
"""
ret_val = []
for k, v in ds.dataset.variables.iteritems():
std_name = getattr(v, u'standard_name', None)
std_name_modifier = None
# no standard name? is ok by the letter of the law
if std_name is None:
continue
if isinstance(std_name, basestring):
if ' ' in std_name:
std_name, std_name_modifier = std_name.split(' ', 1)
# 1) standard name is a string and in standard name table or an exception, see H.2
msgs = []
is_str = isinstance(std_name, basestring)
in_exception = std_name in (u'platform_name', u'station_name', u'instrument_name')
in_table = std_name in self._std_names
if not is_str:
msgs.append("The standard name '%s' is not of type string. It is type %s" % (std_name, type(std_name)))
if not in_table and not in_exception:
msgs.append("The standard name '%s' is not in standard name table" % std_name)
ret_val.append(Result(BaseCheck.HIGH, is_str and in_table, u'§3.3 Standard Names', msgs))
# 2) optional - if modifiers, should be in table
if std_name_modifier:
allowed = [u'detection_minimum',
u'number_of_observations',
u'standard_error',
u'status_flag']
msgs = []
if not std_name_modifier in allowed:
msgs.append("modifier (%s) not allowed" % std_name_modifier)
ret_val.append(Result(BaseCheck.HIGH, std_name_modifier in allowed, u'§3.3 Standard Names', msgs))
return ret_val
def check_ancillary_data(self, ds):
"""
3.4 It is a string attribute whose value is a blank separated list of variable names.
The nature of the relationship between variables associated via ancillary_variables must
be determined by other attributes. The variables listed by the ancillary_variables attribute
will often have the standard name of the variable which points to them including a modifier
(Appendix C, Standard Name Modifiers) to indicate the relationship.
"""
ret_val = []
for k, v in ds.dataset.variables.iteritems():
anc = getattr(v, u'ancillary_variables', None)
if anc is None:
continue
# should be a string, splittable, and each should exist
anc_result = Result(BaseCheck.HIGH, name=(u'§3.4 Ancillary Variables', k))
msgs = []
if not isinstance(anc, basestring):
anc_result.value = False
anc_result.msgs = ["ancillary_variables is not a string"]
ret_val.append(anc_result)
continue
ancs = anc.split()
existing = 0
for a in ancs:
if a in ds.dataset.variables:
existing += 1
else:
msgs.append("ancillary var %s does not exist" % a)
anc_result.value = (existing, len(ancs))
anc_result.msgs = msgs
ret_val.append(anc_result)
return ret_val
def check_flags(self, ds):
"""
3.5 The attributes flag_values, flag_masks and flag_meanings are intended to make variables
that contain flag values self describing. Status codes and Boolean (binary) condition flags may be
expressed with different combinations of flag_values and flag_masks attribute definitions.
The flag_values and flag_meanings attributes describe a status flag consisting of mutually exclusive coded values.
The flag_meanings attribute is a string whose value is a blank separated list of descriptive words
or phrases, one for each flag value. Each word or phrase should consist of characters from
the alphanumeric set and the following five: '_', '-', '.', '+', '@'.
The flag_masks and flag_meanings attributes describe a number of independent Boolean conditions
using bit field notation by setting unique bits in each flag_masks value.
The flag_masks, flag_values and flag_meanings attributes, used together, describe a blend of
independent Boolean conditions and enumerated status codes. A flagged condition is identified
by a bitwise AND of the variable value and each flag_masks value; a result that matches the
flag_values value indicates a true condition.
"""
ret_val = []
for k, v in ds.dataset.variables.iteritems():
flag_values = getattr(v, "flag_values", None)
flag_masks = getattr(v, "flag_masks", None)
flag_meanings = getattr(v, "flag_meanings", None)
if flag_values is None and flag_masks is None:
continue
# 1) flags_values attribute must have same type as variable to which it is attached
if flag_values is not None:
fvr = Result(BaseCheck.HIGH, flag_values.dtype == v.dtype, name=u'§3.5 Flags and flag attributes')
if not fvr.value:
fvr.msgs = [u'flag_values attr does not have same type as var (fv: %s, v: %s)' % (flag_values.dtype, v.dtype)]
ret_val.append(fvr)
# 2) if flag_values, must have flag_meanings
fmr = Result(BaseCheck.HIGH, flag_meanings is not None, name=u'§3.5 Flags and flag attributes')
if not fmr.value:
fmr.msgs = [u'flag_meanings must be present']
ret_val.append(fmr)
# flag_values must be a list (array), not just a single value
result_name = u'§3.5 Flags and flag attributes'
fvlist = Result(BaseCheck.HIGH, not np.isscalar(flag_values), result_name)
ret_val.append(fvlist)
if not fvlist.value:
fvlist.msgs = [u'flag_values must be a list']
# convert to an array so the remaining checks can be applied
flag_values = np.array([flag_values])
# 8) flag_values attribute values must be mutually exclusive
fvset = set(flag_values)
fvsr = Result(BaseCheck.HIGH, len(fvset) == len(flag_values), u'§3.5 Flags and flag attributes')
if not fvsr.value:
fvsr.msgs = [u'repeated items in flag_values']
ret_val.append(fvsr)
# 3) type of flag_meanings is a string, blank separated list of words
if flag_meanings is not None:
fmt = Result(BaseCheck.HIGH, isinstance(flag_meanings, basestring), name=u'§3.5 Flags and flag attributes')
if not fmt.value:
fmt.msgs = [u'flag_meanings must be a string']
ret_val.append(fmt)
# split and check each word
rflags = re.compile(r"^[0-9A-Za-z_\-.+@]+$")
meanings = flag_meanings.split()
msgs = []
ok_count = 0
for fm in meanings:
if rflags.match(fm) is not None:
ok_count += 1
else:
msgs.append("flag_meaning %s of var %s is incorrectly named" % (fm, k))
ret_val.append(Result(BaseCheck.HIGH, (ok_count, len(meanings)), name=u'§3.5 Flags and flag attributes', msgs=msgs))
# now that we've split meanings up, check length vs values/masks
# 4) number of flag_values must equal number of meanings
if flag_values is not None:
fvfmr = Result(BaseCheck.HIGH, len(flag_values) == len(meanings), u'§3.5 Flags and flag attributes')
if not fvfmr.value:
fvfmr.msgs = [u'flag_values length (%d) not equal to flag_meanings length (%d)' % (len(flag_values), len(meanings))]
ret_val.append(fvfmr)
# 5) number of flag_masks must equal number of meanings
if flag_masks is not None:
fmfmr = Result(BaseCheck.HIGH, len(flag_masks) == len(meanings), u'§3.5 Flags and flag attributes')
if not fmfmr.value:
fmfmr.msgs = [u'flag_masks length (%d) not equal to flag_meanings length (%d)' % (len(flag_masks), len(meanings))]
ret_val.append(fmfmr)
# 6) flag_masks must have same type as var and those vars must be compatible with bit field expressions
if flag_masks is not None:
msgs = []
ok_count = 0
same_type = flag_masks.dtype == v.dtype
type_ok = v.dtype in [np.character,
np.dtype('b'),
np.dtype('i4'),
np.int32]
if same_type:
ok_count += 1
else:
msgs.append("flag_masks is not same type as v (fm: %s, v: %s)" % (flag_masks.dtype, v.dtype))
if type_ok:
ok_count += 1
else:
msgs.append("variable not of appropriate type to have flag_masks (%s)" % (v.dtype))
ret_val.append(Result(BaseCheck.HIGH, (ok_count, 2), u'§3.5 Flags and flag attributes', msgs=msgs))
# 7) the flag_masks attribute values must be non-zero
zeros = [x for x in flag_masks if x == 0]
msgs = []
if len(zeros):
msgs = [u'flag_masks attribute values contains a zero']
ret_val.append(Result(BaseCheck.HIGH, len(zeros) == 0, u'§3.5 Flags and flag attributes', msgs=msgs))
# 9) when both defined, boolean AND of each entry in flag_values with corresponding entry in flag_masks
# should equal the flag_values entry
if flag_values is not None and flag_masks is not None:
allv = [(fv & fm) == fv for fv, fm in zip(flag_values, flag_masks)]
allvr = Result(BaseCheck.MEDIUM, all(allv), u'§3.5 Flags and flag attributes')
if not allvr.value:
allvr.msgs = ["flag masks and flag values combined don't equal flag value"]
ret_val.append(allvr)
return ret_val
###############################################################################
#
# CHAPTER 4: Coordinate Types
#
###############################################################################
def check_coordinate_axis_attr(self, ds):
"""
4 The attribute axis may be attached to a coordinate variable and given one of the values X, Y, Z or T
which stand for a longitude, latitude, vertical, or time axis respectively. Alternatively the standard_name
attribute may be used for direct identification.
"""
ret_val = []
coord_vars = self._find_coord_vars(ds)
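# Map each dimension name to the axis it represents so that check 4) below
# can detect a data variable depending on more than one coordinate variable
# that carries the same axis value.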
dim_to_axis = map_axes({k:v for k,v in ds.dataset.variables.iteritems() if v in coord_vars}, reverse_map=True)
data_vars = {k:v for k,v in ds.dataset.variables.iteritems() if v not in coord_vars}
# find auxiliary coordinate variables via 'coordinates' attribute
auxiliary_coordinate_vars = []
for name, var in data_vars.iteritems():
if hasattr(var, u'coordinates'):
cv = var.coordinates.split(u' ')
for c in cv:
if c in ds.dataset.variables:
auxiliary_coordinate_vars.append(ds.dataset.variables[c])
for k, v in ds.dataset.variables.iteritems():
axis = getattr(v, u'axis', None)
if axis is None:
continue
# 1) axis must be X, Y, Z, or T
axis_valid = axis in [u'X', u'Y', u'Z', u'T']
avr = Result(BaseCheck.HIGH, axis_valid, u'§4 Axis attributes and coordinate variables')
if not axis_valid:
avr.msgs = [u'axis value (%s) is not valid' % axis]
ret_val.append(avr)
# 2) only coordinate vars or auxiliary coordinate variable are allowed to have axis set
if v in coord_vars or v in auxiliary_coordinate_vars:
acvr = Result(BaseCheck.HIGH, True, u'§4 Axis attributes and coordinate variables')
if v in auxiliary_coordinate_vars:
acvr.msgs = ["%s is an auxiliary coordinate var" % k]
else:
acvr = Result(BaseCheck.HIGH, False, u'§4 Axis attributes and coordinate variables')
acvr.msgs = [u'%s is not allowed to have an axis attr as it is not a coordinate var or auxiliary_coordinate_var' % k]
ret_val.append(acvr)
# 3) must be consistent with coordinate type deduced from units and positive
axis_type = guess_coord_type(getattr(v, u'units', None), getattr(v, u'positive', None))
if axis_type is not None:
atr = Result(BaseCheck.HIGH, axis_type == axis, u'§4 Axis attributes and coordinate variables')
if not atr.value:
atr.msgs = [u'%s guessed type (%s) is not consistent with coord type (%s)' % (k, axis_type, axis)]
ret_val.append(atr)
# 4) a data variable must not have more than one coordinate variable with a particular value of the axis attribute
if k in data_vars:
dep_axes = [(dim_to_axis[d], d) for d in v.dimensions if d in dim_to_axis]
dups = defaultdict(int)
for d in dep_axes:
dups[d[0][0]] += 1
dups = {kk:vv for kk,vv in dups.iteritems() if vv > 1}
coores = Result(BaseCheck.HIGH, len(dups) == 0, u'§4 Axis attributes and coordinate variables')
if not coores.value:
coores.msgs = []
for kk, vv in dups.iteritems():
same_axis = [item[1] for item in dep_axes if item[0] == kk]
coores.msgs.append(u'%s depends on multiple coord vars with axis attribute (%s): %s' % (k, kk, u','.join(same_axis)))
ret_val.append(coores)
return ret_val
def check_coordinate_vars_for_all_coordinate_types(self, ds):
"""
4 We strongly recommend that coordinate variables be used for all coordinate types whenever they are applicable.
"""
ret_val = []
# 1. Verify that for any known or common coordinate name used as a dimension
# there is a coordinate variable for that dimension.
known_coordinate_names = (u'longitude', u'lon' , u'x',
u'latitude' , u'lat' , u'y',
u'vertical' , u'height', u'z',
u'time' , u't')
for k,v in ds.dataset.dimensions.iteritems():
if k.lower() in known_coordinate_names:
valid = k in ds.dataset.variables
result = Result(BaseCheck.MEDIUM, valid, u'§4 Coordinate Variables')
if not valid:
result.msgs = [u'No coordinate variable for coordinate type %s' % k]
ret_val.append(result)
#@TODO: Additional verifiable requirements
return ret_val
def _coord_has_units(self, name, coordinate, var, recommended, acceptable):
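"""
Helper for the §4.1/§4.2 checks: returns Results for one coordinate
variable. A HIGH Result records whether a units attribute is present, and
a MEDIUM (score, 3) Result grades the units as missing (0), incorrect (1),
acceptable but not recommended (2), or the recommended unit (3).
"""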
ret_val = []
has_units = hasattr(var, u'units')
result = Result(BaseCheck.HIGH, has_units, u'§4 Coordinate Variable %s contains valid attributes' % coordinate)
ret_val.append(result)
# 0 - does not have units
# 1 - incorrect units
# 2 - also acceptable units
# 3 - recommend units
if not has_units:
result = Result(BaseCheck.MEDIUM, (0, 3), u'§4.1 Coordinates representing %s' % coordinate, [u'%s does not have units attribute'%name])
ret_val.append(result)
elif has_units and var.units == recommended:
result = Result(BaseCheck.MEDIUM, (3, 3), u'§4.1 Coordinates representing %s' % coordinate)
ret_val.append(result)
elif has_units and var.units in acceptable:
result = Result(BaseCheck.MEDIUM, (2, 3), u'§4.1 Coordinates representing %s' % coordinate, [u'%s units are acceptable, but not recommended'%name])
ret_val.append(result)
else:
result = Result(BaseCheck.MEDIUM, (1, 3), u'§4.1 Coordinates representing %s' % coordinate, [u'%s units are incorrect' % name])
ret_val.append(result)
return ret_val
def check_latitude(self, ds):
"""
4.1 Variables representing latitude must always explicitly include the units attribute; there is no default value.
The recommended unit of latitude is degrees_north. Also acceptable are degree_north, degree_N, degrees_N, degreeN, and degreesN.
Optionally, the latitude type may be indicated additionally by providing the standard_name attribute with the
value latitude, and/or the axis attribute with the value Y.
"""
ret_val = []
recommended = u'degrees_north'
acceptable = [u'degree_north', u'degree_N', u'degrees_N', u'degreeN', u'degreesN']
for k,v in ds.dataset.variables.iteritems():
if k == u'latitude' or getattr(v, u'standard_name', None) == u'latitude':
results = self._coord_has_units(k, u'latitude', v, recommended, acceptable)
ret_val.extend(results)
return ret_val
def check_longitude(self, ds):
"""
4.2 Variables representing longitude must always explicitly include the units attribute; there is no default value.
The recommended unit of longitude is degrees_east. Also acceptable are degree_east, degree_E, degrees_E, degreeE, and degreesE.
Optionally, the longitude type may be indicated additionally by providing the standard_name attribute with the
value longitude, and/or the axis attribute with the value X.
"""
ret_val = []
recommended = u'degrees_east'
acceptable = [u'degree_east', u'degree_E', u'degrees_E', u'degreeE', u'degreesE']
for k,v in ds.dataset.variables.iteritems():
if k == u'longitude' or getattr(v, u'standard_name', None) == u'longitude':
results = self._coord_has_units(k, u'longitude', v, recommended, acceptable)
ret_val.extend(results)
return ret_val
def check_vertical_coordinate(self, ds):
"""
4.3 Variables representing dimensional height or depth axes must always
explicitly include the units attribute; there is no default value.
The attribute positive is required if the vertical axis units are not a
valid unit of pressure. The positive attribute may have the value up or
down (case insensitive). This attribute may be applied to either
coordinate variables or auxiliary coordinate variables that contain
vertical coordinate data.
"""
ret_val = []
for k,v in ds.dataset.variables.iteritems():
if is_vertical_coordinate(k,v):
# Vertical variables MUST have units
has_units = hasattr(v, u'units')
result = Result(BaseCheck.HIGH, \
has_units, \
u'§4.3 Vertical coordinates contain valid attributes')
ret_val.append(result)
# If it's not pressure then it must have positive defined
if not has_units:
result = Result(BaseCheck.HIGH, \
False, \
u'§4.3 Vertical coordinates contain valid attributes',[u'%s does not have units'%k])
ret_val.append(result)
continue
# Do we have pressure?
is_pressure = units_convertible(u'dbar', v.units)
if is_pressure:
result = Result(BaseCheck.HIGH, \
True, \
u'§4.3 Vertical coordinates contain valid attributes')
# What about positive?
elif getattr(v,'positive', u'').lower() in (u'up', u'down'):
result = Result(BaseCheck.HIGH, \
True, \
u'§4.3 Vertical coordinates contain valid attributes')
# Not-compliant
else:
result = Result(BaseCheck.HIGH, \
False, \
u'§4.3 Vertical coordinates contain valid attributes', \
[u'vertical variable %s needs to define positive attribute' % k])
ret_val.append(result)
return ret_val
def check_dimensional_vertical_coordinate(self, ds):
"""
4.3.1 The units attribute for dimensional coordinates will be a string
formatted as per the udunits.dat file.
The acceptable units for vertical (depth or height) coordinate variables
are:
- units of pressure as listed in the file udunits.dat. For vertical axes
the most commonly used of these include bar, millibar,
decibar, atmosphere (atm), pascal (Pa), and hPa.
- units of length as listed in the file udunits.dat. For vertical axes
the most commonly used of these include meter (metre, m), and
kilometer (km).
- other units listed in the file udunits.dat that may under certain
circumstances reference vertical position such as units of density or
temperature.
Plural forms are also acceptable.
"""
ret_val = []
for k,v in ds.dataset.variables.iteritems():
# If this is not a vertical coordinate
if not is_vertical_coordinate(k,v):
continue
# If this is not height or depth
vertical_coordinates = (u'height', u'depth')
if k not in vertical_coordinates and \
getattr(v, u'standard_name', u'') not in vertical_coordinates:
continue
# Satisfies 4.3.1
# Pressure or length is okay
is_pressure = units_convertible(getattr(v, u'units', u'1'), u'dbar')
is_length = units_convertible(getattr(v, u'units', u'1'), u'm')
is_temp = units_convertible(getattr(v, u'units', u'1'), u'degrees_C')
is_density = units_convertible(getattr(v, u'units', u'1'), u'kg m-3')
if is_pressure or is_length:
result = Result(BaseCheck.HIGH, True, \
u'§4.3.1 Vertical dimension coordinates contain valid attributes', \
[u'dimensional vertical coordinate is pressure or length'])
# Temperature or Density are okay as well
elif is_temp or is_density:
result = Result(BaseCheck.HIGH, True, \
u'§4.3.1 Vertical dimension coordinates contain valid attributes', \
[u'dimensional vertical coordinate is temp or density'])
else:
result = Result(BaseCheck.HIGH, False, \
u'§4.3.1 Vertical dimension coordinates contain valid attributes', \
[u'incorrect vertical units'])
ret_val.append(result)
return ret_val
def check_dimensionless_vertical_coordinate(self, ds):
"""
4.3.2 The units attribute is not required for dimensionless coordinates.
The standard_name attribute associates a coordinate with its definition
from Appendix D, Dimensionless Vertical Coordinates. The definition
provides a mapping between the dimensionless coordinate values and
dimensional values that can positively and uniquely indicate the
location of the data.
A new attribute, formula_terms, is used to associate terms in the
definitions with variables in a netCDF file. To maintain backwards
compatibility with COARDS the use of these attributes is not required,
but is strongly recommended.
"""
ret_val = []
dimless = dict(dimless_vertical_coordinates)
for k,v in ds.dataset.variables.iteritems():
std_name = getattr(v, u'standard_name', u'')
if std_name not in dimless:
continue
# Determine if the regex matches for formula_terms
valid_formula = re.match(dimless[std_name], \
getattr(v, u'formula_terms', u''))
if valid_formula is not None:
result = Result(BaseCheck.MEDIUM, \
True, \
u'§4.3.2 Dimensionless Coordinates and formula_terms')
else:
result = Result(BaseCheck.MEDIUM, \
False, \
u'§4.3.2 Dimensionless Coordinates and formula_terms',\
[u'formula_terms missing from dimensionless coordinate %s' % k])
ret_val.append(result)
# Determine that each of the terms actually exists
# If formula_terms wasn't defined then this fails
if not valid_formula:
result = Result(BaseCheck.MEDIUM, \
False, \
u'§4.3.2 Dimensionless Coordinates and formula_terms', \
[u'formula_terms not defined for dimensionless coordinate %s' % k])
ret_val.append(result)
continue
# Check the terms
missing_terms = []
groups = valid_formula.groups()
for i in xrange(1, len(groups), 2):
varname = groups[i]
if varname not in ds.dataset.variables:
missing_terms.append(varname)
# Report the missing terms
result = Result(BaseCheck.MEDIUM, \
not missing_terms, \
u'§4.3.2 Dimensionless Coordinates and formula_terms', \
[u'%s missing for dimensionless coordinate %s' % (i,k) for i in missing_terms])
ret_val.append(result)
return ret_val
def check_time_coordinate(self, ds):
"""
4.4 Variables representing time must always explicitly include the units
attribute; there is no default value.
The units attribute takes a string value formatted as per the
recommendations in the Udunits package.
The acceptable units for time are listed in the udunits.dat file. The
most commonly used of these strings (and their abbreviations) includes
day (d), hour (hr, h), minute (min) and second (sec, s). Plural forms
are also acceptable. The reference time string (appearing after the
identifier since) may include date alone; date and time; or date, time,
and time zone. The reference time is required. A reference time in year
0 has a special meaning (see Section 7.4, "Climatological Statistics").
Recommend that the unit year be used with caution. It is not a calendar
year. For similar reasons the unit month should also be used with
caution.
A time coordinate is identifiable from its units string alone.
Optionally, the time coordinate may be indicated additionally by
providing the standard_name attribute with an appropriate value, and/or
the axis attribute with the value T.
"""
ret_val = []
for k,v in ds.dataset.variables.iteritems():
if not is_time_variable(k,v):
continue
# Has units
has_units = hasattr(v, u'units')
if not has_units:
result = Result(BaseCheck.HIGH, \
False, \
u'§4.4 Time coordinate variable and attributes',[u'%s does not have units'%k])
ret_val.append(result)
continue
# Correct and identifiable units
result = Result(BaseCheck.HIGH, \
True, \
u'§4.4 Time coordinate variable and attributes')
ret_val.append(result)
correct_units = units_temporal(v.units)
reasoning = None
if not correct_units:
reasoning = [u'%s does not have correct time units' % k]
result = Result(BaseCheck.HIGH, \
correct_units, \
u'§4.4 Time coordinate variable and attributes', \
reasoning)
ret_val.append(result)
return ret_val
def check_calendar(self, ds):
"""
4.4.1 In order to calculate a new date and time given a base date, base
time and a time increment one must know what calendar to use.
The values currently defined for calendar are:
- gregorian or standard
- proleptic_gregorian
- noleap or 365_day
- all_leap or 366_day
- 360_day
- julian
- none
The calendar attribute may be set to none in climate experiments that
simulate a fixed time of year.
The time of year is indicated by the date in the reference time of the
units attribute.
If none of the calendars defined above applies, a non-standard calendar
can be defined. The lengths of each month are explicitly defined with
the month_lengths attribute of the time axis.
If leap years are included, then two other attributes of the time axis
should also be defined:
leap_year, leap_month
The calendar attribute is not required when a non-standard calendar is
being used. It is sufficient to define the calendar using the
month_lengths attribute, along with leap_year, and leap_month as
appropriate. However, the calendar attribute is allowed to take
non-standard values and in that case defining the non-standard calendar
using the appropriate attributes is required.
"""
valid_calendars = [
u'gregorian',
u'standard',
u'proleptic_gregorian',
u'noleap',
u'365_day',
u'all_leap',
u'366_day',
u'360_day',
u'julian',
u'none'
]
ret_val = []
for k,v in ds.dataset.variables.iteritems():
if not is_time_variable(k,v):
continue
reasoning = None
has_calendar = hasattr(v, u'calendar')
if not has_calendar:
reasoning = [u'Variable %s should have a calendar attribute' % k]
result = Result(BaseCheck.LOW, \
has_calendar, \
u'§4.4.1 Time and calendar', \
reasoning)
ret_val.append(result)
if has_calendar:
reasoning = None
valid_calendar = v.calendar in valid_calendars
if not valid_calendar:
reasoning = ["Variable %s should have a valid calendar: '%s' is not a valid calendar" % (k, v.calendar)]
result = Result(BaseCheck.LOW, \
valid_calendar, \
u'§4.4.1 Time and calendar', \
reasoning)
ret_val.append(result)
return ret_val
###############################################################################
#
# CHAPTER 5: Coordinate Systems
#
###############################################################################
def _is_station_var(self, var):
if getattr(var, u'standard_name', None) in (u'platform_name', u'station_name', u'instrument_name'):
return True
return False
def _get_coord_vars(self, ds):
coord_vars = []
for name,var in ds.dataset.variables.iteritems():
if (name,) == var.dimensions:
coord_vars.append(name)
return coord_vars
#def check_coordinate_systems(self, ds):
"""
5 All of a variable's spatiotemporal dimensions that are not latitude,
longitude, vertical, or time dimensions are required to be associated
with the relevant latitude, longitude, vertical, or time coordinates via
the new coordinates attribute of the variable. The value of the
coordinates attribute is a blank separated list of the names of
auxiliary coordinate variables.
The dimensions of an auxiliary coordinate variable must be a subset of
the dimensions of the variable with which the coordinate is associated,
with two exceptions:
- String-valued coordinates (Section 6.1, "Labels") have a dimension for
maximum string length
- In the ragged array representations of data (Chapter 9, Discrete
Sampling Geometries), special methods are needed to connect the data
and coordinates
Recommend that the name of a multidimensional coordinate variable should
not match the name of any of its dimensions because that precludes
supplying a coordinate variable for the dimension.
Auxiliary coordinate variables may not be used as the only way to
identify latitude and longitude coordinates that could be identified
using coordinate variables.
An application that is trying to find the latitude coordinate of a
variable should always look first to see if any of the variable's
dimensions correspond to a latitude coordinate variable. If the latitude
coordinate is not found this way, then the auxiliary coordinate
variables listed by the coordinates attribute should be checked. Note
that it is permissible, but optional, to list coordinate variables as
well as auxiliary coordinate variables in the coordinates attribute.
It is not permissible for a data variable to have both a coordinate
variable and an auxiliary coordinate variable, or more than one of
either type of variable, having an axis attribute with any given value
e.g. there must be no more than one axis attribute for X for any data
variable.
"""
#pass
def check_independent_axis_dimensions(self, ds):
"""
5.1 When each of a variable's spatiotemporal dimensions is a latitude,
longitude, vertical, or time dimension, then each axis is identified by
a coordinate variable.
"""
ret_val = []
space_time_dim = []
#Check to find all space-time dimension (Lat/Lon/Time/Height)
for dimension in ds.dataset.dimensions:
if dimension in _possibleaxis:
space_time_dim.append(dimension)
space_time_coord_var = []
#Check to find all space-time coordinate variables (Lat/Lon/Time/Height)
for each in self._find_coord_vars(ds):
if str(each._name) in _possibleaxis \
or (hasattr(each, u'units') and (each.units in _possibleaxisunits or each.units.split(" ")[0] in _possibleaxisunits)) \
or hasattr(each,u'positive'):
space_time_coord_var.append(each._name)
#Looks to ensure that every dimension of each variable that is a space-time dimension has associated coordinate variables
for name,var in ds.dataset.variables.iteritems():
valid = u''
for each in var.dimensions:
if each in space_time_dim:
if each in space_time_coord_var:
valid = True
else:
valid = False
dim_name = each
break
if valid == False:
ret_val.append(Result(BaseCheck.MEDIUM, \
valid, \
u'§5.1 Geophysical variables contain valid dimensions',[u'The %s dimension for the variable %s does not have an associated coordinate variable, but is a Lat/Lon/Time/Height dimension.'%(dim_name,name)]))
if valid == True and name not in space_time_coord_var:
ret_val.append(Result(BaseCheck.MEDIUM, \
valid, \
u'§5.1 Geophysical variables contain valid dimensions'))
return ret_val
def check_two_dimensional(self, ds):
"""
5.2 The latitude and longitude coordinates of a horizontal grid that was
not defined as a Cartesian product of latitude and longitude axes, can
sometimes be represented using two-dimensional coordinate variables.
These variables are identified as coordinates by use of the coordinates
attribute.
For each variable, if the variable has a coordinates attribute:
for each coordinate defined, verify that the coordinate:
is either a coordinate variable OR comprises coordinate variables
"""
ret_val = []
reported_reference_variables = []
for name,var in ds.dataset.variables.iteritems():
self_reference_variables = Set()
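# NCGraph walks the variable's coordinates/dimensions; any variable found to
# reference itself through its coordinates attribute is collected in
# self_reference_variables and reported once below.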
g = NCGraph(ds.dataset, name, var, self_reference_variables)
reasoning = []
for self_reference_variable in self_reference_variables:
if self_reference_variable not in reported_reference_variables:
reasoning.append("Variable %s's coordinate references itself" % (self_reference_variable))
result = Result(BaseCheck.HIGH,\
False,\
(u'§5.2 Latitude and longitude coordinates of a horizontal grid', self_reference_variable, u'coordinates_reference_itself'),\
reasoning)
ret_val.append(result)
reported_reference_variables.append(self_reference_variable)
#Determine if 2-D coordinate variables (Lat and Lon are of shape (i,j)
for each in g.coords:
try:
valid = g.coords[each].ndim == 2
except Exception:
valid = False
if len(g.coords) == 2 and valid:
#------------------------------------------------------------
# Check all the dims are coordinate variables
#------------------------------------------------------------
valid_dims = True
for dim in g.dims.iterkeys():
if dim not in ds.dataset.variables:
valid_dims = False
reasoning.append("Variable %s's dimension %s is not a coordinate variable" % (name, dim))
result = Result(BaseCheck.HIGH, \
valid_dims, \
(u'§5.2 Latitude and longitude coordinates of a horizontal grid', name, u'2d_hgrid_valid_dimensions'), \
reasoning)
ret_val.append(result)
#------------------------------------------------------------
# Check that the coordinates are correct
#------------------------------------------------------------
valid_2d = True
reasoning = []
for cname, coord in g.coords.iteritems():
if coord is None:
valid_2d = False
reasoning.append("Variable %s's coordinate, %s, is not a coordinate or auxiliary variable" %(name, cname))
continue
for dim in coord.dims.iterkeys():
if dim not in g.dims:
valid_2d = False
reasoning.append("Variable %s's coordinate, %s, does not share dimension %s with the variable" % (name, cname, dim))
result = Result(BaseCheck.MEDIUM, \
valid_2d, \
(u'§5.2 Latitude and longitude coordinates of a horizontal grid', name, u'valid_coordinates'), \
reasoning)
ret_val.append(result)
#------------------------------------------------------------
# Can make lat/lon?
#------------------------------------------------------------
lat_check = False
lon_check = False
for cname, coord in g.coords.iteritems():
if coord is not None and coord.units in [u'degrees_north', u'degree_north', u'degrees_N', u'degree_N', u'degreesN', u'degreeN']:
lat_check = True
elif coord is not None and coord.units in [u'degrees_east', u'degree_east', u'degrees_E', u'degree_E', u'degreesE', u'degreeE']:
lon_check = True
result = Result(BaseCheck.HIGH, \
lat_check and lon_check, \
(u'§5.2 Latitude and longitude coordinates of a horizontal grid', name, u'lat_lon_correct'))
ret_val.append(result)
else:
continue # Not a 2d horizontal grid
return ret_val
def check_reduced_horizontal_grid(self, ds):
"""
5.3 A "reduced" longitude-latitude grid is one in which the points are
arranged along constant latitude lines with the number of points on a
latitude line decreasing toward the poles.
Recommend that this type of gridded data be stored using the compression
scheme described in Section 8.2, "Compression by Gathering". The
compressed latitude and longitude auxiliary coordinate variables are
identified by the coordinates attribute.
"""
ret_val = []
coord_vars = self._get_coord_vars(ds)
for name, var in ds.dataset.variables.iteritems():
if name in coord_vars:
continue
if not hasattr(var, u'coordinates'):
continue
valid = True
reasoning = []
valid_in_variables = True
valid_dim = True
valid_coord = True
valid_cdim = True
result = None
coords = var.coordinates.split(u' ')
for coord in coords:
is_reduced_horizontal_grid = True
if coord not in ds.dataset.variables:
valid_in_variables = False
reasoning.append("Coordinate %s is not a proper variable" % coord)
continue
for dim_name in ds.dataset.variables[coord].dimensions:
if dim_name not in var.dimensions:
valid_dim = False
reasoning.append("Coordinate %s's dimension, %s, is not a dimension of %s" %(coord, dim_name, name))
continue
if dim_name not in coord_vars:
valid_coord = False
reasoning.append("Coordinate %s's dimension, %s, is not a coordinate variable" % (coord, dim_name))
continue
dim = ds.dataset.variables[dim_name]
if not hasattr(dim, u'compress'):
is_reduced_horizontal_grid = False
continue
compress_dims = dim.compress.split(u' ')
for cdim in compress_dims:
if cdim not in ds.dataset.dimensions:
valid_cdim = False
reasoning.append("Dimension %s compresses non-existent dimension, %s" % (dim_name, cdim))
continue
if is_reduced_horizontal_grid:
result = Result(BaseCheck.MEDIUM, \
(valid_in_variables and valid_dim and valid_coord and valid_cdim), \
(u'§5.3 Is reduced horizontal grid', name, u'is_reduced_horizontal_grid'), \
reasoning)
if result:
ret_val.append(result)
return ret_val
# grid mapping dictionary, appendix F
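# Each value is a tuple of attribute groups used by the projection check
# below: required map parameters, other (optional) parameters, the
# standard_names expected on the projection coordinate variables, and, where
# present, groups of "either/or" parameters of which exactly one should appear.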
grid_mapping_dict = {
u'albers_conical_equal_area': [(u'longitude_of_central_meridian', u'latitude_of_projection_origin', u'false_easting', u'false_northing'), (), (u'projection_x_coordinate', u'projection_y_coordinate')],
u'azimuthal_equidistant': [(u'longitude_of_projection_origin', u'latitude_of_projection_origin', u'false_easting', u'false_northing'), (), (u'projection_x_coordinate', u'projection_y_coordinate')],
u'lambert_cylindrical_equal_area': [(u'longitude_of_central_meridian', u'false_easting', u'false_northing'), (), (u'projection_x_coordinate', u'projection_y_coordinate'), (u'standard_parallel',u'scale_factor_at_projection_origin')],
u'lambert_azimuthal_equal_area': [(u'longitude_of_projection_origin', u'latitude_of_projection_origin', u'false_easting', u'false_northing'), (), (u'projection_x_coordinate', u'projection_y_coordinate')],
u'lambert_conformal_conic': [(u'standard_parallel', u'longitude_of_central_meridian', u'latitude_of_projection_origin', u'false_easting', u'false_northing'), (), (u'projection_x_coordinate', u'projection_y_coordinate')],
u'latitude_longitude': [(),(),(u'longitude', u'latitude')],
u'mercator': [(u'longitude_of_projection_origin', u'false_easting', u'false_northing'),(),(u'projection_x_coordinate',u'projection_y_coordinate'), (u'standard_parallel', u'scale_factor_at_projection_origin')],
u'orthographic': [(u'longitude_of_projection_origin', u'latitude_of_projection_origin', u'false_easting', u'false_northing'), (), (u'projection_x_coordinate', u'projection_y_coordinate')],
u'polar_stereographic': [(u'straight_vertical_longitude_from_pole', u'latitude_of_projection_origin', u'false_easting', u'false_northing'),(),(u'projection_x_coordinate', u'projection_y_coordinate'), (u'standard_parallel', u'scale_factor_at_projection_origin')],
u'rotated_latitude_longitude': [(u'grid_north_pole_latitude', u'grid_north_pole_longitude'),(u'north_pole_grid_longitude',),(u'grid_latitude', u'grid_longitude')],
u'stereographic':[(u'longitude_of_projection_origin', u'latitude_of_projection_origin', u'scale_factor_at_projection_origin', u'false_easting', u'false_northing'),(),(u'projection_x_coordinate', u'projection_y_coordinate')],
u'transverse_mercator': [(u'scale_factor_at_central_meridian', u'longitude_of_central_meridian', u'latitude_of_projection_origin', u'false_easting', u'false_northing'), (), (u'projection_x_coordinate', u'projection_y_coordinate')],
u'vertical_perspective': [(u'longitude_of_projection_origin', u'latitude_of_projection_origin', u'perspective_point_height', u'false_easting', u'false_northing'), (), (u'projection_x_coordinate', u'projection_y_coordinate')]
}
def check_horz_crs_grid_mappings_projections(self, ds):
"""
5.6 When the coordinate variables for a horizontal grid are not
longitude and latitude, it is required that the true latitude and
longitude coordinates be supplied via the coordinates attribute. If in
addition it is desired to describe the mapping between the given
coordinate variables and the true latitude and longitude coordinates,
the attribute grid_mapping may be used to supply this description.
This attribute is attached to data variables so that variables with
different mappings may be present in a single file. The attribute takes
a string value which is the name of another variable in the file that
provides the description of the mapping via a collection of attached
attributes. This variable is called a grid mapping variable and is of
arbitrary type since it contains no data. Its purpose is to act as a
container for the attributes that define the mapping.
The one attribute that all grid mapping variables must have is
grid_mapping_name which takes a string value that contains the mapping's
name. The other attributes that define a specific mapping depend on the
value of grid_mapping_name. The valid values of grid_mapping_name along
with the attributes that provide specific map parameter values are
described in Appendix F, Grid Mappings.
When the coordinate variables for a horizontal grid are longitude and
latitude, a grid mapping variable with grid_mapping_name of
latitude_longitude may be used to specify the ellipsoid and prime
meridian.
In order to make use of a grid mapping to directly calculate latitude
and longitude values it is necessary to associate the coordinate
variables with the independent variables of the mapping. This is done by
assigning a standard_name to the coordinate variable. The appropriate
values of the standard_name depend on the grid mapping and are given in
Appendix F, Grid Mappings.
"""
ret_val = []
for name, var in ds.dataset.variables.iteritems():
valid_mapping_count = 0
total_mapping_count = 0
reasoning = []
if hasattr(var, u'grid_mapping_name'):
total_mapping_count = 1
mapping = getattr(var, u'grid_mapping_name', u'')
if mapping in self.grid_mapping_dict:
valid_mapping_count = valid_mapping_count + 1
else:
reasoning.append(u'The grid_mapping_name attribute is not an accepted value. See Appendix F.')
ret_val.append(Result(BaseCheck.MEDIUM, (valid_mapping_count, total_mapping_count), (u'§5.6 Grid mapping projection present', name, u'horz_crs_grid_mappings_projections'), reasoning))
continue
for each in self.grid_mapping_dict[mapping][0]:
total_mapping_count = total_mapping_count + 1
if each in dir(var):
valid_mapping_count = valid_mapping_count +1
else:
reasoning.append(u'The required map parameter %s is missing. See Appendix F.' % each)
if len(self.grid_mapping_dict[mapping]) >=4:
for each in self.grid_mapping_dict[mapping][3:]:
every_flag = 0
total_mapping_count = total_mapping_count + 1
for every in each:
if every in dir(var):
valid_mapping_count = valid_mapping_count + 1
every_flag = every_flag +1
if every_flag == 0:
reasoning.append(u'Neither of the "either/or" parameters is present')
if every_flag == 2:
valid_mapping_count = valid_mapping_count - 2
total_mapping_count = total_mapping_count + len(self.grid_mapping_dict[mapping][2])
for name_again, var_again in ds.dataset.variables.iteritems():
if hasattr(var_again,u'standard_name'):
if var_again.standard_name in self.grid_mapping_dict[mapping][2]:
valid_mapping_count = valid_mapping_count + 1
result = Result(BaseCheck.MEDIUM, \
(valid_mapping_count, total_mapping_count), \
(u'§5.6 Grid mapping projection present', name, u'horz_crs_grid_mappings_projections'), \
reasoning)
ret_val.append(result)
return ret_val
def check_scalar_coordinate_system(self, ds):
"""
5.7 When a variable has an associated coordinate which is single-valued, that coordinate may be represented as a
scalar variable. Since there is no associated dimension these scalar coordinate variables should be attached to a
data variable via the coordinates attribute.
Once a name is used for a scalar coordinate variable it can not be used for a 1D coordinate variable. For this
reason we strongly recommend against using a name for a scalar coordinate variable that matches the name of any
dimension in the file.
"""
ret_val = []
for name, var in ds.dataset.variables.iteritems():
valid_scalar_coordinate_var = 0
total_scalar_coordinate_var = 0
reasoning = []
if not hasattr(var, u'coordinates'):
continue
for coordinate in getattr(var, u'coordinates', u'').split(" "):
if coordinate in ds.dataset.variables:
if ds.dataset.variables[coordinate].shape == ():
total_scalar_coordinate_var += 1
if coordinate not in ds.dataset.dimensions.keys():
valid_scalar_coordinate_var += 1
else:
reasoning.append(u'Scalar coordinate var (%s) of var (%s) is correct size but is present in the dimensions list, which is not allowed.'% (coordinate, name))
if total_scalar_coordinate_var > 0:
result = Result(BaseCheck.MEDIUM,
(valid_scalar_coordinate_var, total_scalar_coordinate_var),
(u'§5.7 Scalar coordinate variables', name, u'scalar_coordinates'),
reasoning)
ret_val.append(result)
return ret_val
###############################################################################
#
# CHAPTER 6: Labels and Alternative Coordinates
#
###############################################################################
def check_geographic_region(self, ds):
"""
6.1.1 When data is representative of geographic regions which can be identified by names but which have complex
boundaries that cannot practically be specified using longitude and latitude boundary coordinates, a labeled
axis should be used to identify the regions.
Recommend that the names be chosen from the list of standardized region names whenever possible. To indicate
that the label values are standardized the variable that contains the labels must be given the standard_name
attribute with the value region.
"""
ret_val = []
reasoning = []
region_list = [
u'africa',
u'antarctica',
u'arabian_sea',
u'aral_sea',
u'arctic_ocean',
u'asia',
u'atlantic_ocean',
u'australia',
u'baltic_sea',
u'barents_opening',
u'barents_sea',
u'beaufort_sea',
u'bellingshausen_sea',
u'bering_sea',
u'bering_strait',
u'black_sea',
u'canadian_archipelago',
u'caribbean_sea',
u'caspian_sea',
u'central_america',
u'chukchi_sea',
u'contiguous_united_states',
u'denmark_strait',
u'drake_passage',
u'east_china_sea',
u'english_channel',
u'eurasia',
u'europe',
u'faroe_scotland_channel',
u'florida_bahamas_strait',
u'fram_strait',
u'global',
u'global_land',
u'global_ocean',
u'great_lakes',
u'greenland',
u'gulf_of_alaska',
u'gulf_of_mexico',
u'hudson_bay',
u'iceland_faroe_channel',
u'indian_ocean',
u'indonesian_throughflow',
u'indo_pacific_ocean',
u'irish_sea',
u'lake_baykal',
u'lake_chad',
u'lake_malawi',
u'lake_tanganyika',
u'lake_victoria',
u'mediterranean_sea',
u'mozambique_channel',
u'north_america',
u'north_sea',
u'norwegian_sea',
u'pacific_equatorial_undercurrent',
u'pacific_ocean',
u'persian_gulf',
u'red_sea',
u'ross_sea',
u'sea_of_japan',
u'sea_of_okhotsk',
u'south_america',
u'south_china_sea',
u'southern_ocean',
u'taiwan_luzon_straits',
u'weddell_sea',
u'windward_passage',
u'yellow_sea'
]
for name, var in ds.dataset.variables.iteritems():
if getattr(var, u'standard_name', u'') == u'region':
if u''.join(var[:]).lower() in region_list:
result = Result(BaseCheck.LOW, \
True, \
(u'§6.1.1 Geographic region specified', name, u'geographic_region'), \
reasoning)
else:
reasoning.append(u'The Region Value is not from the allowable list.')
result = Result(BaseCheck.LOW, \
False, \
(u'§6.1.1 Geographic region specified', name, u'geographic_region'), \
reasoning)
ret_val.append(result)
return ret_val
def check_alternative_coordinates(self, ds):
"""
6.2 In some situations a dimension may have alternative sets of coordinates values. Since there can only be
one coordinate variable for the dimension (the variable with the same name as the dimension), any alternative
sets of values have to be stored in auxiliary coordinate variables. For such alternative coordinate variables,
there are no mandatory attributes, but they may have any of the attributes allowed for coordinate variables.
"""
ret_val = []
reasoning = []
valid_alt_coordinate_var = 0
total_alt_coordinate_var = 0
coordinate_list = []
for name, var in ds.dataset.variables.iteritems():
if hasattr(var, u'coordinates'):
for coordinate in getattr(var, u'coordinates', u'').split(u' '):
coordinate_list.append(coordinate)
for name, var in ds.dataset.variables.iteritems():
if name in coordinate_list and var.ndim == 1 and name not in ds.dataset.dimensions:
result = Result(BaseCheck.MEDIUM,
True,
(u'§6.2 Alternative Coordinates', name, u'alternative_coordinates')
)
ret_val.append(result)
return ret_val
###############################################################################
#
# CHAPTER 7: Data Representative of Cells
#
###############################################################################
def check_cell_boundaries(self, ds):
"""
7.1 To represent cells we add the attribute bounds to the appropriate coordinate variable(s). The value of bounds
is the name of the variable that contains the vertices of the cell boundaries. We refer to this type of variable as
a "boundary variable." A boundary variable will have one more dimension than its associated coordinate or auxiliary
coordinate variable. The additional dimension should be the most rapidly varying one, and its size is the maximum
number of cell vertices.
Applications that process cell boundary data often times need to determine whether or not adjacent cells share an
edge. In order to facilitate this type of processing the following restrictions are placed on the data in boundary
variables:
Bounds for 1-D coordinate variables
For a coordinate variable such as lat(lat) with associated boundary variable latbnd(x,2), the interval endpoints
must be ordered consistently with the associated coordinate, e.g., for an increasing coordinate, lat(1) > lat(0)
implies latbnd(i,1) >= latbnd(i,0) for all i
If adjacent intervals are contiguous, the shared endpoint must be represented identically in each instance where
it occurs in the boundary variable. For example, if the intervals that contain grid points lat(i) and lat(i+1) are
contiguous, then latbnd(i+1,0) = latbnd(i,1).
Bounds for 2-D coordinate variables with 4-sided cells
In the case where the horizontal grid is described by two-dimensional auxiliary coordinate variables in latitude
lat(n,m) and longitude lon(n,m), and the associated cells are four-sided, then the boundary variables are given
in the form latbnd(n,m,4) and lonbnd(n,m,4), where the trailing index runs over the four vertices of the cells.
Bounds for multi-dimensional coordinate variables with p-sided cells
In all other cases, the bounds should be dimensioned (...,n,p), where (...,n) are the dimensions of the auxiliary
coordinate variables, and p the number of vertices of the cells. The vertices must be traversed anticlockwise in the
lon-lat plane as viewed from above. The starting vertex is not specified.
"""
ret_val = []
reasoning = []
valid = u' '
for cvar, bvar in self._find_boundary_vars(ds).iteritems():
valid = True
if bvar.ndim != cvar.ndim + 1:
valid = False
reasoning.append(u'The number of dimensions of the Coordinate Variable is %s, but the number of dimensions of the Boundary Variable is %s.'%(cvar.ndim, bvar.ndim))
result = Result(BaseCheck.MEDIUM,
valid,
(u'§7.1 Cell boundaries', cvar._name, u'cell_boundaries'),
reasoning)
ret_val.append(result)
reasoning = []
return ret_val
def check_cell_measures(self, ds):
"""
7.2 To indicate extra information about the spatial properties of a variable's grid cells, a cell_measures attribute may
be defined for a variable. This is a string attribute comprising a list of blank-separated pairs of words of the form
"measure: name". "area" and "volume" are the only defined measures.
The "name" is the name of the variable containing the measure values, which we refer to as a "measure variable". The
dimensions of the measure variable should be the same as or a subset of the dimensions of the variable to which they are
related, but their order is not restricted.
The variable must have a units attribute and may have other attributes such as a standard_name.
"""
ret_val = []
reasoning = []
paragraph = []
for name, var in ds.dataset.variables.iteritems():
for dim in var.dimensions:
if getattr(var, u'cell_measures', u''):
measures = getattr(var, u'cell_measures', u'')
measures = measures.split(u': ')
if measures[0] not in [u'area', u'volume']:
reasoning.append("The 'measures' field is not equal to 'area' or 'volume'.")
return Result(BaseCheck.MEDIUM, \
False, \
(u'§7.2 Cell measures', name, u'cell_measures'), \
reasoning)
for every, attri in ds.dataset.variables.iteritems():
if every == measures[1]:
for dimi in attri.dimensions:
if dimi in var.dimensions:
valid = True
else:
reasoning.append(u'The measure variable dimensions are not the same as, or a subset of, the dimensions of the variable that references it.')
valid = False
result = Result(BaseCheck.MEDIUM, \
valid, \
(u'§7.2 Cell measures', name, u'cell_measures'), \
reasoning)
ret_val.append(result)
return ret_val
def check_cell_methods(self, ds):
"""
7.3 To describe the characteristic of a field that is represented by cell values, we define the cell_methods attribute
of the variable. This is a string attribute comprising a list of blank-separated words of the form "name: method". Each
"name: method" pair indicates that for an axis identified by name, the cell values representing the field have been
determined or derived by the specified method.
name can be a dimension of the variable, a scalar coordinate variable, a valid standard name, or the word "area"
values of method should be selected from the list in Appendix E, Cell Methods, which includes point, sum, mean, maximum,
minimum, mid_range, standard_deviation, variance, mode, and median. Case is not significant in the method name. Some
methods (e.g., variance) imply a change of units of the variable, as is indicated in Appendix E, Cell Methods.
Because the default interpretation for an intensive quantity differs from that of an extensive quantity and because this
distinction may not be understood by some users of the data, it is recommended that every data variable include for each
of its dimensions and each of its scalar coordinate variables the cell_methods information of interest (unless this
information would not be meaningful). It is especially recommended that cell_methods be explicitly specified for each
spatio-temporal dimension and each spatio-temporal scalar coordinate variable.
"""
_areatype_names = ["bare_ground",
"all_area_types",
"burnt_vegetation",
"c3_plant_functional_types",
"c4_plant_functional_types",
"clear_sky",
"cloud",
"crops",
"floating_ice",
"ice_free_land",
"ice_free_sea",
"lake_ice_or_sea_ice",
"land",
"land_ice",
"natural_grasses",
"pastures",
"primary_deciduous_trees",
"primary_evergreen_trees",
"sea",
"sea_ice",
"secondary_deciduous_trees",
"secondary_evergreen_trees",
"shrubs"
"snow",
"trees"
"vegetation"]
methods = [ u'point',
u'sum',
u'mean',
u'maximum',
u'minimum',
u'mid_range',
u'standard_deviation',
u'variance',
u'mode',
u'median']
ret_val = []
reasoning = []
paragraph = u''
named = u''
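# pvars pulls out each "name:" token so the total number of name/method pairs
# can be counted; psep parses a full "name: method [where type [over type]]
# [(modifier)]" clause into named groups used by the checks below.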
pvars = re.compile(r'\(.*?\)|(\w*?):')
psep = re.compile('((?P<var>\w+): (?P<method>\w+) ?(?P<where>where (?P<wtypevar>\w+) ?(?P<over>over (?P<otypevar>\w+))?| ?)(?P<brace>\(((?P<brace_wunit>\w+): (\d+) (?P<unit>\w+)|(?P<brace_opt>\w+): (\w+))\))*)')
names = list(ds.dataset.variables.iterkeys())
for name, var in ds.dataset.variables.iteritems():
named_dict = OrderedDict()
if getattr(var, u'cell_methods', u'') :
method = getattr(var, u'cell_methods', u'')
total_name_count = 0
cell_dims = []
for match in re.finditer(pvars, method):
if (match.groups()[0] is not None):
cell_dims.append(match.groups()[0])
total_name_count = total_name_count + 1
#print "cell_methods_check: number DIMs", total_name_count
# check that the name is valid
valid_name_count = 0
for match in re.finditer(psep, method):
#print 'dict ', match.groupdict()
if match.group(u'var') in ds.dataset.variables[name].dimensions:
valid_name_count = valid_name_count + 1
elif match.group(u'var') == u'area':
valid_name_count = valid_name_count + 1
elif match.group(u'var') in getattr(var,"coordinates",""):
valid_name_count = valid_name_count + 1
else:
reasoning.append(u'The name field does not match a dimension, area or coordinate.')
result = Result(BaseCheck.MEDIUM, \
(valid_name_count, total_name_count), \
(u'§7.3 Cell Methods', name, u'cell_methods_name'), \
reasoning)
ret_val.append(result)
#Checks if the method value of the 'name: method' pair is acceptable
reasoning = []
valid_method_count = 0
for match in re.finditer(psep, method):
#print 'dict ', match.groupdict()
if match.group(u'method') in methods:
valid_method_count = valid_method_count + 1
else:
reasoning.append(u'The method field does not match a valid method value.')
total_method_count = total_name_count # all dims must have a valid method
result = Result(BaseCheck.MEDIUM, \
(valid_method_count, total_method_count), \
(u'§7.3 Cell Methods', name, u'cell_methods_method'), \
reasoning)
ret_val.append(result)
# check the method modifier 'name: method (modifier)'
reasoning = []
valid_brace_count = 0
total_brace_count = 0
for match in re.finditer(psep, method):
if match.group(u'brace') is not None:
total_brace_count = total_brace_count + 1
if match.group(u'brace_wunit') == u'interval':
valid_brace_count = valid_brace_count + 1
elif match.group(u'brace_wunit') in [u'comment', u'area']:
valid_brace_count = valid_brace_count + 1
else:
reasoning.append(u'The method modifier is not valid.')
result = Result(BaseCheck.MEDIUM, \
(valid_brace_count, total_brace_count), \
(u'§7.3 Cell Methods', name, u'cell_methods_method_modifier'), \
reasoning)
ret_val.append(result)
#Checks the 'method where' formats
reasoning = []
valid_area_count = 0
total_area_count = 0
for match in re.finditer(psep, method):
if len(match.group(u'where')) != 0:
if match.group(u'wtypevar') in _areatype_names:
total_area_count = total_area_count + 1
if match.group(u'otypevar') is not None:
if match.group(u'otypevar') in _areatype_names:
valid_area_count = valid_area_count + 1
else:
reasoning.append(u'The "name: method where type over _areatype_names" ('+match.group(u'otypevar')+u') format is not correct.')
else:
valid_area_count = valid_area_count + 1
else:
reasoning.append(u'The "name: method where _areatype_names" ('+match.group(u'wvartype')+u') format is not correct.')
result = Result(BaseCheck.MEDIUM, \
(valid_area_count, total_area_count), \
(u'§7.3 Cell Methods', name, u'cell_methods_area'), \
reasoning)
ret_val.append(result)
#Checks the Climatology Variables - 7.4
reasoning = []
paragraph = []
total_climate_count = 0
valid_climate_count = 0
for name, var in ds.dataset.variables.iteritems():
if getattr(var, u'climatology', u''):
climate_dim = ds.dataset.variables[name].dimensions
clim_method = getattr(var, u'climatology', u'')
for each in clim_method.split(" "):
paragraph.append(each)
total_climate_count = total_climate_count+ 1
for name_again, var_again in ds.dataset.variables.iteritems():
if getattr(var_again,"cell_methods",""):
climate = getattr(var, u'cell_methods', u'')
name_dim = ds.dataset.variables[name_again].dimensions
if len(climate_dim)>0:
if climate_dim[0] in name_dim:
case1 = re.search(r"time: \w* within years time: \w* over years",climate)
case2 = re.search(r"time: \w* within days time: \w* over days$",climate)
case3 = re.search(r"time: \w* within days time: \w* over days time: \w* over years",climate)
if (case1 or case2 or case3) and len(ds.dataset.variables[clim_method].shape) == 2 and ds.dataset.variables[clim_method].shape[1] == 2 and ds.dataset.variables[clim_method].shape[0] == ds.dataset.variables[name_again].shape[0] :
valid_climate_count = 1
if not (case1 or case2 or case3):
reasoning.append(u'The "time: method within years/days over years/days" format is not correct.')
if not (len(ds.dataset.variables[clim_method].shape) == 2 and ds.dataset.variables[clim_method].shape[1] == 2 and ds.dataset.variables[clim_method].shape[0] == ds.dataset.variables[name_again].shape[0]):
reasoning.append(u'The dimensions of the climatology variable are incorrect.')
result = Result(BaseCheck.MEDIUM, \
(valid_climate_count, total_climate_count), \
(u'§7.3 Cell Methods', name, u'cell_methods_climatology'), \
reasoning)
ret_val.append(result)
return ret_val
#def check_cell_methods_for_multi_axes(self, ds):
"""
7.3.1 If a data value is representative of variation over a combination of axes, a single method should be prefixed by the
names of all the dimensions involved (listed in any order, since in this case the order must be immaterial).
There is no way to check this. A warning should be posted explaining this method to the user!"
"""
#def check_spacing_and_extra_info(self, ds):
"""
7.3.2 To indicate more precisely how the cell method was applied, extra information may be included in parentheses ()
after the identification of the method. This information includes standardized and non-standardized parts.
The only standardized information is to provide the typical interval between the original data values to which the method
was applied, in the situation where the present data values are statistically representative of original data values which
had a finer spacing.
The syntax is (interval: value unit), where value is a numerical value and unit is a string that can be recognized by
UNIDATA's Udunits package.
If the cell method applies to a combination of axes, they may have a common original interval. Alternatively, they may have
separate intervals, which are matched to the names of axes by position.
If there is both standardized and non-standardized information, the non-standardized follows the standardized information
and the keyword comment:. If there is no standardized information, the keyword comment: should be omitted.
A dimension of size one may be the result of "collapsing" an axis by some statistical operation, for instance by
calculating a variance from time series data. We strongly recommend that dimensions of size one be retained (or scalar
coordinate variables be defined) to enable documentation of the method (through the cell_methods attribute) and its
domain (through the cell_bounds attribute).
"""
#def check_stats_applying_to_portions_of_cells(self, ds):
"""
7.3.3 By default, the statistical method indicated by cell_methods is assumed to have been evaluated over the entire
horizontal area of the cell. Sometimes, however, it is useful to limit consideration to only a portion of a cell.
One of two conventions may be used.
The first convention is a method that can be used for the common case of a single area-type. In this case, the
cell_methods attribute may include a string of the form "name: method where type".
The second convention is the more general. In this case, the cell_methods entry is of the form "name: method where
_areatype_names". Here _areatype_names is a string-valued auxiliary coordinate variable or string-valued scalar coordinate variable
with a standard_name of area_type. The variable _areatype_names contains the name(s) of the selected portion(s) of the grid
cell to which the method is applied.
If the method is mean, various ways of calculating the mean can be distinguished in the cell_methods attribute with
a string of the form "mean where type1 [over type2]". Here, type1 can be any of the possibilities allowed for _areatype_names
or type (as specified in the two preceding paragraphs). The same options apply to type2, except it is
not allowed to be the name of an auxiliary coordinate variable with a dimension greater than one (ignoring the
dimension accommodating the maximum string length).
"""
#def check_cell_methods_with_no_coords(self, ds):
"""
7.3.4 To provide an indication that a particular cell method is relevant to the data without having to provide a
precise description of the corresponding cell, the "name" that appears in a "name: method" pair may be an
appropriate standard_name (which identifies the dimension) or the string, "area" (rather than the name of a scalar
coordinate variable or a dimension with a coordinate variable). This convention cannot be used, however, if the name
of a dimension or scalar coordinate variable is identical to name.
Recommend that whenever possible, cell bounds should be supplied by giving the variable a dimension of size one
and attaching bounds to the associated coordinate variable.
"""
#def check_climatological_statistics(self, ds):
"""
7.4 A climatological time coordinate variable does not have a bounds attribute. Instead, it has a climatology
attribute, which names a variable with dimensions (n,2), n being the dimension of the climatological time axis.
Using the units and calendar of the time coordinate variable, element (i,0) of the climatology variable specifies
the beginning of the first subinterval and element (i,1) the end of the last subinterval used to evaluate the
climatological statistics with index i in the time dimension. The time coordinates should be values that are
representative of the climatological time intervals, such that an application which does not recognise climatological
time will nonetheless be able to make a reasonable interpretation.
Valid values of the cell_methods attribute must be in one of the forms from the following list.
- time: method1 within years time: method2 over years
- time: method1 within days time: method2 over days
- time: method1 within days time: method2 over days time: method3 over years
The methods which can be specified are those listed in Appendix E, Cell Methods and each entry in the cell_methods
attribute may also contain non-standardised information in parentheses after the method.
"""
###############################################################################
#
# CHAPTER 8: Reduction of Dataset Size
#
###############################################################################
def check_packed_data(self, ds):
"""
8.1 Simple packing may be achieved through the use of the optional NUG defined attributes scale_factor and
add_offset. After the data values of a variable have been read, they are to be multiplied by the scale_factor,
and have add_offset added to them.
The units of a variable should be representative of the unpacked data.
If the scale_factor and add_offset attributes are of the same data type as the associated variable, the unpacked
data is assumed to be of the same data type as the packed data. However, if the scale_factor and add_offset
attributes are of a different data type from the variable (containing the packed data) then the unpacked data
should match the type of these attributes, which must both be of type float or both be of type double. An additional
restriction in this case is that the variable containing the packed data must be of type byte, short or int. It is
not advised to unpack an int into a float as there is a potential precision loss.
When data to be packed contains missing values the attributes that indicate missing values (_FillValue, valid_min,
valid_max, valid_range) must be of the same data type as the packed data.
"""
ret_val = []
for name, var in ds.dataset.variables.iteritems():
add_offset = getattr(var, u'add_offset', None)
scale_factor = getattr(var, u'scale_factor', None)
if not (add_offset or scale_factor):
continue
valid = True
reasoning = []
# if only one of these attributes is defined, assume they
# are the same type (value doesn't matter here)
if not add_offset:
add_offset = scale_factor
if not scale_factor:
scale_factor = add_offset
if type(add_offset) != type(scale_factor):
valid = False
reasoning.append("Attributes add_offset and scale_factor have different data type.")
elif type(scale_factor) != var.dtype:
# Check both attributes are type float or double
if not type(scale_factor) in [np.float, np.float16, np.float32, np.float64, np.float128]:
valid = False
reasoning.append("Attributes add_offset and scale_factor are not of type float or double.")
else:
# Check variable type is byte, short or int
if not var.dtype in [np.int, np.int8, np.int16, np.int32, np.int64]:
valid = False
reasoning.append("Variable is not of type byte, short, or int.")
result = Result(BaseCheck.MEDIUM, valid, (u'§8.1 Packed Data', name, u'packed_data'), reasoning)
ret_val.append(result)
reasoning = []
valid = True
# test further with _FillValue , valid_min , valid_max , valid_range
if hasattr(var, "_FillValue"):
if var._FillValue.dtype != var.dtype:
valid = False
reasoning.append("Type of _FillValue attribute (%s) does not match variable type (%s)" %\
(var._FillValue.dtype, var.dtype))
if hasattr(var, "valid_min"):
if var.valid_min.dtype != var.dtype:
valid = False
reasoning.append("Type of valid_min attribute (%s) does not match variable type (%s)" %\
(var.valid_min.dtype, var.dtype))
if hasattr(var, "valid_max"):
if var.valid_max.dtype != var.dtype:
valid = False
reasoning.append("Type of valid_max attribute (%s) does not match variable type (%s)" %\
(var.valid_max.dtype, var.dtype))
if hasattr(var, "valid_range"):
if var.valid_range.dtype != var.dtype:
valid = False
reasoning.append("Type of valid_range attribute (%s) does not match variable type (%s)" %\
(var.valid_range.dtype, var.dtype))
result = Result(BaseCheck.MEDIUM, valid, (u'§8.1 Packed Data', name, u'fillvalue_valid_range_attributes'), reasoning)
ret_val.append(result)
return ret_val
def check_compression(self, ds):
"""
8.2 To save space in the netCDF file, it may be desirable to eliminate points from data arrays that are invariably
missing. Such a compression can operate over one or more adjacent axes, and is accomplished with reference to a list
of the points to be stored.
The list is stored as the coordinate variable for the compressed axis of the data array. Thus, the list variable and
its dimension have the same name. The list variable has a string attribute compress, containing a blank-separated
list of the dimensions which were affected by the compression in the order of the CDL declaration of the uncompressed
array.
"""
ret_val = []
for name, var in ds.dataset.variables.iteritems():
valid_dim = 0
valid_form = 0
reasoning = []
if hasattr(var, u'compress'):
totals = 2
if name in var.dimensions and var.ndim == 1:
valid_dim = 1
else:
reasoning.append("The 'compress' attribute is not assigned to a coordinate variable.")
if all([each in ds.dataset.dimensions.keys() for each in getattr(var, u'compress', u'').split(" ")]):
valid_form = 1
else:
reasoning.append("The 'compress' attribute is not in the form of a coordinate.")
result = Result(BaseCheck.MEDIUM,
(valid_form +valid_dim, totals),
(u'§8.2 Dataset Compression', name, u'compressed_data'),
reasoning)
ret_val.append(result)
return ret_val
###############################################################################
#
# CHAPTER 9: Discrete Sampling Geometries
#
###############################################################################
@is_likely_dsg
def check_all_features_are_same_type(self, ds):
"""
9.1 The features contained within a collection must always be of the same type; and all the collections in a CF file
must be of the same feature type.
point, timeSeries, trajectory, profile, timeSeriesProfile, trajectoryProfile.
The space-time coordinates that are indicated for each feature are mandatory. However a featureType may also include
other space-time coordinates which are not mandatory (notably the z coordinate).
"""
flag = 0
x = u''
y = u''
z = u''
t = u''
flag = 0
for var in self._find_coord_vars(ds):
if getattr(var,"grid_mapping_name", ""):
#DO GRIDMAPPING CHECKS FOR X,Y,Z,T
flag = 1
for name_again, var_again in ds.dataset.variables.iteritems():
if getattr(var_again,"standard_name","") == self.grid_mapping_dict[getattr(var,"grid_mapping_name", "")][2][0]:
x = name_again
if getattr(var_again,"standard_name","") == self.grid_mapping_dict[getattr(var,"grid_mapping_name", "")][2][1]:
y = name_again
for var in self._find_coord_vars(ds):
#DO STANDARD SEARCH
if getattr(var,u'units',u'').lower() in [u'pa', u'kpa', u'mbar', u'bar', u'atm', u'hpa', u'dbar'] or getattr(var,u'positive',u'') or getattr(var,u'standard_name',u'') == u'z' or getattr(var,u'axis',u'') == u'z':
z = var._name
if var._name.lower() in [u'lon', u'longitude'] and flag == 0:
x = var._name
elif var._name.lower()in [u'lat', u'latitude'] and flag == 0:
y = var._name
elif var._name.lower() == u'time':
t = var._name
if getattr(var, u'_CoordinateAxisType', u''):
axis_type = getattr(var, u'_CoordinateAxisType', u'')
if axis_type.lower() in [u'lon', u'longitude'] and flag == 0:
x = var._name
elif axis_type.lower()in [u'lat', u'latitude'] and flag == 0:
y = var._name
elif axis_type.lower() == u'time':
t = var._name
valid = False
feature_tuple_list = []
#create shape size tuple
if x ==u'' or y == u'' or t == u'':
return
elif z == u'':
feature_tuple = (ds.dataset.variables[x].ndim, ds.dataset.variables[y].ndim, ds.dataset.variables[t].ndim)
else:
feature_tuple = (ds.dataset.variables[x].ndim, ds.dataset.variables[y].ndim, ds.dataset.variables[t].ndim, ds.dataset.variables[z].ndim)
feature_tuple_list.append(feature_tuple)
data_vars = [each for name,each in ds.dataset.variables.iteritems() if hasattr(each,u'coordinates')]
for each in data_vars:
this_feature_tuple = tuple([ds.dataset.variables[every].ndim for every in each.dimensions])
feature_tuple_list.append(this_feature_tuple)
valid = all(x == feature_tuple_list[0] for x in feature_tuple_list)
return Result(BaseCheck.HIGH, valid, name=u'§9.1 Feature Types')
@is_likely_dsg
def check_orthogonal_multidim_array(self, ds):
"""
9.3.1 The orthogonal multidimensional array representation, the simplest representation, can be used if each feature
instance in the collection has identical coordinates along the element axis of the features.
Data variables have both an instance dimension and an element dimension. The dimensions may be given in any order.
If there is a need for either the instance or an element dimension to be the netCDF unlimited dimension (so that more
features or more elements can be appended), then that dimension must be the outer dimension of the data variable
i.e. the leading dimension in CDL.
"""
ret_val = []
reasoning = []
for name,var in ds.dataset.variables.iteritems():
reasoning = []
if not hasattr(var,u'count_variable') and not hasattr(var,u'index_variable'):
if hasattr(var, u'_FillValue'):
pass
else:
result = Result(BaseCheck.MEDIUM, \
True, \
(u'§9.3.1 Orthogonal Multidimensional Array', name, u'orthogonal_multidimensional'), \
reasoning)
ret_val.append(result)
return ret_val
@is_likely_dsg
def check_incomplete_multidim_array(self, ds):
"""
9.3.2 The incomplete multidimensional array representation can be used if the features within a collection do not all have
the same number of elements, but sufficient storage space is available to allocate the number of elements required by
the longest feature to all features. That is, features that are shorter than the longest feature must be padded with
missing values to bring all instances to the same storage size.
Data variables have both an instance dimension and an element dimension. The dimensions may be given in any order.
If there is a need for either the instance or an element dimension to be the netCDF unlimited dimension (so that more
features or more elements can be appended), then that dimension must be the outer dimension of the data variable
i.e. the leading dimension in CDL.
"""
ret_val = []
for name,var in ds.dataset.variables.iteritems():
reasoning = []
if not hasattr(var,u'count_variable') and not hasattr(var,u'index_variable'):
if hasattr(var, u'_FillValue'):
result = Result(BaseCheck.MEDIUM, \
True, \
(u'§9.3.2 Incomplete Multidimensional Array', name, u'ragged_multidimensional'), \
reasoning)
ret_val.append(result)
else:
pass
return ret_val
@is_likely_dsg
def check_contiguous_ragged_array(self, ds):
"""
9.3.3 The contiguous ragged array representation can be used only if the size of each feature is known at the time
that it is created.
In this representation, the file contains a count variable, which must be of type integer and must have the instance
dimension as its sole dimension. The count variable contains the number of elements that each feature has. This
representation and its count variable are identifiable by the presence of an attribute, sample_dimension, found on
the count variable, which names the sample dimension being counted. For indices that correspond to features, whose
data have not yet been written, the count variable should have a value of zero or a missing value.
In the ragged array representations, the instance dimension (i), which sequences the individual features within the
collection, and the element dimension, which sequences the data elements of each feature (o and p), both occupy the
same dimension (the sample dimension). If the sample dimension is the netCDF unlimited dimension, new data can be
appended to the file.
"""
ret_val = []
reasoning = []
for name,var in ds.dataset.variables.iteritems():
if getattr(var,u'sample_dimension',''):
result = Result(BaseCheck.MEDIUM, \
True, \
(u'§9.3.3 Contiguous ragged array', name, u'continuous_ragged'), \
reasoning)
ret_val.append(result)
else:
return []
return ret_val
@is_likely_dsg
def check_indexed_ragged_array(self, ds):
"""
9.3.4 The indexed ragged array representation stores the features interleaved along the sample dimension in the data
variable.
In this representation, the file contains an index variable, which must be of type integer, and must have the sample
dimension as its single dimension. The index variable contains the zero-based index of the feature to which each
element belongs. This representation is identifiable by the presence of an attribute, instance_dimension, on the index
variable, which names the dimension of the instance variables. For those indices of the sample dimension, into which
data have not yet been written, the index variable should be pre-filled with missing values.
In the ragged array representations, the instance dimension (i), which sequences the individual features within the
collection, and the element dimension, which sequences the data elements of each feature (o and p), both occupy the
same dimension (the sample dimension). If the sample dimension is the netCDF unlimited dimension, new data can be
appended to the file.
"""
ret_val = []
reasoning = []
for name,var in ds.dataset.variables.iteritems():
if getattr(var,u'instance_dimension',u''):
result = Result(BaseCheck.MEDIUM, \
True, \
(u'§9.3.4 Indexed ragged array', name, u'indexed_ragged'), \
reasoning)
ret_val.append(result)
else:
return []
return ret_val
@is_likely_dsg
def check_feature_type(self, ds):
"""
9.4 A global attribute, featureType, is required for all Discrete Geometry representations except the orthogonal
multidimensional array representation, for which it is highly recommended.
The value assigned to the featureType attribute is case-insensitive.
"""
reasoning=[]
feature_list = [u'point', u'timeseries',u'trajectory',u'profile', u'timeseriesprofile',u'trajectoryprofile']
if getattr(ds.dataset, u'featureType', u'').lower() in feature_list:
return Result(BaseCheck.MEDIUM,
True, u'§9.4 featureType attribute',
reasoning)
elif getattr(ds.dataset, u'featureType', u''):
reasoning.append(u'The featureType is provided and is not from the featureType list.')
return Result(BaseCheck.MEDIUM,
False, u'§9.4 featureType attribute',
reasoning)
@is_likely_dsg
def check_coordinates_and_metadata(self, ds):
"""
9.5 Every feature within a Discrete Geometry CF file must be unambiguously associated with an extensible collection
of instance variables that identify the feature and provide other metadata as needed to describe it. Every element of
every feature must be unambiguously associated with its space and time coordinates and with the feature that contains
it.
The coordinates attribute must be attached to every data variable to indicate the spatiotemporal coordinate variables
that are needed to geo-locate the data.
Where feasible a variable with the attribute cf_role should be included. The only acceptable values of cf_role for
Discrete Geometry CF data sets are timeseries_id, profile_id, and trajectory_id. The variable carrying the cf_role
attribute may have any data type. When a variable is assigned this attribute, it must provide a unique identifier
for each feature instance.
CF files that contain timeSeries, profile or trajectory featureTypes, should include only a single occurrence of a
cf_role attribute; CF files that contain timeSeriesProfile or trajectoryProfile may contain two occurrences,
corresponding to the two levels of structure in these feature types.
CF Discrete Geometries provides a mechanism to encode both the nominal and the precise positions, while retaining the
semantics of the idealized feature type. Only the set of coordinates which are regarded as the nominal (default or
preferred) positions should be indicated by the attribute axis, which should be assigned string values to indicate
the orientations of the axes (X, Y, Z, or T).
Auxiliary coordinate variables containing the nominal and the precise positions should be listed in the relevant
coordinates attributes of data variables. In orthogonal representations the nominal positions could be coordinate
variables, which do not need to be listed in the coordinates attribute, rather than auxiliary coordinate variables.
Coordinate bounds may optionally be associated with coordinate variables and auxiliary coordinate variables using
the bounds attribute.
If there is a vertical coordinate variable or auxiliary coordinate variable, it must be identified by the means
specified in section 4.3. The use of the attribute axis=Z is recommended for clarity. A standard_name attribute
that identifies the vertical coordinate is recommended.
"""
ret_val = []
reasoning = []
name_list = []
non_data_list = []
data_list = []
for name,var in ds.dataset.variables.iteritems():
if var.dimensions and not hasattr(var, u'cf_role') and not self._is_station_var(var):
if var.dimensions != (name,):
name_list.append(name)
for name,var in ds.dataset.variables.iteritems():
if hasattr(var, u'coordinates'):
for each in getattr(var, u'coordinates', u'').split(u' '):
if each in name_list:
non_data_list.append(each)
if hasattr(var, u'ancillary_variables'):
for each in getattr(var, u'ancillary_variables', u'').split(u' '):
non_data_list.append(each)
data_list = [each for each in name_list if each not in non_data_list]
for each in data_list:
if getattr(ds.dataset.variables[each], u'coordinates', u''):
result = Result(BaseCheck.MEDIUM, \
True, \
(u'§9.5 Discrete Geometry', each, u'check_coordinates'), \
reasoning)
ret_val.append(result)
reasoning = []
else:
reasoning.append(u'The variable %s does not have associated coordinates' %each)
result = Result(BaseCheck.MEDIUM, \
False, \
(u'§9.5 Discrete Geometry', each, u'check_coordinates'), \
reasoning)
ret_val.append(result)
reasoning = []
role_list = [getattr(var, u'cf_role', u'').split(u' ') for name,var in ds.dataset.variables.iteritems() if hasattr(var, u'cf_role')]
single_role = [u'timeseries', u'profile', u'trajectory']
dual_role = [u'timeseriesprofile', u'trajectoryprofile']
if getattr(ds.dataset, u'featureType', u'').lower() in single_role and len(np.ravel(role_list)) == 1:
reasoning = []
valid = True
elif getattr(ds.dataset, u'featureType', u'').lower() in dual_role and len(np.ravel(role_list)) in [1,2]:
reasoning = []
valid = True
else:
valid = False
reasoning = []
reasoning.append(u'The cf_role featureType is not properly defined.')
result = Result(BaseCheck.MEDIUM, \
valid, \
u'§9.5 Discrete Geometry', \
reasoning)
ret_val.append(result)
return ret_val
@is_likely_dsg
def check_missing_data(self, ds):
"""
9.6 Auxiliary coordinate variables (spatial and time) must contain missing values to indicate a void in data storage
in the file but must not have missing data for any other reason.
It is not permitted for auxiliary coordinate variables to have missing values for elements where there is non-missing
data. Where any auxiliary coordinate variable contains a missing value, all other coordinate, auxiliary coordinate
and data values corresponding to that element should also contain missing values. Data variables should (as usual)
also contain missing values to indicate when there is no valid data available for the element, although the
coordinates are valid.
Similarly, for indices where the instance variable identified by cf_role contains a missing value indicator, all other
instance variables should also contain missing values.
"""
ret_val = []
name_list = ds.dataset.variables.keys()
dim_list = ds.dataset.dimensions.keys()
for name, var in ds.dataset.variables.iteritems():
if hasattr(var,u'coordinates'):
aux_index_dict = {}
dim_index_dict = {}
reasoning = []
valid = False
aux_valid = False
if hasattr(var, u'_FillValue'):
for coordinate in getattr(var, u'coordinates', u'').split(" "):
indices = []
if coordinate in name_list and coordinate not in dim_list:
try:
indices = np.where(ds.dataset.variables[coordinate] == var._FillValue).tolist()
except:
indices = np.where(ds.dataset.variables[coordinate] == var._FillValue)[0].tolist()
dim_index_dict[name+u'-'+coordinate] = indices
aux_index_dict[name+u'-'+coordinate] = indices
elif coordinate in name_list and coordinate in dim_list:
try:
indices = np.where(ds.dataset.variables[coordinate] == var._FillValue).tolist()
except:
indices = np.where(ds.dataset.variables[coordinate] == var._FillValue)[0].tolist()
dim_index_dict[name+u'-'+coordinate] = indices
else:
dim_index_dict[name+u'-'+coordinate] = []
#Check to see that all coordinate variable missing data locations are the same
aux_index_list = []
for each in aux_index_dict:
aux_index_list.append(aux_index_dict[each])
if aux_index_list != []:
aux_valid = all(x == aux_index_list[0] for x in aux_index_list)
else:
aux_valid = True
#Check to see that all auxiliary coordinate variable missing data appears in the coordinate variables
dim_index_list = []
for each in dim_index_dict:
dim_index_list.append(dim_index_dict[each])
if dim_index_list != []:
valid = all(x == dim_index_list[0] for x in dim_index_list)
else:
valid = True
if aux_valid == False:
reasoning.append(u'The auxiliary coordinates do not have the same missing data locations')
if valid == False:
reasoning.append(u'The coordinate variables do not have the same missing data locations as the auxiliary coordinates')
#Check to see that all coordinate variable missing data is reflected in the dataset
valid_missing = True
if hasattr(var, u'_FillValue'):
try:
x_indices = np.where(var==var._FillValue).tolist()
except:
x_indices = np.where(var==var._FillValue)[0].tolist()
for coordinate in var.coordinates.split(" "):
coordinate_ind_list = dim_index_dict[name+u'-'+coordinate]
valid_missing = all(each in x_indices for each in coordinate_ind_list)
if valid_missing == False:
reasoning.append(u'The data does not have the same missing data locations as the coordinates')
count = int(valid) + int(aux_valid) + int(valid_missing)
result = Result(BaseCheck.MEDIUM,
valid and aux_valid and valid_missing,
(u'§9.6 Missing Data', name, u'missing_data'),
reasoning)
ret_val.append(result)
return ret_val
def _find_container_variables(self, ds):
container_vars = []
platform_name = getattr(ds.dataset, u'platform', None)
if platform_name is not None:
container_vars.append(platform_name)
for k, v in ds.dataset.variables.iteritems():
if k in (u'crs', u'instrument', u'station'):
if not v.shape: # Empty dimension
container_vars.append(k)
platform_name = getattr(v, u'platform', None)
if platform_name is not None:
container_vars.append(platform_name)
instrument_name = getattr(v, u'instrument_name', None)
if instrument_name is not None:
container_vars.append(instrument_name)
return list(set(container_vars))
class CFNCCheck(BaseNCCheck, CFBaseCheck):
@classmethod
def beliefs(cls): # @TODO
return {}
| abirger/compliance-checker | compliance_checker/cf/cf.py | Python | apache-2.0 | 138,963 | [
"NetCDF"
] | f38485fdbf49a28b2e4973d73900e258e1732ea1f646f64f03fd890e330f7ea5 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2004-2007 Donald N. Allingham
# Copyright (C) 2010 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
Class handling language-specific displaying of names.
Specific symbols for parts of a name are defined:
't' : title
'f' : given (first names)
'l' : full surname (lastname)
'c' : callname
'x' : nick name if existing, otherwise first first name (common name)
'i' : initials of the first names
'm' : primary surname (main)
'0m': primary surname prefix
'1m': primary surname surname
'2m': primary surname connector
'y' : pa/matronymic surname (father/mother) - assumed unique
'0y': " prefix
'1y': " surname
'2y': " connector
'o' : surnames without pa/matronymic and primary
'r' : non primary surnames (rest)
'p' : list of all prefixes
'q' : surnames without prefixes and connectors
's' : suffix
'n' : nick name
'g' : family nick name
"""
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
from ..ggettext import sgettext as _
import re
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from ..lib.name import Name
from ..lib.nameorigintype import NameOriginType
try:
from ..config import config
WITH_GRAMPS_CONFIG=True
except ImportError:
WITH_GRAMPS_CONFIG=False
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
_FIRSTNAME = 4
_SURNAME_LIST = 5
_SUFFIX = 6
_TITLE = 7
_TYPE = 8
_GROUP = 9
_SORT = 10
_DISPLAY = 11
_CALL = 12
_NICK = 13
_FAMNICK = 14
_SURNAME_IN_LIST = 0
_PREFIX_IN_LIST = 1
_PRIMARY_IN_LIST = 2
_TYPE_IN_LIST = 3
_CONNECTOR_IN_LIST = 4
_ORIGINPATRO = NameOriginType.PATRONYMIC
_ORIGINMATRO = NameOriginType.MATRONYMIC
_ACT = True
_INA = False
_F_NAME = 0 # name of the format
_F_FMT = 1 # the format string
_F_ACT = 2 # if the format is active
_F_FN = 3 # name format function
_F_RAWFN = 4 # name format raw function
PAT_AS_SURN = False
#-------------------------------------------------------------------------
#
# Local functions
#
#-------------------------------------------------------------------------
# Because this is used inside an exec(), it couldn't be a lambda:
# we sort names on longest first, then last letter first, so that
# translations of shorter terms which appear inside longer ones are not
# substituted too early, e.g. namelast must not be mistaken for name, so
# namelast must be converted to its %-code before name is converted.
def _make_cmp(a, b): return -cmp((len(a[1]),a[1]), (len(b[1]), b[1]))
#-------------------------------------------------------------------------
#
# NameDisplayError class
#
#-------------------------------------------------------------------------
class NameDisplayError(Exception):
"""
Error used to report that the name display format string is invalid.
"""
def __init__(self, value):
Exception.__init__(self)
self.value = value
def __str__(self):
return self.value
#-------------------------------------------------------------------------
#
# Functions to extract data from raw lists (unserialized objects)
#
#-------------------------------------------------------------------------
def _raw_full_surname(raw_surn_data_list):
"""method for the 'l' symbol: full surnames"""
result = ""
for raw_surn_data in raw_surn_data_list:
result += "%s %s %s " % (raw_surn_data[_PREFIX_IN_LIST],
raw_surn_data[_SURNAME_IN_LIST],
raw_surn_data[_CONNECTOR_IN_LIST])
return ' '.join(result.split()).strip()
def _raw_primary_surname(raw_surn_data_list):
"""method for the 'm' symbol: primary surname"""
global PAT_AS_SURN
nrsur = len(raw_surn_data_list)
for raw_surn_data in raw_surn_data_list:
if raw_surn_data[_PRIMARY_IN_LIST]:
#if there are multiple surnames, return the primary. If there
#is only one surname, then primary has little meaning, and we
#assume a pa/matronymic should not be given as primary as it
#normally is defined independently
if not PAT_AS_SURN and nrsur == 1 and \
(raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINPATRO
or raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINMATRO):
return ''
else:
result = "%s %s %s" % (raw_surn_data[_PREFIX_IN_LIST],
raw_surn_data[_SURNAME_IN_LIST],
raw_surn_data[_CONNECTOR_IN_LIST])
return ' '.join(result.split())
return ''
def _raw_primary_surname_only(raw_surn_data_list):
"""method to obtain the raw primary surname data, so this returns a string
"""
global PAT_AS_SURN
nrsur = len(raw_surn_data_list)
for raw_surn_data in raw_surn_data_list:
if raw_surn_data[_PRIMARY_IN_LIST]:
if not PAT_AS_SURN and nrsur == 1 and \
(raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINPATRO
or raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINMATRO):
return ''
else:
return raw_surn_data[_SURNAME_IN_LIST]
return ''
def _raw_primary_prefix_only(raw_surn_data_list):
"""method to obtain the raw primary surname data"""
global PAT_AS_SURN
nrsur = len(raw_surn_data_list)
for raw_surn_data in raw_surn_data_list:
if raw_surn_data[_PRIMARY_IN_LIST]:
if not PAT_AS_SURN and nrsur == 1 and \
(raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINPATRO
or raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINMATRO):
return ''
else:
return raw_surn_data[_PREFIX_IN_LIST]
return ''
def _raw_primary_conn_only(raw_surn_data_list):
"""method to obtain the raw primary surname data"""
global PAT_AS_SURN
nrsur = len(raw_surn_data_list)
for raw_surn_data in raw_surn_data_list:
if raw_surn_data[_PRIMARY_IN_LIST]:
if not PAT_AS_SURN and nrsur == 1 and \
(raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINPATRO
or raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINMATRO):
return ''
else:
return raw_surn_data[_CONNECTOR_IN_LIST]
return ''
def _raw_patro_surname(raw_surn_data_list):
"""method for the 'y' symbol: patronymic surname"""
for raw_surn_data in raw_surn_data_list:
if (raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINPATRO or
raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINMATRO):
result = "%s %s %s" % (raw_surn_data[_PREFIX_IN_LIST],
raw_surn_data[_SURNAME_IN_LIST],
raw_surn_data[_CONNECTOR_IN_LIST])
return ' '.join(result.split())
return ''
def _raw_patro_surname_only(raw_surn_data_list):
"""method for the '1y' symbol: patronymic surname only"""
for raw_surn_data in raw_surn_data_list:
if (raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINPATRO or
raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINMATRO):
result = "%s" % (raw_surn_data[_SURNAME_IN_LIST])
return ' '.join(result.split())
return ''
def _raw_patro_prefix_only(raw_surn_data_list):
"""method for the '0y' symbol: patronymic prefix only"""
for raw_surn_data in raw_surn_data_list:
if (raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINPATRO or
raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINMATRO):
result = "%s" % (raw_surn_data[_PREFIX_IN_LIST])
return ' '.join(result.split())
return ''
def _raw_patro_conn_only(raw_surn_data_list):
"""method for the '2y' symbol: patronymic conn only"""
for raw_surn_data in raw_surn_data_list:
if (raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINPATRO or
raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINMATRO):
result = "%s" % (raw_surn_data[_CONNECTOR_IN_LIST])
return ' '.join(result.split())
return ''
def _raw_nonpatro_surname(raw_surn_data_list):
"""method for the 'o' symbol: full surnames without pa/matronymic or
primary
"""
result = ""
for raw_surn_data in raw_surn_data_list:
if ((not raw_surn_data[_PRIMARY_IN_LIST]) and
raw_surn_data[_TYPE_IN_LIST][0] != _ORIGINPATRO and
raw_surn_data[_TYPE_IN_LIST][0] != _ORIGINMATRO):
result += "%s %s %s " % (raw_surn_data[_PREFIX_IN_LIST],
raw_surn_data[_SURNAME_IN_LIST],
raw_surn_data[_CONNECTOR_IN_LIST])
return ' '.join(result.split()).strip()
def _raw_nonprimary_surname(raw_surn_data_list):
"""method for the 'r' symbol: nonprimary surnames"""
result = ''
for raw_surn_data in raw_surn_data_list:
if not raw_surn_data[_PRIMARY_IN_LIST]:
result = "%s %s %s %s" % (result, raw_surn_data[_PREFIX_IN_LIST],
raw_surn_data[_SURNAME_IN_LIST],
raw_surn_data[_CONNECTOR_IN_LIST])
return ' '.join(result.split())
def _raw_prefix_surname(raw_surn_data_list):
"""method for the 'p' symbol: all prefixes"""
result = ""
for raw_surn_data in raw_surn_data_list:
result += "%s " % (raw_surn_data[_PREFIX_IN_LIST])
return ' '.join(result.split()).strip()
def _raw_single_surname(raw_surn_data_list):
"""method for the 'q' symbol: surnames without prefix and connectors"""
result = ""
for raw_surn_data in raw_surn_data_list:
result += "%s " % (raw_surn_data[_SURNAME_IN_LIST])
return ' '.join(result.split()).strip()
def cleanup_name(namestring):
"""Remove too long white space due to missing name parts,
so "a b" becomes "a b" and "a , b" becomes "a, b"
"""
parts = namestring.split()
if not parts:
return ""
result = parts[0]
for val in parts[1:]:
if len(val) == 1 and val in [',', ';', ':']:
result += val
else:
result += ' ' + val
return result
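# Example usage (comments only), traced by hand against the code above:
#   cleanup_name("Garner ,  Lewis") -> "Garner, Lewis"
#   cleanup_name("a    b")          -> "a b"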
#-------------------------------------------------------------------------
#
# NameDisplay class
#
#-------------------------------------------------------------------------
class NameDisplay(object):
"""
Base class for displaying of Name instances.
property:
* default_format: the default name format to use
* pat_as_surn : if only one surname, see if pa/ma should be considered as
'the' surname.
"""
format_funcs = {}
raw_format_funcs = {}
STANDARD_FORMATS = [
(Name.DEF, _("Default format (defined by Gramps preferences)"), '', _ACT),
(Name.LNFN, _("Surname, Given Suffix"), '%l, %f %s', _ACT),
(Name.FN, _("Given"), '%f', _ACT),
(Name.FNLN, _("Given Surname Suffix"), '%f %l %s', _ACT),
# primary name primconnector other, given pa/matronymic suffix, primprefix
# translators, long string, have a look at Preferences dialog
(Name.LNFNP, _("Main Surnames, Given Patronymic Suffix Prefix"),
'%1m %2m %o, %f %1y %s %0m', _ACT),
# DEPRECATED FORMATS
(Name.PTFN, _("Patronymic, Given"), '%y, %s %f', _INA),
]
def __init__(self):
global WITH_GRAMPS_CONFIG
global PAT_AS_SURN
self.name_formats = {}
if WITH_GRAMPS_CONFIG:
self.default_format = config.get('preferences.name-format')
if self.default_format == 0:
self.default_format = Name.LNFN
config.set('preferences.name-format', self.default_format)
#if only one surname, see if pa/ma should be considered as
# 'the' surname.
PAT_AS_SURN = config.get('preferences.patronimic-surname')
config.connect('preferences.patronimic-surname', self.change_pa_sur)
else:
self.default_format = Name.LNFN
PAT_AS_SURN = False
#preinit the name formats, this should be updated with the data
#in the database once a database is loaded
self.set_name_format(self.STANDARD_FORMATS)
def change_pa_sur(self, *args):
""" How to handle single patronymic as surname is changed"""
global PAT_AS_SURN
PAT_AS_SURN = config.get('preferences.patronimic-surname')
def get_pat_as_surn(self):
global PAT_AS_SURN
return PAT_AS_SURN
def _format_fn(self, fmt_str):
return lambda x: self.format_str(x, fmt_str)
def _format_raw_fn(self, fmt_str):
return lambda x: self.format_str_raw(x, fmt_str)
def _raw_lnfn(self, raw_data):
result = "%s, %s %s" % (_raw_full_surname(raw_data[_SURNAME_LIST]),
raw_data[_FIRSTNAME],
raw_data[_SUFFIX])
return ' '.join(result.split())
def _raw_fnln(self, raw_data):
result = "%s %s %s" % (raw_data[_FIRSTNAME],
_raw_full_surname(raw_data[_SURNAME_LIST]),
raw_data[_SUFFIX])
return ' '.join(result.split())
def _raw_fn(self, raw_data):
result = raw_data[_FIRSTNAME]
return ' '.join(result.split())
def set_name_format(self, formats):
raw_func_dict = {
Name.LNFN : self._raw_lnfn,
Name.FNLN : self._raw_fnln,
Name.FN : self._raw_fn,
}
for (num, name, fmt_str, act) in formats:
func = self._format_fn(fmt_str)
func_raw = raw_func_dict.get(num, self._format_raw_fn(fmt_str))
self.name_formats[num] = (name, fmt_str, act, func, func_raw)
self.set_default_format(self.get_default_format())
def add_name_format(self, name, fmt_str):
num = -1
while num in self.name_formats:
num -= 1
self.set_name_format([(num, name, fmt_str,_ACT)])
return num
def edit_name_format(self, num, name, fmt_str):
self.set_name_format([(num, name, fmt_str,_ACT)])
if self.default_format == num:
self.set_default_format(num)
def del_name_format(self, num):
try:
del self.name_formats[num]
except:
pass
def set_default_format(self, num):
if num not in self.name_formats:
num = Name.LNFN
# if user sets default format to the Gramps default format,
# then we select LNFN as format.
if num == Name.DEF:
num = Name.LNFN
self.default_format = num
self.name_formats[Name.DEF] = (self.name_formats[Name.DEF][_F_NAME],
self.name_formats[Name.DEF][_F_FMT],
self.name_formats[Name.DEF][_F_ACT],
self.name_formats[num][_F_FN],
self.name_formats[num][_F_RAWFN])
def get_default_format(self):
return self.default_format
def set_format_inactive(self, num):
try:
self.name_formats[num] = (self.name_formats[num][_F_NAME],
self.name_formats[num][_F_FMT],
_INA,
self.name_formats[num][_F_FN],
self.name_formats[num][_F_RAWFN])
except:
pass
def get_name_format(self, also_default=False,
only_custom=False,
only_active=True):
"""
Get a list of tuples (num, name,fmt_str,act)
"""
the_list = []
keys = sorted(self.name_formats, self._sort_name_format)
for num in keys:
if ((also_default or num) and
(not only_custom or (num < 0)) and
(not only_active or self.name_formats[num][_F_ACT])):
the_list.append((num,) + self.name_formats[num][_F_NAME:_F_FN])
return the_list
def _sort_name_format(self, x, y):
if x < 0:
if y < 0:
return x+y
else:
return -x+y
else:
if y < 0:
return -x+y
else:
return x-y
def _is_format_valid(self, num):
try:
if not self.name_formats[num][_F_ACT]:
num = 0
except:
num = 0
return num
#-------------------------------------------------------------------------
def _gen_raw_func(self, format_str):
"""The job of building the name from a format string is rather
expensive and it is called lots and lots of times. So it is worth
going to some length to optimise it as much as possible.
This method constructs a new function that is specifically written
to format a name given a particular format string. This is worthwhile
because the format string itself rarely changes, so by caching the new
function and calling it directly when asked to format a name to the
same format string again we can be as quick as possible.
The new function is of the form:
def fn(raw_data):
return "%s %s %s" % (raw_data[_TITLE],
raw_data[_FIRSTNAME],
raw_data[_SUFFIX])
Specific symbols for parts of a name are defined (keywords given):
't' : title = title
'f' : given = given (first names)
'l' : surname = full surname (lastname)
'c' : call = callname
'x' : common = nick name if existing, otherwise first first name (common name)
'i' : initials = initials of the first names
'm' : primary = primary surname (main)
'0m': primary[pre]= prefix primary surname (main)
'1m': primary[sur]= surname primary surname (main)
'2m': primary[con]= connector primary surname (main)
'y' : patronymic = pa/matronymic surname (father/mother) - assumed unique
'0y': patronymic[pre] = prefix "
'1y': patronymic[sur] = surname "
'2y': patronymic[con] = connector "
'o' : notpatronymic = surnames without pa/matronymic and primary
'r' : rest = non primary surnames
'p' : prefix = list of all prefixes
'q' : rawsurnames = surnames without prefixes and connectors
's' : suffix = suffix
'n' : nickname = nick name
'g' : familynick = family nick name
"""
# we need the names of each of the variables or methods that are
# called to fill in each format flag.
# Dictionary is "code": ("expression", "keyword", "i18n-keyword")
d = {"t": ("raw_data[_TITLE]", "title",
_("Person|title")),
"f": ("raw_data[_FIRSTNAME]", "given",
_("given")),
"l": ("_raw_full_surname(raw_data[_SURNAME_LIST])", "surname",
_("surname")),
"s": ("raw_data[_SUFFIX]", "suffix",
_("suffix")),
"c": ("raw_data[_CALL]", "call",
_("Name|call")),
"x": ("(raw_data[_NICK] or raw_data[_FIRSTNAME].split(' ')[0])",
"common",
_("Name|common")),
"i": ("''.join([word[0] +'.' for word in ('. ' +" +
" raw_data[_FIRSTNAME]).split()][1:])",
"initials",
_("initials")),
"m": ("_raw_primary_surname(raw_data[_SURNAME_LIST])",
"primary",
_("Name|primary")),
"0m": ("_raw_primary_prefix_only(raw_data[_SURNAME_LIST])",
"primary[pre]",
_("primary[pre]")),
"1m": ("_raw_primary_surname_only(raw_data[_SURNAME_LIST])",
"primary[sur]",
_("primary[sur]")),
"2m": ("_raw_primary_conn_only(raw_data[_SURNAME_LIST])",
"primary[con]",
_("primary[con]")),
"y": ("_raw_patro_surname(raw_data[_SURNAME_LIST])", "patronymic",
_("patronymic")),
"0y": ("_raw_patro_prefix_only(raw_data[_SURNAME_LIST])", "patronymic[pre]",
_("patronymic[pre]")),
"1y": ("_raw_patro_surname_only(raw_data[_SURNAME_LIST])", "patronymic[sur]",
_("patronymic[sur]")),
"2y": ("_raw_patro_conn_only(raw_data[_SURNAME_LIST])", "patronymic[con]",
_("patronymic[con]")),
"o": ("_raw_nonpatro_surname(raw_data[_SURNAME_LIST])", "notpatronymic",
_("notpatronymic")),
"r": ("_raw_nonprimary_surname(raw_data[_SURNAME_LIST])",
"rest",
_("Remaining names|rest")),
"p": ("_raw_prefix_surname(raw_data[_SURNAME_LIST])",
"prefix",
_("prefix")),
"q": ("_raw_single_surname(raw_data[_SURNAME_LIST])",
"rawsurnames",
_("rawsurnames")),
"n": ("raw_data[_NICK]", "nickname",
_("nickname")),
"g": ("raw_data[_FAMNICK]", "familynick",
_("familynick")),
}
args = "raw_data"
return self._make_fn(format_str, d, args)
def _gen_cooked_func(self, format_str):
"""The job of building the name from a format string is rather
expensive and it is called lots and lots of times. So it is worth
going to some length to optimise it as much as possible.
This method constructs a new function that is specifically written
to format a name given a particular format string. This is worthwhile
because the format string itself rarely changes, so by caching the new
function and calling it directly when asked to format a name to the
same format string again we can be as quick as possible.
The new function is of the form:
def fn(first, raw_surname_list, suffix, title, call,):
return "%s %s" % (first,suffix)
Specific symbols for parts of a name are defined (keywords given):
't' : title = title
'f' : given = given (first names)
'l' : surname = full surname (lastname)
'c' : call = callname
'x' : common = nick name if existing, otherwise first first name (common name)
'i' : initials = initials of the first names
'm' : primary = primary surname (main)
'0m': primary[pre]= prefix primary surname (main)
'1m': primary[sur]= surname primary surname (main)
'2m': primary[con]= connector primary surname (main)
'y' : patronymic = pa/matronymic surname (father/mother) - assumed unique
'0y': patronymic[pre] = prefix "
'1y': patronymic[sur] = surname "
'2y': patronymic[con] = connector "
'o' : notpatronymic = surnames without pa/matronymic and primary
'r' : rest = non primary surnames
'p' : prefix = list of all prefixes
'q' : rawsurnames = surnames without prefixes and connectors
's' : suffix = suffix
'n' : nickname = nick name
'g' : familynick = family nick name
"""
# we need the names of each of the variables or methods that are
# called to fill in each format flag.
# Dictionary is "code": ("expression", "keyword", "i18n-keyword")
d = {"t": ("title", "title",
_("Person|title")),
"f": ("first", "given",
_("given")),
"l": ("_raw_full_surname(raw_surname_list)", "surname",
_("surname")),
"s": ("suffix", "suffix",
_("suffix")),
"c": ("call", "call",
_("Name|call")),
"x": ("(nick or first.split(' ')[0])", "common",
_("Name|common")),
"i": ("''.join([word[0] +'.' for word in ('. ' + first).split()][1:])",
"initials",
_("initials")),
"m": ("_raw_primary_surname(raw_surname_list)", "primary",
_("Name|primary")),
"0m":("_raw_primary_prefix_only(raw_surname_list)",
"primary[pre]", _("primary[pre]")),
"1m":("_raw_primary_surname_only(raw_surname_list)",
"primary[sur]",_("primary[sur]")),
"2m":("_raw_primary_conn_only(raw_surname_list)",
"primary[con]", _("primary[con]")),
"y": ("_raw_patro_surname(raw_surname_list)", "patronymic",
_("patronymic")),
"0y":("_raw_patro_prefix_only(raw_surname_list)", "patronymic[pre]",
_("patronymic[pre]")),
"1y":("_raw_patro_surname_only(raw_surname_list)", "patronymic[sur]",
_("patronymic[sur]")),
"2y":("_raw_patro_conn_only(raw_surname_list)", "patronymic[con]",
_("patronymic[con]")),
"o": ("_raw_nonpatro_surname(raw_surname_list)", "notpatronymic",
_("notpatronymic")),
"r": ("_raw_nonprimary_surname(raw_surname_list)", "rest",
_("Remaining names|rest")),
"p": ("_raw_prefix_surname(raw_surname_list)", "prefix",
_("prefix")),
"q": ("_raw_single_surname(raw_surname_list)", "rawsurnames",
_("rawsurnames")),
"n": ("nick", "nickname",
_("nickname")),
"g": ("famnick", "familynick",
_("familynick")),
}
args = "first,raw_surname_list,suffix,title,call,nick,famnick"
return self._make_fn(format_str, d, args)
def format_str(self, name, format_str):
return self._format_str_base(name.first_name, name.surname_list,
name.suffix, name.title,
name.call, name.nick, name.famnick,
format_str)
def format_str_raw(self, raw_data, format_str):
"""
Format a name from the raw name list. To make this as fast as possible
this uses _gen_raw_func to generate a new method for each new format_string.
It does not call _format_str_base because it would introduce an extra
method call and we need all the speed we can squeeze out of this.
"""
func = self.__class__.raw_format_funcs.get(format_str)
if func is None:
func = self._gen_raw_func(format_str)
self.__class__.raw_format_funcs[format_str] = func
return func(raw_data)
def _format_str_base(self, first, surname_list, suffix, title, call,
nick, famnick, format_str):
"""
Generates name from a format string.
The following substitutions are made:
'%t' : title
'%f' : given (first names)
'%l' : full surname (lastname)
'%c' : callname
'%x' : nick name if existing, otherwise first first name (common name)
'%i' : initials of the first names
'%m' : primary surname (main)
'%0m': prefix primary surname (main)
'%1m': surname primary surname (main)
'%2m': connector primary surname (main)
'%y' : pa/matronymic surname (father/mother) - assumed unique
'%0y': prefix "
'%1y': surname "
'%2y': connector "
'%o' : surnames without patronymic
'%r' : non-primary surnames (rest)
'%p' : list of all prefixes
'%q' : surnames without prefixes and connectors
'%s' : suffix
'%n' : nick name
'%g' : family nick name
The capital letters are substituted for capitalized name components.
The %% is substituted with the single % character.
All the other characters in the fmt_str are unaffected.
"""
func = self.__class__.format_funcs.get(format_str)
if func is None:
func = self._gen_cooked_func(format_str)
self.__class__.format_funcs[format_str] = func
try:
s = func(first, [surn.serialize() for surn in surname_list],
suffix, title, call, nick, famnick)
except (ValueError, TypeError,):
raise NameDisplayError, "Incomplete format string"
return s
#-------------------------------------------------------------------------
def primary_surname(self, name):
global PAT_AS_SURN
nrsur = len(name.surname_list)
sur = name.get_primary_surname()
if not PAT_AS_SURN and nrsur <= 1 and \
(sur.get_origintype().value == _ORIGINPATRO
or sur.get_origintype().value == _ORIGINMATRO):
return ''
return sur.get_surname()
def sort_string(self, name):
return u"%-25s%-30s%s" % (self.primary_surname(name),
name.first_name, name.suffix)
def sorted(self, person):
"""
Return a text string representing the L{gen.lib.Person} instance's
L{Name} in a manner that should be used for displaying a sorted
name.
@param person: L{gen.lib.Person} instance that contains the
L{Name} that is to be displayed. The primary name is used for
the display.
@type person: L{gen.lib.Person}
@returns: Returns the L{gen.lib.Person} instance's name
@rtype: str
"""
name = person.get_primary_name()
return self.sorted_name(name)
def sorted_name(self, name):
"""
Return a text string representing the L{Name} instance
in a manner that should be used for sorting the name in a list.
@param name: L{Name} instance that is to be displayed.
@type name: L{Name}
@returns: Returns the L{Name} string representation
@rtype: str
"""
num = self._is_format_valid(name.sort_as)
return self.name_formats[num][_F_FN](name)
def truncate(self, full_name, max_length=15, elipsis="..."):
name_out = ""
if len(full_name) <= max_length:
name_out = full_name
else:
last_space = full_name.rfind(" ", max_length)
if (last_space) > -1:
name_out = full_name[:last_space]
else:
name_out = full_name[:max_length]
name_out += " " + elipsis
return name_out
def raw_sorted_name(self, raw_data):
"""
Return a text string representing the L{Name} instance
in a manner that should be used for sorting the name in a list.
@param name: L{raw_data} raw unserialized data of name that is to be displayed.
@type name: tuple
@returns: Returns the L{Name} string representation
@rtype: str
"""
num = self._is_format_valid(raw_data[_SORT])
return self.name_formats[num][_F_RAWFN](raw_data)
def display(self, person):
"""
Return a text string representing the L{gen.lib.Person} instance's
L{Name} in a manner that should be used for normal displaying.
@param person: L{gen.lib.Person} instance that contains the
L{Name} that is to be displayed. The primary name is used for
the display.
@type person: L{gen.lib.Person}
@returns: Returns the L{gen.lib.Person} instance's name
@rtype: str
"""
name = person.get_primary_name()
return self.display_name(name)
def display_formal(self, person):
"""
Return a text string representing the L{gen.lib.Person} instance's
L{Name} in a manner that should be used for formal displaying.
@param person: L{gen.lib.Person} instance that contains the
L{Name} that is to be displayed. The primary name is used for
the display.
@type person: L{gen.lib.Person}
@returns: Returns the L{gen.lib.Person} instance's name
@rtype: str
"""
# FIXME: At this time, this is just duplicating display() method
name = person.get_primary_name()
return self.display_name(name)
def display_name(self, name):
"""
Return a text string representing the L{Name} instance
in a manner that should be used for normal displaying.
@param name: L{Name} instance that is to be displayed.
@type name: L{Name}
@returns: Returns the L{Name} string representation
@rtype: str
"""
if name is None:
return ""
num = self._is_format_valid(name.display_as)
return self.name_formats[num][_F_FN](name)
def raw_display_name(self, raw_data):
"""
Return a text string representing the L{Name} instance
in a manner that should be used for normal displaying.
@param name: L{raw_data} raw unserialized data of name that is to be displayed.
@type name: tuple
@returns: Returns the L{Name} string representation
@rtype: str
"""
num = self._is_format_valid(raw_data[_DISPLAY])
return self.name_formats[num][_F_RAWFN](raw_data)
def display_given(self, person):
return self.format_str(person.get_primary_name(),'%f')
def name_grouping(self, db, person):
"""
Return the name under which to group this person. This is defined as:
1/ if group name is defined on primary name, use that
2/ if group name is defined for the primary surname of the primary name, use that
3/ use primary surname of primary name otherwise
"""
return self.name_grouping_name(db, person.primary_name)
def name_grouping_name(self, db, pn):
"""
Return the name under which to group. This is defined as:
1/ if group name is defined, use that
2/ if group name is defined for the primary surname, use that
3/ use primary surname itself otherwise
@param pn: L{Name} object
@type pn: L{Name} instance
@returns: Returns the groupname string representation
@rtype: str
"""
if pn.group_as:
return pn.group_as
return db.get_name_group_mapping(pn.get_primary_surname().get_surname())
def name_grouping_data(self, db, pn):
"""
Return the name under which to group. This is defined as:
1/ if group name is defined, use that
2/ if group name is defined for the primary surname, use that
3/ use primary surname itself otherwise
@param pn: raw unserialized data of name
@type pn: tuple
@returns: Returns the groupname string representation
@rtype: str
"""
if pn[_GROUP]:
return pn[_GROUP]
return db.get_name_group_mapping(_raw_primary_surname_only(
pn[_SURNAME_LIST]))
def _make_fn(self, format_str, d, args):
"""
Create the name display function and handles dependent
punctuation.
"""
# d is a dict: dict[code] = (expr, word, translated word)
# First, go through and do internationalization-based
# key-word replacement. Just replace ikeywords with
# %codes (ie, replace "irstnamefay" with "%f", and
# "IRSTNAMEFAY" for %F)
if (len(format_str) > 2 and
format_str[0] == format_str[-1] == '"'):
pass
else:
d_keys = [(code, _tuple[2]) for code, _tuple in d.iteritems()]
d_keys.sort(_make_cmp) # reverse on length and by ikeyword
for (code, ikeyword) in d_keys:
exp, keyword, ikeyword = d[code]
#ikeyword = unicode(ikeyword, "utf8")
format_str = format_str.replace(ikeyword, "%"+ code)
format_str = format_str.replace(ikeyword.title(), "%"+ code)
format_str = format_str.replace(ikeyword.upper(), "%"+ code.upper())
# Next, go through and do key-word replacement.
# Just replace keywords with
# %codes (ie, replace "firstname" with "%f", and
# "FIRSTNAME" for %F)
if (len(format_str) > 2 and
format_str[0] == format_str[-1] == '"'):
pass
else:
d_keys = [(code, _tuple[1]) for code, _tuple in d.iteritems()]
d_keys.sort(_make_cmp) # reverse sort on length and by keyword
# if in double quotes, just use % codes
for (code, keyword) in d_keys:
exp, keyword, ikeyword = d[code]
keyword = unicode(keyword, "utf8")
format_str = format_str.replace(keyword, "%"+ code)
format_str = format_str.replace(keyword.title(), "%"+ code)
format_str = format_str.replace(keyword.upper(), "%"+ code.upper())
# Get lower and upper versions of codes:
codes = d.keys() + [c.upper() for c in d]
# Next, list out the matching patterns:
# If it starts with "!" however, treat the punctuation verbatim:
if len(format_str) > 0 and format_str[0] == "!":
patterns = ["%(" + ("|".join(codes)) + ")", # %s
]
format_str = format_str[1:]
else:
patterns = [
",\W*\"%(" + ("|".join(codes)) + ")\"", # ,\W*"%s"
",\W*\(%(" + ("|".join(codes)) + ")\)", # ,\W*(%s)
",\W*%(" + ("|".join(codes)) + ")", # ,\W*%s
"\"%(" + ("|".join(codes)) + ")\"", # "%s"
"_%(" + ("|".join(codes)) + ")_", # _%s_
"\(%(" + ("|".join(codes)) + ")\)", # (%s)
"%(" + ("|".join(codes)) + ")", # %s
]
new_fmt = format_str
# replace the specific format string flags with a
# flag that works in standard python format strings.
new_fmt = re.sub("|".join(patterns), "%s", new_fmt)
# find each format flag in the original format string
# for each one we find the variable name that is needed to
# replace it and add this to a list. This list will be used to
# generate the replacement tuple.
# This compiled pattern should match all of the format codes.
pat = re.compile("|".join(patterns))
param = ()
mat = pat.search(format_str)
while mat:
match_pattern = mat.group(0) # the matching pattern
# prefix, code, suffix:
p, code, s = re.split("%(.)", match_pattern)
if code in '0123456789':
code = code + s[0]
s = s[1:]
field = d[code.lower()][0]
if code.isupper():
field += ".upper()"
if p == '' and s == '':
param = param + (field,)
else:
param = param + ("ifNotEmpty(%s,'%s','%s')" % (field, p, s), )
mat = pat.search(format_str, mat.end())
s = """
def fn(%s):
def ifNotEmpty(str,p,s):
if str == '':
return ''
else:
return p + str + s
return cleanup_name("%s" %% (%s))""" % (args, new_fmt, ",".join(param))
exec(s)
return fn
displayer = NameDisplay()
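# Hedged, standalone sketch of the keyword-to-%-code replacement idea used by
# _make_fn above.  The two keywords below are illustrative rather than the full
# keyword table, and the Title-case handling is omitted for brevity.
def _demo_keywords_to_codes(format_str):
    """Turn e.g. 'SURNAME, firstname' into '%L, %f' for the formatter."""
    demo_codes = {'f': 'firstname', 'l': 'surname'}
    # Replace longer keywords first, mirroring the reverse length sort above.
    for code, keyword in sorted(demo_codes.items(), key=lambda kv: -len(kv[1])):
        format_str = format_str.replace(keyword, '%' + code)
        format_str = format_str.replace(keyword.upper(), '%' + code.upper())
    return format_str
# _demo_keywords_to_codes('SURNAME, firstname') returns '%L, %f'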
| arunkgupta/gramps | gramps/gen/display/name.py | Python | gpl-2.0 | 41,723 | [
"Brian"
] | 9efd9f836ab79c1cd3758801b714924c19d657b98f0afc347034677dcc3e838f |
"""
Shopcart Steps
Steps file for shopcarts.feature
"""
from os import getenv
import json
import requests
from behave import *
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
@when(u'I visit the "Home Page"')
def step_impl(context):
""" Make a call to the base URL """
context.driver.get(context.base_url)
@then(u'I should see "{message}"')
def step_impl(context, message):
assert message in context.driver.find_element_by_id('app-js').text
@then(u'I should see "{user_id}" in the results')
def step_impl(context, user_id):
row = 'shopcart-' + user_id + '-row'
wait = WebDriverWait(context.driver, 10)
wait.until(EC.visibility_of_element_located((By.ID, row)))
assert user_id in context.driver.find_element_by_id( row).text
@then(u'I should not see "{message}"')
def step_impl(context, message):
assert message not in context.driver.find_element_by_id('app-js').text
# CREATE SHOPCART
@when(u'I set the Shopcart "{user_id}" to "{value}"')
def step_impl(context, user_id, value):
element = context.driver.find_element_by_id(user_id)
element.clear()
element.send_keys(value)
@when(u'I click the "{button}" button')
def step_impl(context, button):
button_id = button.lower() + '-btn'
context.driver.find_element_by_id(button_id).click()
# wait = WebDriverWait(context.driver, 10)
# wait.until(EC.visibility_of_element_located((By.ID, 'shopcart-header')))
@then(u'I should see Shopcart "{id}" in the results')
def step_impl(context, id):
element_id = 'shopcart-' + id + '-row'
assert context.driver.find_element_by_id(element_id)
@then(u'I should not see "{error}" in the form')
def step_impl(context, error):
assert not context.driver.find_element_by_id('form-error').text == error
# DELETE SHOPCART
@given(u'the following shopcarts')
def step_impl(context):
""" Delete all Shopcarts and load new ones """
headers = {'Content-Type': 'application/json'}
context.resp = requests.delete(context.base_url + '/shopcarts/reset', headers=headers)
assert context.resp.status_code == 204
create_url = context.base_url + '/shopcarts'
for row in context.table:
data = {"user_id": row['user_id']}
if 'product_id' in context.table.headings:
data['products'] = [{
'pid': int( row['product_id']),
'quantity': int( row['quantity'])
}]
payload = json.dumps(data)
context.resp = requests.post(create_url, data=payload, headers=headers)
assert context.resp.status_code == 201
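# Hedged illustration only -- the shopcarts.feature file itself is not part of
# this module.  Judging from the step above, it supplies a Gherkin table shaped
# like the following, where the product_id and quantity columns are optional:
#
#   Given the following shopcarts
#       | user_id | product_id | quantity |
#       | 1       | 5          | 2        |
#       | 2       | 7          | 1        |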
@when(u'I visit Shopcart "{user_id}"')
def step_impl(context, user_id):
button_id = 'view-shopcart-' + user_id
context.driver.find_element_by_id(button_id).click()
wait = WebDriverWait(context.driver, 10)
wait.until(EC.visibility_of_element_located((By.ID, 'shopcart-header')))
@then(u'I should see "{message}" in the header')
def step_impl(context, message):
assert message in context.driver.find_element_by_id('shopcart-header').text
@when(u'I delete product "{product_id}" from the cart')
def step_impl(context, product_id):
button_id = 'product-' + product_id + "-delete"
context.driver.find_element_by_id(button_id).click()
@then(u'I should not see Shopcart "{user_id}" in the results')
def step_impl(context, user_id):
element = context.driver.find_element_by_id('shopcarts-table-list')
assert not element.find_elements_by_id('shopcart-' + user_id + '-row')
@then(u'I should see "{message}" on the cart page')
def step_impl(context, message):
parent = context.driver.find_element_by_class_name('mb-3')
child = parent.find_element_by_class_name('card-body')
assert message in child.text
@when(u'I add "{quantity}" of Product "{product_id}" to the cart')
def step_impl(context, quantity, product_id):
wait = WebDriverWait(context.driver, 10)
wait.until(EC.visibility_of_element_located((By.ID, 'product-' + product_id + '-select' )))
element = context.driver.find_element_by_id('product-' + product_id + '-select' )
element.clear()
element.send_keys(int(quantity))
@then(u'I should see "{quantity}" of Product "{product_id}" in the products list')
def step_impl(context, quantity, product_id):
element_value = context.driver.find_element_by_id('shopcart-product-' + product_id + '-quantity').get_attribute('value')
assert quantity == element_value
@when(u'I have "{quantity}" Shopcarts in the results')
def step_impl(context, quantity):
wait = WebDriverWait(context.driver, 10)
wait.until(EC.visibility_of_element_located((By.ID, 'shopcarts-table-list')))
element = context.driver.find_element_by_id('shopcarts-table-list')
assert len(element.find_elements_by_css_selector('tbody > tr')) == int(quantity)
@then(u'I should have "{quantity}" Shopcarts in the results')
def step_impl(context, quantity):
element = context.driver.find_element_by_id('shopcarts-table-list')
if int(quantity) > 0:
assert len(element.find_elements_by_css_selector('tbody > tr')) == int(quantity)
else:
assert element.find_elements_by_id('empty-shopcarts')
@when(u'I filter by product "{product_name}"')
def step_impl(context, product_name):
select_element = context.driver.find_element_by_id('filter')
for option in select_element.find_elements_by_tag_name('option'):
if option.text == product_name:
option.click()
break
@when(u'I update product "{product_id}" to quantity "{quantity}"')
def step_impl(context, product_id, quantity):
element = context.driver.find_element_by_id('shopcart-product-' + product_id + '-quantity')
element.clear()
element.send_keys(int(quantity))
element.send_keys(Keys.ENTER)
| nyu-devops-echo/shopcarts | features/steps/shopcart_steps.py | Python | apache-2.0 | 5,904 | [
"VisIt"
] | 3c94e2cfa08cc83f311714cdcb6e591f9a8508637a536cf10ff288772132e03f |
"""
Adapted from Cython/Compiler/Visitor.py, see this module for detailed
explanations.
"""
import inspect
miniast = None # avoid circular import AttributeError for sphinx-apidoc
import treepath
class TreeVisitor(object):
"""
Non-mutating visitor. Subclass and implement visit_MyNode methods.
A user can traverse a foreign AST by implementing
:py:class:`minivect.miniast.Context.getchildren`
"""
want_access_path = False
def __init__(self, context):
self.context = context
self.dispatch_table = {}
if self.want_access_path:
self.access_path = []
else:
self._visitchild = self.visit
def _find_handler(self, obj):
# to resolve, try entire hierarchy
cls = type(obj)
pattern = "visit_%s"
mro = inspect.getmro(cls)
handler_method = None
for mro_cls in mro:
handler_method = getattr(self, pattern % mro_cls.__name__, None)
if handler_method is not None:
return handler_method
raise RuntimeError("Visitor %r does not accept object: %s" % (self, obj))
def visit(self, obj, *args):
"Visit a single child."
try:
handler_method = self.dispatch_table[type(obj)]
except KeyError:
handler_method = self._find_handler(obj)
self.dispatch_table[type(obj)] = handler_method
return handler_method(obj)
def _visitchild(self, child, parent, attrname, idx):
self.access_path.append((parent, attrname, idx))
result = self.visit(child)
self.access_path.pop()
return result
def visit_childlist(self, child, parent=None, attr=None):
if isinstance(child, list):
childretval = [self._visitchild(child_node, parent, attr, idx)
for idx, child_node in enumerate(child)]
else:
childretval = self._visitchild(child, parent, attr, None)
if isinstance(childretval, list):
raise RuntimeError(
                'Cannot insert list here: %s in %r' % (attr, parent))
return childretval
def visitchildren(self, parent, attrs=None):
"Visits the children of the given node."
if parent is None:
return None
if attrs is None:
attrs = self.context.getchildren(parent)
result = {}
for attr in attrs:
child = getattr(parent, attr)
if child is not None:
result[attr] = self.visit_childlist(child, parent, attr)
return result
def treepath(self, node, xpath_expr):
return treepath.iterfind(node, xpath_expr)
def treepath_first(self, node, xpath_expr):
return treepath.find_first(node, xpath_expr)
def p(self, node):
node.print_tree(self.context)
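# Hedged, self-contained sketch of the MRO-based dispatch performed by
# _find_handler above.  The demo node classes and visitor are illustrative and
# not part of minivect; no Context object is needed because visitchildren()
# is never called here.
class DemoExpr(object):
    pass
class DemoAddExpr(DemoExpr):
    pass
class DemoVisitor(TreeVisitor):
    def visit_DemoExpr(self, node):
        return "generic expression"
    def visit_DemoAddExpr(self, node):
        return "add expression"
# DemoVisitor(context=None).visit(DemoAddExpr()) -> "add expression"
# DemoVisitor(context=None).visit(DemoExpr())    -> "generic expression"
# (an exact visit_<ClassName> match wins; otherwise the MRO is walked upwards)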
class VisitorTransform(TreeVisitor):
"""
Mutating transform. Each attribute is replaced by the result of the
corresponding visit_MyNode method.
"""
def visitchildren(self, parent, attrs=None):
result = super(VisitorTransform, self).visitchildren(parent, attrs)
for attr, newnode in result.iteritems():
if not type(newnode) is list:
setattr(parent, attr, newnode)
else:
# Flatten the list one level and remove any None
newlist = []
for x in newnode:
if x is not None:
if type(x) is list:
newlist += x
else:
newlist.append(x)
setattr(parent, attr, newlist)
return result
class GenericVisitor(TreeVisitor):
"Generic visitor that automatically visits children"
def visit_Node(self, node):
self.visitchildren(node)
return node
class GenericTransform(VisitorTransform, GenericVisitor):
"Generic transform that automatically visits children"
class MayErrorVisitor(TreeVisitor):
"""
Determine whether code generated by an AST can raise exceptions.
"""
may_error = False
def visit_Node(self, node):
self.visitchildren(node)
def visit_NodeWrapper(self, node):
self.may_error = (self.may_error or
self.context.may_error(node.opaque_node))
def visit_ForNode(self, node):
self.visit(node.init)
self.visit(node.condition)
self.visit(node.step)
class PrintTree(TreeVisitor):
"""
Print an AST, see also :py:class:`minivect.miniast.Node.print_tree`.
"""
indent = 0
want_access_path = True
def format_value(self, node):
import miniast
if node.is_temp:
format_value = node.repr_name
elif (isinstance(node, miniast.Variable) or
isinstance(node, miniast.FuncNameNode) or
node.is_funcarg):
format_value = node.name
elif node.is_binop or node.is_unop:
format_value = node.operator
elif node.is_constant:
format_value = node.value
elif node.is_sizeof:
format_value = str(node.type)
else:
return None
return format_value
def format_node(self, node, want_type_info=True):
result = type(node).__name__
format_value = self.format_value(node)
if node.is_expression and want_type_info:
if format_value is not None:
format_value = "%s, type=%s" % (format_value, node.type)
else:
format_value = "type=%s" % (node.type,)
if format_value:
return "%s(%s)" % (result, format_value)
else:
return result
def visit_Node(self, node):
if self.access_path:
parent, attr, idx = self.access_path[-1]
else:
attr = "(root)"
idx = None
prefix = "%s%s" % (self.indent * " ", attr)
if idx is not None:
prefix = "%s[%d]" % (prefix, idx)
print "%s: %s" % (prefix, self.format_node(node))
self.indent += 1
self.visitchildren(node)
        self.indent -= 1
 | markflorisson/minivect | minivect/minivisitor.py | Python | bsd-2-clause | 6,210 | [
"VisIt"
] | b5207da91310889794ab5d0a18f7fb2ba9e4cc3a16e2fb6a3279b2b00d994ce4 |
# main.py
# Aaron Taylor
# Moose Abumeeiz
#
# The main file for our final project. This is a replica of
# the popular game The Binding of Isaac: Rebirth.
#
from const import *
from pygame import *
from time import time as cTime
from random import *
from func import *
from Game import *
from menu import *
from JoystickController import *
import os
init() # Initialize pygame
joystick.init() # Allow joystick support
# Joystick controller
jController = None
# If there is a joystick, initialize with JoystickController
jCount = joystick.get_count()
if jCount > 0:
joysticks = [joystick.Joystick(i) for i in range(jCount)]
joysticks[0].init() # Default to first joystick
jController = JoystickController(joysticks[0], 0.5)
else:
joystick.quit() # Deinit joystick module
# Create display
screen = display.set_mode((WIDTH, HEIGHT))
# Current song setup
nextSong = ""
changeSong = -1
def playMusic(name, intro=""):
"Plays music with possible intro"
global nextSong, changeSong
if os.name == "posix": # Mac (music is broken)
return
# Reset variables
nextSong = ""
changeSong = -1
# If there is an intro, load and play it, set next songs
if len(intro) > 0:
intro = os.path.join('res', 'music', intro)
mixer.music.load(intro)
mixer.music.play(0) # Play music once
nextSong = os.path.join('res', 'music', name)
changeSong = mixer.Sound(intro).get_length() - 0.05
else:
# Just play the song normally
mixer.music.load(os.path.join('res', 'music', name))
mixer.music.play(-1)
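# Hedged alternative sketch (not used by this game): pygame can also chain an
# intro straight into the next track with mixer.music.queue instead of the
# manual changeSong timer above.  The paths are illustrative.
def play_music_with_intro(intro_path, loop_path):
	"Play intro_path once, then start loop_path as soon as it finishes"
	mixer.music.load(intro_path)
	mixer.music.play(0)           # play the intro a single time
	mixer.music.queue(loop_path)  # the queued track starts when the intro ends
	# The queued track itself only plays once; to loop it forever, re-arm it
	# from the event loop via mixer.music.set_endevent(), or check whether the
	# installed pygame version accepts a loops= argument to queue().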
def showSymbol(screen, length, index, textures):
"Show loading screen symbol"
start = cTime()
texture = textures["loading"][index]
w = texture.get_width()
h = texture.get_height()
running = True
while running:
for e in event.get():
if e.type == QUIT or e.type == KEYDOWN and e.key == 27:
quit()
# Draw the centered texture
screen.fill((0,0,0))
screen.blit(texture, (WIDTH//2-w//2,HEIGHT//2-h//2))
display.flip()
		# Only run for a certain amount of time
if cTime() - start >= length:
running = False
# Setup display
display.set_caption("The Binding of Isaac: Rebirth")
display.set_icon(image.load(os.path.join('res','textures', 'icon.png')))
# Load all needed textures
textures = {
"hearts": loadTexture("hearts.png"),
"pickups": loadTexture("pickups.png"),
"character": [darken(loadTexture(["lazarus.png", "isaac.png", "eve.png"][i]), .1) for i in range(3)],
"floors": [loadTexture("basement.png"),
loadTexture("caves.png"),
loadTexture("catacombs.png"),
loadTexture("depths.png"),
loadTexture("necropolis.png"),
loadTexture("womb.png"),
loadTexture("utero.png"),
loadTexture("shop.png"),
],
"controls": loadTexture("controls.png"),
"doors": [[loadTexture("door.png"), loadTexture("dark_door.png"), loadTexture("red_door.png")],
loadTexture("treasure_door.png"),
loadTexture("boss_door.png"),
loadTexture("devil_door.png"),
loadTexture("angel_door.png")],
"controls": loadTexture("controls.png"),
"rocks": darken(loadTexture("rocks.png"), .1),
"poops": loadTexture("poops.png"),
"tears": [loadTexture("tears.png"), loadTexture("tear_pop.png")],
"fires": [loadTexture("fire_top.png"), loadTexture("fire_bottom.png")],
"bombs": [loadTexture("bombs.png"), [loadTexture("explosion.png")], loadTexture("smut.png")],
"coins": [loadTexture("penny.png"), loadTexture("nickel.png"), loadTexture("dime.png")],
"keys": loadTexture("keys.png"),
"pickupHearts": loadTexture("pickup_hearts.png"),
"overlays": [loadTexture("%i.png"%i, dir="overlays") for i in range(5)],
"shading": loadTexture("shading.png"),
"loading": [loadTexture("%i.png"%(i+1), dir="loading") for i in range(56)],
"pauseCard": loadTexture("pauseCard.png", dir="pause"),
"seedCard": loadTexture("seedcard.png", dir="pause"),
"arrow": loadTexture("arrow.png", dir="pause", double=False),
"pills": loadTexture("pills.png"),
"trapdoor": loadTexture("trap_door.png"),
"phd": loadTexture("phd.png"),
"streak": loadTexture("streak.png"),
"map": {
"background": loadTexture("minimap.png").subsurface(0, 0, 112, 102),
"in": loadTexture("minimap.png").subsurface(113, 0, 16, 16),
"entered": loadTexture("minimap.png").subsurface(113, 16, 16, 16),
"seen": loadTexture("minimap.png").subsurface(113, 32, 16, 16),
"item": loadTexture("minimap.png").subsurface(113, 48, 16, 16),
"boss": loadTexture("minimap.png").subsurface(113, 64, 16, 16),
},
"enemies": {
"fly": loadTexture("fly.png", dir="enemies"),
"pooter": loadTexture("pooter.png", dir="enemies"),
"maw": loadTexture("maw.png", dir="enemies"),
"boil": loadTexture("boil.png", dir="enemies"),
"host": loadTexture("host.png", dir="enemies"),
},
"bosses": {
"gurdy": loadTexture("gurdy.png", dir="bosses"),
"duke": loadTexture("duke.png", dir="bosses"),
}
}
# Load all sounds we need
sounds = {
"pop": loadSound("pop.wav"),
"explosion": loadSound("explosion.wav"),
"hurt": [loadSound("hurt1.wav"), loadSound("hurt2.wav")],
"tear": [loadSound("tear1.wav"), loadSound("tear2.wav"), loadSound("tearPop.wav"), loadSound("tearSplat.wav")],
"unlock": loadSound("unlock.wav"),
"devilRoomAppear": loadSound("devilRoomAppear.wav"),
"angelRoomAppear": loadSound("angelRoomAppear.wav"),
"coinDrop": loadSound("coinDrop.wav"),
"coinPickup": loadSound("coinPickup.wav"),
"fireBurn": loadSound("fireBurning.wav"),
"steam": loadSound("steam.wav"),
"keyDrop": loadSound("keyDrop.wav"),
"keyPickup": loadSound("keyPickup.wav"),
"heartIntake": loadSound("heartIntake.wav"),
"holy": loadSound("holy.wav"),
"rockBreak": loadSound("rockBreak.wav"),
"doorOpen": loadSound("doorOpen.wav"),
"doorClose": loadSound("doorClose.wav"),
"deathBurst": loadSound("deathBurst.wav"),
"pageTurn": loadSound("pageTurn.wav"),
"error": loadSound("error.wav"),
"selectLeft": loadSound("selectLeft.wav"),
"selectRight": loadSound("selectRight.wav"),
"bossIntro": loadSound("bossIntro.wav"),
}
# Load fonts
fonts = {
"main": loadCFont("main.png", 20, 16, 36, size=1.8),
"pickups": loadCFont("pickup.png", 10, 12, 10),
"ticks": loadCFont("ticks.png", 4, 17 , 8),
}
# Begin main loop
running = True
while running:
# Start by playing the title screen music
playMusic("titleScreenLoop.ogg", intro="titleScreenIntro.ogg")
# Begin menu
characterType, controls, floorSeed = menu(screen, jController, sounds,nextSong, changeSong)
# Floor setup
seed(floorSeed)
# Define current time
currTime = 0
# Define clock (mainly for FPS)
clock = time.Clock()
# Play the choir noise when the user chooses a level
	# and show the random symbol
playMusic("titleScreenJingle.ogg")
showSymbol(screen, 4, randint(0, 55), textures)
# Play the normal game music
playMusic("basementLoop.ogg", intro="basementIntro.ogg")
# Start game
game = Game(characterType, controls, floorSeed)
game.run(screen, sounds, textures, fonts, joystick=jController)
quit()
| ExPHAT/binding-of-isaac | main.py | Python | mit | 6,887 | [
"MOOSE"
] | a9fa8929e948ed8210913def0fb1176c401e13d8750666c00870a1b4957c3b88 |
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_poolgroup
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of PoolGroup Avi RESTful Object
description:
- This module is used to configure PoolGroup object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
cloud_config_cksum:
description:
- Checksum of cloud configuration for poolgroup.
- Internally set by cloud connector.
cloud_ref:
description:
- It is a reference to an object of type cloud.
created_by:
description:
- Name of the user who created the object.
deployment_policy_ref:
description:
            - When set up, the autoscale manager will automatically promote new pools into production when deployment goals are met.
- It is a reference to an object of type poolgroupdeploymentpolicy.
description:
description:
- Description of pool group.
fail_action:
description:
- Enable an action - close connection, http redirect, or local http response - when a pool group failure happens.
- By default, a connection will be closed, in case the pool group experiences a failure.
members:
description:
- List of pool group members object of type poolgroupmember.
min_servers:
description:
- The minimum number of servers to distribute traffic to.
- Allowed values are 1-65535.
- Special values are 0 - 'disable'.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
name:
description:
- The name of the pool group.
required: true
priority_labels_ref:
description:
- Uuid of the priority labels.
- If not provided, pool group member priority label will be interpreted as a number with a larger number considered higher priority.
- It is a reference to an object of type prioritylabels.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the pool group.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create PoolGroup object
avi_poolgroup:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_poolgroup
"""
RETURN = '''
obj:
description: PoolGroup (api/poolgroup) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
cloud_config_cksum=dict(type='str',),
cloud_ref=dict(type='str',),
created_by=dict(type='str',),
deployment_policy_ref=dict(type='str',),
description=dict(type='str',),
fail_action=dict(type='dict',),
members=dict(type='list',),
min_servers=dict(type='int',),
name=dict(type='str', required=True),
priority_labels_ref=dict(type='str',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'poolgroup',
set([]))
if __name__ == '__main__':
main()
| kbrebanov/ansible | lib/ansible/modules/network/avi/avi_poolgroup.py | Python | gpl-3.0 | 5,203 | [
"VisIt"
] | 1a4a24af5fdae279c9bcb22d44bf5f44a0c63007769074d3a5d7e835c989c513 |
########################################################################
# Author : Andrei Tsaregorodtsev
########################################################################
"""
Utilities for managing DIRAC configuration:
getCEsFromCS
getUnusedGridCEs
getUnusedGridSEs
getSiteUpdates
getSEUpdates
"""
__RCSID__ = "$Id$"
import re
import socket
from urlparse import urlparse
from DIRAC import gConfig, gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities import List
from DIRAC.Core.Utilities.Grid import getBdiiCEInfo, getBdiiSEInfo, ldapService
from DIRAC.Core.Utilities.SitesDIRACGOCDBmapping import getDIRACSiteName, getDIRACSesForHostName
from DIRAC.ConfigurationSystem.Client.Helpers.Path import cfgPath
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOs, getVOOption
from DIRAC.ConfigurationSystem.Client.PathFinder import getDatabaseSection
def getGridVOs():
""" Get all the VOMS VO names served by this DIRAC service
"""
voNames = []
result = getVOs()
if not result['OK']:
return result
else:
vos = result['Value']
for vo in vos:
vomsVO = getVOOption(vo, "VOMSName")
if vomsVO:
voNames.append(vomsVO)
return S_OK(voNames)
def getCEsFromCS():
""" Get all the CEs defined in the CS
"""
knownCEs = []
result = gConfig.getSections('/Resources/Sites')
if not result['OK']:
return result
grids = result['Value']
for grid in grids:
result = gConfig.getSections('/Resources/Sites/%s' % grid)
if not result['OK']:
return result
sites = result['Value']
for site in sites:
opt = gConfig.getOptionsDict('/Resources/Sites/%s/%s' % (grid, site))['Value']
ces = List.fromChar(opt.get('CE', ''))
knownCEs += ces
return S_OK(knownCEs)
def getSEsFromCS(protocol='srm'):
""" Get all the SEs defined in the CS
"""
knownSEs = {}
result = gConfig.getSections('/Resources/StorageElements')
if not result['OK']:
return result
ses = result['Value']
for se in ses:
seSection = '/Resources/StorageElements/%s' % se
result = gConfig.getSections(seSection)
if not result['OK']:
continue
accesses = result['Value']
for access in accesses:
seProtocol = gConfig.getValue(cfgPath(seSection, access, 'Protocol'), '')
if seProtocol.lower() == protocol.lower() or protocol == 'any':
host = gConfig.getValue(cfgPath(seSection, access, 'Host'), '')
knownSEs.setdefault(host, [])
knownSEs[host].append(se)
else:
continue
return S_OK(knownSEs)
def getGridCEs(vo, bdiiInfo=None, ceBlackList=None, hostURL=None, glue2=False):
""" Get all the CEs available for a given VO and having queues in Production state
"""
knownCEs = set()
if ceBlackList is not None:
knownCEs = knownCEs.union(set(ceBlackList))
ceBdiiDict = bdiiInfo
if bdiiInfo is None:
result = getBdiiCEInfo(vo, host=hostURL, glue2=glue2)
if not result['OK']:
return result
ceBdiiDict = result['Value']
siteDict = {}
for site in ceBdiiDict:
siteCEs = set(ceBdiiDict[site]['CEs'].keys())
newCEs = siteCEs - knownCEs
if not newCEs:
continue
ceFullDict = {}
for ce in newCEs:
ceDict = {}
ceInfo = ceBdiiDict[site]['CEs'][ce]
ceType = 'Unknown'
ceDict['Queues'] = []
for queue in ceInfo['Queues']:
queueStatus = ceInfo['Queues'][queue].get('GlueCEStateStatus', 'UnknownStatus')
if 'production' in queueStatus.lower():
ceType = ceInfo['Queues'][queue].get('GlueCEImplementationName', '')
ceDict['Queues'].append(queue)
if not ceDict['Queues']:
continue
ceDict['CEType'] = ceType
ceDict['GOCSite'] = site
ceDict['CEID'] = ce
systemName = ceInfo.get('GlueHostOperatingSystemName', 'Unknown')
systemVersion = ceInfo.get('GlueHostOperatingSystemVersion', 'Unknown')
systemRelease = ceInfo.get('GlueHostOperatingSystemRelease', 'Unknown')
ceDict['System'] = (systemName, systemVersion, systemRelease)
ceFullDict[ce] = ceDict
siteDict[site] = ceFullDict
result = S_OK(siteDict)
result['BdiiInfo'] = ceBdiiDict
return result
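# Hedged usage sketch (the VO name is illustrative): each per-CE dictionary
# returned above carries the keys 'CEID', 'CEType', 'GOCSite', 'Queues' and
# 'System', so newly discovered CEs can be listed like this:
#
#   result = getGridCEs('biomed')
#   if result['OK']:
#     for site, ceDict in result['Value'].items():
#       for ce, info in ceDict.items():
#         gLogger.notice("%s %s %s queues=%s" % (site, ce, info['CEType'],
#                                                ','.join(info['Queues'])))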
def getSiteUpdates(vo, bdiiInfo=None, log=None):
""" Get all the necessary updates for the already defined sites and CEs
"""
def addToChangeSet(entry, changeSet):
""" Inner function to update changeSet with entry (a tuple)
:param tuple entry: entry to add to changeSet
:param set changeSet: set collecting stuff to change
"""
_section, _option, value, new_value = entry
if new_value and new_value != value:
changeSet.add(entry)
if log is None:
log = gLogger
ceBdiiDict = bdiiInfo
if bdiiInfo is None:
result = getBdiiCEInfo(vo)
if not result['OK']:
return result
ceBdiiDict = result['Value']
changeSet = set()
for site in ceBdiiDict:
result = getDIRACSiteName(site)
if not result['OK']:
continue
siteNames = result['Value']
for siteName in siteNames:
siteSection = cfgPath('/Resources', 'Sites', siteName.split('.')[0], siteName)
result = gConfig.getOptionsDict(siteSection)
if not result['OK']:
continue
siteDict = result['Value']
# Current CS values
coor = siteDict.get('Coordinates', 'Unknown')
mail = siteDict.get('Mail', 'Unknown').replace(' ', '')
description = siteDict.get('Description', 'Unknown')
description = description.replace(' ,', ',')
longitude = ceBdiiDict[site].get('GlueSiteLongitude', '').strip()
latitude = ceBdiiDict[site].get('GlueSiteLatitude', '').strip()
# Current BDII value
newcoor = ''
if longitude and latitude:
newcoor = "%s:%s" % (longitude, latitude)
newmail = ceBdiiDict[site].get('GlueSiteSysAdminContact', '').replace('mailto:', '').strip()
newdescription = ceBdiiDict[site].get('GlueSiteDescription', '').strip()
newdescription = ", ".join([line.strip() for line in newdescription.split(",")])
# Adding site data to the changes list
addToChangeSet((siteSection, 'Coordinates', coor, newcoor), changeSet)
addToChangeSet((siteSection, 'Mail', mail, newmail), changeSet)
addToChangeSet((siteSection, 'Description', description, newdescription), changeSet)
ces = gConfig.getValue(cfgPath(siteSection, 'CE'), [])
for ce in ces:
ceSection = cfgPath(siteSection, 'CEs', ce)
ceDict = {}
result = gConfig.getOptionsDict(ceSection)
if result['OK']:
ceDict = result['Value']
else:
if ceBdiiDict[site]['CEs'].get(ce, None):
log.notice("Adding new CE", "%s to site %s/%s" % (ce, siteName, site))
ceInfo = ceBdiiDict[site]['CEs'].get(ce, None)
if ceInfo is None:
ceType = ceDict.get('CEType', '')
continue
# Current CS CE info
arch = ceDict.get('architecture', 'Unknown')
OS = ceDict.get('OS', 'Unknown')
si00 = ceDict.get('SI00', 'Unknown')
ceType = ceDict.get('CEType', 'Unknown')
ram = ceDict.get('MaxRAM', 'Unknown')
submissionMode = ceDict.get('SubmissionMode', 'Unknown')
# Current BDII CE info
newarch = ceBdiiDict[site]['CEs'][ce].get('GlueHostArchitecturePlatformType', '').strip()
systemName = ceInfo.get('GlueHostOperatingSystemName', '').strip()
systemVersion = ceInfo.get('GlueHostOperatingSystemVersion', '').strip()
systemRelease = ceInfo.get('GlueHostOperatingSystemRelease', '').strip()
newOS = ''
if systemName and systemVersion and systemRelease:
newOS = '_'.join((systemName, systemVersion, systemRelease))
newsi00 = ceInfo.get('GlueHostBenchmarkSI00', '').strip()
newCEType = 'Unknown'
for queue in ceInfo['Queues']:
queueDict = ceInfo['Queues'][queue]
newCEType = queueDict.get('GlueCEImplementationName', '').strip()
if newCEType:
break
if newCEType == 'ARC-CE':
newCEType = 'ARC'
newSubmissionMode = None
if newCEType in ['ARC', 'CREAM']:
newSubmissionMode = "Direct"
newRAM = ceInfo.get('GlueHostMainMemoryRAMSize', '').strip()
# Protect from unreasonable values
if newRAM and int(newRAM) > 150000:
newRAM = ''
# Adding CE data to the change list
addToChangeSet((ceSection, 'architecture', arch, newarch), changeSet)
addToChangeSet((ceSection, 'OS', OS, newOS), changeSet)
addToChangeSet((ceSection, 'SI00', si00, newsi00), changeSet)
addToChangeSet((ceSection, 'CEType', ceType, newCEType), changeSet)
addToChangeSet((ceSection, 'MaxRAM', ram, newRAM), changeSet)
if submissionMode == "Unknown" and newSubmissionMode:
addToChangeSet((ceSection, 'SubmissionMode', submissionMode, newSubmissionMode), changeSet)
queues = ceInfo['Queues'].keys()
for queue in queues:
queueInfo = ceInfo['Queues'][queue]
queueStatus = queueInfo['GlueCEStateStatus']
queueSection = cfgPath(ceSection, 'Queues', queue)
queueDict = {}
result = gConfig.getOptionsDict(queueSection)
if result['OK']:
queueDict = result['Value']
else:
if queueStatus.lower() == "production":
log.notice("Adding new queue", "%s to CE %s" % (queue, ce))
else:
continue
# Current CS queue info
maxCPUTime = queueDict.get('maxCPUTime', 'Unknown')
si00 = queueDict.get('SI00', 'Unknown')
maxTotalJobs = queueDict.get('MaxTotalJobs', 'Unknown')
# Current BDII queue info
newMaxCPUTime = queueInfo.get('GlueCEPolicyMaxCPUTime', '')
if newMaxCPUTime == "4" * len(newMaxCPUTime) or newMaxCPUTime == "9" * len(newMaxCPUTime):
newMaxCPUTime = ''
wallTime = queueInfo.get('GlueCEPolicyMaxWallClockTime', '')
if wallTime == "4" * len(wallTime) or wallTime == "9" * len(wallTime):
wallTime = ''
if wallTime and int(wallTime) > 0:
if not newMaxCPUTime:
newMaxCPUTime = str(int(0.8 * int(wallTime)))
else:
if int(wallTime) <= int(newMaxCPUTime):
newMaxCPUTime = str(int(0.8 * int(wallTime)))
newSI00 = ''
caps = queueInfo.get('GlueCECapability', [])
if isinstance(caps, basestring):
caps = [caps]
for cap in caps:
if 'CPUScalingReferenceSI00' in cap:
newSI00 = cap.split('=')[-1]
# Adding queue info to the CS
addToChangeSet((queueSection, 'maxCPUTime', maxCPUTime, newMaxCPUTime), changeSet)
addToChangeSet((queueSection, 'SI00', si00, newSI00), changeSet)
if maxTotalJobs == "Unknown":
newTotalJobs = min(1000, int(int(queueInfo.get('GlueCEInfoTotalCPUs', 0)) / 2))
newWaitingJobs = max(2, int(newTotalJobs * 0.1))
newTotalJobs = str(newTotalJobs)
newWaitingJobs = str(newWaitingJobs)
addToChangeSet((queueSection, 'MaxTotalJobs', '', newTotalJobs), changeSet)
addToChangeSet((queueSection, 'MaxWaitingJobs', '', newWaitingJobs), changeSet)
# Updating eligible VO list
VOs = set()
if queueDict.get('VO', ''):
VOs = set([q.strip() for q in queueDict.get('VO', '').split(',') if q])
if vo not in VOs:
VOs.add(vo)
VOs = list(VOs)
newVOs = ','.join(VOs)
addToChangeSet((queueSection, 'VO', '', newVOs), changeSet)
return S_OK(changeSet)
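# Hedged helper sketch (illustration only, not part of the original module):
# the entries collected by getSiteUpdates() are (section, option, oldValue,
# newValue) tuples, so they can be reviewed before being committed to the CS.
def _formatChangeSet(changeSet):
  """ Return the pending updates as sorted, human-readable lines """
  lines = []
  for section, option, oldValue, newValue in sorted(changeSet):
    lines.append("%s/%s: %s -> %s" % (section, option, oldValue, newValue))
  return lines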
def getGridSEs(vo, bdiiInfo=None, seBlackList=None):
""" Get all the SEs available for a given VO
"""
seBdiiDict = bdiiInfo
if bdiiInfo is None:
result = getBdiiSEInfo(vo)
if not result['OK']:
return result
seBdiiDict = result['Value']
knownSEs = set()
if seBlackList is not None:
knownSEs = knownSEs.union(set(seBlackList))
siteDict = {}
for site in seBdiiDict:
for gridSE in seBdiiDict[site]['SEs']:
seDict = seBdiiDict[site]['SEs'][gridSE]
# if "lhcb" in seDict['GlueSAName']:
# print '+'*80
# print gridSE
# for k,v in seDict.items():
# print k,'\t',v
if gridSE not in knownSEs:
siteDict.setdefault(site, {})
if isinstance(seDict['GlueSAAccessControlBaseRule'], list):
voList = [re.sub('^VO:', '', s) for s in seDict['GlueSAAccessControlBaseRule']]
else:
voList = [re.sub('^VO:', '', seDict['GlueSAAccessControlBaseRule'])]
siteDict[site][gridSE] = {'GridSite': seDict['GlueSiteUniqueID'],
'BackendType': seDict['GlueSEImplementationName'],
'Description': seDict.get('GlueSEName', '-'),
'VOs': voList
}
result = S_OK(siteDict)
result['BdiiInfo'] = seBdiiDict
return result
def getGridSRMs(vo, bdiiInfo=None, srmBlackList=None, unUsed=False):
result = ldapService(serviceType='SRM', vo=vo)
if not result['OK']:
return result
srmBdiiDict = result['Value']
knownSRMs = set()
if srmBlackList is not None:
knownSRMs = knownSRMs.union(set(srmBlackList))
siteSRMDict = {}
for srm in srmBdiiDict:
srm = dict(srm)
endPoint = srm.get('GlueServiceEndpoint', '')
srmHost = ''
if endPoint:
srmHost = urlparse(endPoint).hostname
if not srmHost:
continue
if srmHost in knownSRMs:
continue
if unUsed:
result = getDIRACSesForHostName(srmHost)
if not result['OK']:
return result
diracSEs = result['Value']
if diracSEs:
# If it is a known SRM and only new SRMs are requested, continue
continue
site = srm.get('GlueForeignKey', '').replace('GlueSiteUniqueID=', '')
siteSRMDict.setdefault(site, {})
siteSRMDict[site][srmHost] = srm
if bdiiInfo is None:
result = getBdiiSEInfo(vo)
if not result['OK']:
return result
seBdiiDict = dict(result['Value'])
else:
seBdiiDict = dict(bdiiInfo)
srmSeDict = {}
for site in siteSRMDict:
srms = siteSRMDict[site].keys()
for srm in srms:
if seBdiiDict.get(site, {}).get('SEs', {}).get(srm, {}):
srmSeDict.setdefault(site, {})
srmSeDict[site].setdefault(srm, {})
srmSeDict[site][srm]['SRM'] = siteSRMDict[site][srm]
srmSeDict[site][srm]['SE'] = seBdiiDict[site]['SEs'][srm]
return S_OK(srmSeDict)
def getSRMUpdates(vo, bdiiInfo=None):
changeSet = set()
def addToChangeSet(entry, changeSet):
_section, _option, value, new_value = entry
if new_value and new_value != value:
changeSet.add(entry)
result = getGridSRMs(vo, bdiiInfo=bdiiInfo)
if not result['OK']:
return result
srmBdiiDict = result['Value']
result = getSEsFromCS()
if not result['OK']:
return result
seDict = result['Value']
result = getVOs()
if result['OK']:
csVOs = set(result['Value'])
else:
csVOs = set([vo])
for seHost, diracSE in seDict.items():
seSection = '/Resources/StorageElements/%s' % diracSE[0]
# Look up existing values first
description = gConfig.getValue(cfgPath(seSection, 'Description'), 'Unknown')
backend = gConfig.getValue(cfgPath(seSection, 'BackendType'), 'Unknown')
vos = gConfig.getValue(cfgPath(seSection, 'VO'), 'Unknown').replace(' ', '')
size = gConfig.getValue(cfgPath(seSection, 'TotalSize'), 'Unknown')
# Look up current BDII values
srmDict = {}
seBdiiDict = {}
for site in srmBdiiDict:
if seHost in srmBdiiDict[site]:
srmDict = srmBdiiDict[site][seHost]['SRM']
seBdiiDict = srmBdiiDict[site][seHost]['SE']
break
if not srmDict or not seBdiiDict:
continue
newDescription = seBdiiDict.get('GlueSEName', 'Unknown')
newBackend = seBdiiDict.get('GlueSEImplementationName', 'Unknown')
newSize = seBdiiDict.get('GlueSESizeTotal', 'Unknown')
addToChangeSet((seSection, 'Description', description, newDescription), changeSet)
addToChangeSet((seSection, 'BackendType', backend, newBackend), changeSet)
addToChangeSet((seSection, 'TotalSize', size, newSize), changeSet)
# Evaluate VOs if no space token defined, otherwise this is VO specific
spaceToken = ''
for i in range(1, 10):
protocol = gConfig.getValue(cfgPath(seSection, 'AccessProtocol.%d' % i, 'Protocol'), '')
if protocol.lower() == 'srm':
spaceToken = gConfig.getValue(cfgPath(seSection, 'AccessProtocol.%d' % i, 'SpaceToken'), '')
break
if not spaceToken:
bdiiVOs = srmDict.get('GlueServiceAccessControlBaseRule', [])
bdiiVOs = set([re.sub('^VO:', '', rule) for rule in bdiiVOs])
seVOs = csVOs.intersection(bdiiVOs)
newVOs = ','.join(seVOs)
addToChangeSet((seSection, 'VO', vos, newVOs), changeSet)
return S_OK(changeSet)
def getDBParameters(fullname):
"""
Retrieve Database parameters from CS
fullname should be of the form <System>/<DBname>
  The method fails if Host, User, Password or DBName cannot be found either in
  the database section or in the common /Systems/Databases section.
  Port defaults to 3306 (the MySQL standard port) when it is not defined.
  Returns a dictionary with the keys: 'Host', 'Port', 'User', 'Password'
  and 'DBName'
"""
cs_path = getDatabaseSection(fullname)
parameters = {}
result = gConfig.getOption(cs_path + '/Host')
if not result['OK']:
# No host name found, try at the common place
result = gConfig.getOption('/Systems/Databases/Host')
if not result['OK']:
return S_ERROR('Failed to get the configuration parameter: Host')
dbHost = result['Value']
# Check if the host is the local one and then set it to 'localhost' to use
# a socket connection
if dbHost != 'localhost':
localHostName = socket.getfqdn()
if localHostName == dbHost:
dbHost = 'localhost'
parameters['Host'] = dbHost
# Mysql standard
dbPort = 3306
result = gConfig.getOption(cs_path + '/Port')
if not result['OK']:
# No individual port number found, try at the common place
result = gConfig.getOption('/Systems/Databases/Port')
if result['OK']:
dbPort = int(result['Value'])
else:
dbPort = int(result['Value'])
parameters['Port'] = dbPort
result = gConfig.getOption(cs_path + '/User')
if not result['OK']:
# No individual user name found, try at the common place
result = gConfig.getOption('/Systems/Databases/User')
if not result['OK']:
return S_ERROR('Failed to get the configuration parameter: User')
dbUser = result['Value']
parameters['User'] = dbUser
result = gConfig.getOption(cs_path + '/Password')
if not result['OK']:
# No individual password found, try at the common place
result = gConfig.getOption('/Systems/Databases/Password')
if not result['OK']:
return S_ERROR('Failed to get the configuration parameter: Password')
dbPass = result['Value']
parameters['Password'] = dbPass
result = gConfig.getOption(cs_path + '/DBName')
if not result['OK']:
return S_ERROR('Failed to get the configuration parameter: DBName')
dbName = result['Value']
parameters['DBName'] = dbName
return S_OK(parameters)
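# Hedged usage sketch: the System/DBName below is only an example and a real
# call needs a configured Configuration Service.
#
#   result = getDBParameters('DataManagement/FileCatalogDB')
#   if result['OK']:
#     db = result['Value']
#     url = "%s@%s:%d/%s" % (db['User'], db['Host'], db['Port'], db['DBName'])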
def getElasticDBParameters(fullname):
"""
Retrieve Database parameters from CS
fullname should be of the form <System>/<DBname>
"""
cs_path = getDatabaseSection(fullname)
parameters = {}
result = gConfig.getOption(cs_path + '/Host')
if not result['OK']:
# No host name found, try at the common place
result = gConfig.getOption('/Systems/NoSQLDatabases/Host')
if not result['OK']:
gLogger.warn("Failed to get the configuration parameter: Host. Using localhost")
dbHost = 'localhost'
else:
dbHost = result['Value']
else:
dbHost = result['Value']
# Check if the host is the local one and then set it to 'localhost' to use
# a socket connection
if dbHost != 'localhost':
localHostName = socket.getfqdn()
if localHostName == dbHost:
dbHost = 'localhost'
parameters['Host'] = dbHost
# Elasticsearch standard port
result = gConfig.getOption(cs_path + '/Port')
if not result['OK']:
# No individual port number found, try at the common place
result = gConfig.getOption('/Systems/NoSQLDatabases/Port')
if not result['OK']:
gLogger.warn("Failed to get the configuration parameter: Port. Using 9200")
dbPort = 9200
else:
dbPort = int(result['Value'])
else:
dbPort = int(result['Value'])
parameters['Port'] = dbPort
result = gConfig.getOption(cs_path + '/User')
if not result['OK']:
# No individual user name found, try at the common place
result = gConfig.getOption('/Systems/NoSQLDatabases/User')
if not result['OK']:
gLogger.warn("Failed to get the configuration parameter: User. Assuming no user/password is provided/needed")
dbUser = None
else:
dbUser = result['Value']
else:
dbUser = result['Value']
parameters['User'] = dbUser
result = gConfig.getOption(cs_path + '/Password')
if not result['OK']:
# No individual password found, try at the common place
result = gConfig.getOption('/Systems/NoSQLDatabases/Password')
if not result['OK']:
gLogger.warn("Failed to get the configuration parameter: Password. Assuming no user/password is provided/needed")
dbPass = None
else:
dbPass = result['Value']
else:
dbPass = result['Value']
parameters['Password'] = dbPass
result = gConfig.getOption(cs_path + '/SSL')
if not result['OK']:
# No SSL option found, try at the common place
result = gConfig.getOption('/Systems/NoSQLDatabases/SSL')
if not result['OK']:
gLogger.warn("Failed to get the configuration parameter: SSL. Assuming SSL is needed")
ssl = True
else:
ssl = False if result['Value'].lower() in ('false', 'no', 'n') else True
else:
ssl = False if result['Value'].lower() in ('false', 'no', 'n') else True
parameters['SSL'] = ssl
return S_OK(parameters)
| fstagni/DIRAC | ConfigurationSystem/Client/Utilities.py | Python | gpl-3.0 | 22,711 | [
"DIRAC"
] | 297d872f598eb610bdcb59d7041ac6949b6d60216686e0a2aad37f2dfc0ce591 |
'''
Importing PandasTools enables several features that allow for using RDKit molecules as columns of a Pandas dataframe.
If the dataframe contains a molecule format in a column (e.g. smiles), like in this example:
>>> from rdkit.Chem import PandasTools
>>> import pandas as pd
>>> import os
>>> from rdkit import RDConfig
>>> antibiotics = pd.DataFrame(columns=['Name','Smiles'])
>>> antibiotics = antibiotics.append({'Smiles':'CC1(C(N2C(S1)C(C2=O)NC(=O)CC3=CC=CC=C3)C(=O)O)C','Name':'Penicilline G'}, ignore_index=True)#Penicilline G
>>> antibiotics = antibiotics.append({'Smiles':'CC1(C2CC3C(C(=O)C(=C(C3(C(=O)C2=C(C4=C1C=CC=C4O)O)O)O)C(=O)N)N(C)C)O','Name':'Tetracycline'}, ignore_index=True)#Tetracycline
>>> antibiotics = antibiotics.append({'Smiles':'CC1(C(N2C(S1)C(C2=O)NC(=O)C(C3=CC=CC=C3)N)C(=O)O)C','Name':'Ampicilline'}, ignore_index=True)#Ampicilline
>>> print([str(x) for x in antibiotics.columns])
['Name', 'Smiles']
>>> print(antibiotics)
Name Smiles
0 Penicilline G CC1(C(N2C(S1)C(C2=O)NC(=O)CC3=CC=CC=C3)C(=O)O)C
1 Tetracycline CC1(C2CC3C(C(=O)C(=C(C3(C(=O)C2=C(C4=C1C=CC=C4...
2 Ampicilline CC1(C(N2C(S1)C(C2=O)NC(=O)C(C3=CC=CC=C3)N)C(=O...
a new column can be created holding the respective RDKit molecule objects. The fingerprint can be included to accelerate substructure searches on the dataframe.
>>> PandasTools.AddMoleculeColumnToFrame(antibiotics,'Smiles','Molecule',includeFingerprints=True)
>>> print([str(x) for x in antibiotics.columns])
['Name', 'Smiles', 'Molecule']
A substructure filter can be applied to the dataframe using the RDKit molecule column, because the ">=" operator has been modified to work as a substructure check.
Thus the antibiotics containing the beta-lactam ring "C1C(=O)NC1" can be obtained by
>>> beta_lactam = Chem.MolFromSmiles('C1C(=O)NC1')
>>> beta_lactam_antibiotics = antibiotics[antibiotics['Molecule'] >= beta_lactam]
>>> print(beta_lactam_antibiotics[['Name','Smiles']])
Name Smiles
0 Penicilline G CC1(C(N2C(S1)C(C2=O)NC(=O)CC3=CC=CC=C3)C(=O)O)C
2 Ampicilline CC1(C(N2C(S1)C(C2=O)NC(=O)C(C3=CC=CC=C3)N)C(=O...
It is also possible to load an SDF file into a dataframe.
>>> sdfFile = os.path.join(RDConfig.RDDataDir,'NCI/first_200.props.sdf')
>>> frame = PandasTools.LoadSDF(sdfFile,smilesName='SMILES',molColName='Molecule',includeFingerprints=True)
>>> frame.info # doctest: +SKIP
<bound method DataFrame.info of <class 'pandas.core.frame.DataFrame'>
Int64Index: 200 entries, 0 to 199
Data columns:
AMW 200 non-null values
CLOGP 200 non-null values
CP 200 non-null values
CR 200 non-null values
DAYLIGHT.FPG 200 non-null values
DAYLIGHT_CLOGP 200 non-null values
FP 200 non-null values
ID 200 non-null values
ISM 200 non-null values
LIPINSKI_VIOLATIONS 200 non-null values
NUM_HACCEPTORS 200 non-null values
NUM_HDONORS 200 non-null values
NUM_HETEROATOMS 200 non-null values
NUM_LIPINSKIHACCEPTORS 200 non-null values
NUM_LIPINSKIHDONORS 200 non-null values
NUM_RINGS 200 non-null values
NUM_ROTATABLEBONDS 200 non-null values
P1 30 non-null values
SMILES 200 non-null values
Molecule 200 non-null values
dtypes: object(20)>
In order to support rendering the molecules as images in the HTML export of the dataframe, the __str__ method is monkey-patched to return a base64 encoded PNG:
>>> molX = Chem.MolFromSmiles('Fc1cNc2ccccc12')
>>> print(molX) # doctest: +SKIP
<img src="data:image/png;base64,..." alt="Mol"/>
This can be reverted using the ChangeMoleculeRendering method
>>> ChangeMoleculeRendering(renderer='String')
>>> print(molX) # doctest: +SKIP
<rdkit.Chem.rdchem.Mol object at 0x10d179440>
>>> ChangeMoleculeRendering(renderer='PNG')
>>> print(molX) # doctest: +SKIP
<img src="data:image/png;base64,..." alt="Mol"/>
'''
from __future__ import print_function
from base64 import b64encode
import types,copy
from rdkit.six import BytesIO, string_types
from rdkit import Chem
from rdkit.Chem import Draw
try:
import pandas as pd
v = pd.version.version.split('.')
if v[0]=='0' and int(v[1])<10:
pd = None
else:
if 'display.width' in pd.core.config._registered_options:
pd.set_option('display.width',1000000000)
if 'display.max_rows' in pd.core.config._registered_options:
pd.set_option('display.max_rows',1000000000)
elif 'display.height' in pd.core.config._registered_options:
pd.set_option('display.height',1000000000)
if 'display.max_colwidth' in pd.core.config._registered_options:
pd.set_option('display.max_colwidth',1000000000)
    #saves the default pandas rendering to allow restoration
defPandasRendering = pd.core.frame.DataFrame.to_html
except Exception as e:
pd = None
highlightSubstructures=True
molRepresentation = 'png' # supports also SVG
molSize = (200,200)
def patchPandasHTMLrepr(self,**kwargs):
'''
  Patched default escaping of HTML control characters to allow molecule image rendering in dataframes
'''
formatter = pd.core.format.DataFrameFormatter(self,buf=None,columns=None,col_space=None,colSpace=None,header=True,index=True,
na_rep='NaN',formatters=None,float_format=None,sparsify=None,index_names=True,
justify = None, force_unicode=None,bold_rows=True,classes=None,escape=False)
formatter.to_html()
html = formatter.buf.getvalue()
return html
def patchPandasHeadMethod(self,n=5):
'''Ensure inheritance of patched to_html in "head" subframe
'''
df = self[:n]
df.to_html = types.MethodType(patchPandasHTMLrepr,df)
df.head = types.MethodType(patchPandasHeadMethod,df)
return df
def _get_image(x):
"""displayhook function for PIL Images, rendered as PNG"""
import pandas as pd
bio = BytesIO()
x.save(bio,format='PNG')
s = b64encode(bio.getvalue()).decode('ascii')
pd.set_option('display.max_columns',len(s)+1000)
pd.set_option('display.max_rows',len(s)+1000)
if len(s)+100 > pd.get_option("display.max_colwidth"):
pd.set_option("display.max_colwidth",len(s)+1000)
return s
def _get_svg_image(mol, size=(200,200), highlightAtoms=[]):
""" mol rendered as SVG """
from IPython.display import SVG
from rdkit.Chem import rdDepictor
from rdkit.Chem.Draw import rdMolDraw2D
try:
# If no coordinates, calculate 2D
mol.GetConformer(-1)
except ValueError:
rdDepictor.Compute2DCoords(mol)
drawer = rdMolDraw2D.MolDraw2DSVG(*size)
drawer.DrawMolecule(mol,highlightAtoms=highlightAtoms)
drawer.FinishDrawing()
svg = drawer.GetDrawingText().replace('svg:','')
return SVG(svg).data # IPython's SVG clears the svg text
from rdkit import DataStructs
try:
from rdkit.Avalon import pyAvalonTools as pyAvalonTools
_fingerprinter=lambda x,y:pyAvalonTools.GetAvalonFP(x,isQuery=y,bitFlags=pyAvalonTools.avalonSSSBits)
except ImportError:
_fingerprinter=lambda x,y:Chem.PatternFingerprint(x,fpSize=2048)
def _molge(x,y):
"""Allows for substructure check using the >= operator (X has substructure Y -> X >= Y) by
monkey-patching the __ge__ function
This has the effect that the pandas/numpy rowfilter can be used for substructure filtering (filtered = dframe[dframe['RDKitColumn'] >= SubstructureMolecule])
"""
if x is None or y is None: return False
if hasattr(x,'_substructfp'):
if not hasattr(y,'_substructfp'):
y._substructfp=_fingerprinter(y,True)
if not DataStructs.AllProbeBitsMatch(y._substructfp,x._substructfp):
return False
match = x.GetSubstructMatch(y)
if match:
if highlightSubstructures:
x.__sssAtoms=list(match)
else:
x.__sssAtoms=[]
return True
else:
return False
Chem.Mol.__ge__ = _molge # lambda x,y: x.HasSubstructMatch(y)
def PrintAsBase64PNGString(x,renderer = None):
'''returns the molecules as base64 encoded PNG image
'''
if highlightSubstructures and hasattr(x,'__sssAtoms'):
highlightAtoms=x.__sssAtoms
else:
highlightAtoms=[]
if molRepresentation.lower() == 'svg':
return _get_svg_image(x, highlightAtoms=highlightAtoms, size=molSize)
else:
return '<img src="data:image/png;base64,%s" alt="Mol"/>'%_get_image(Draw.MolToImage(x,highlightAtoms=highlightAtoms, size=molSize))
def PrintDefaultMolRep(x):
return str(x.__repr__())
#Chem.Mol.__str__ = lambda x: '<img src="data:image/png;base64,%s" alt="Mol"/>'%get_image(Draw.MolToImage(x))
Chem.Mol.__str__ = PrintAsBase64PNGString
def _MolPlusFingerprint(m):
'''Precomputes fingerprints and stores results in molecule objects to accelerate substructure matching
'''
#m = Chem.MolFromSmiles(smi)
if m is not None:
m._substructfp=_fingerprinter(m,False)
return m
def RenderImagesInAllDataFrames(images=True):
'''Changes the default dataframe rendering to not escape HTML characters, thus allowing rendered images in all dataframes.
  IMPORTANT: THIS IS A GLOBAL CHANGE THAT WILL AFFECT THE COMPLETE PYTHON SESSION. If you want to change the rendering only
  for a single dataframe, use the "ChangeMoleculeRendering" method instead.
'''
if images:
pd.core.frame.DataFrame.to_html = patchPandasHTMLrepr
else:
pd.core.frame.DataFrame.to_html = defPandasRendering
def AddMoleculeColumnToFrame(frame, smilesCol='Smiles', molCol = 'ROMol',includeFingerprints=False):
  '''Converts the molecules contained in "smilesCol" to RDKit molecules and appends them to the dataframe "frame" using the specified column name.
If desired, a fingerprint can be computed and stored with the molecule objects to accelerate substructure matching
'''
if not includeFingerprints:
frame[molCol]=frame[smilesCol].map(Chem.MolFromSmiles)
else:
frame[molCol]=frame[smilesCol].map(lambda smiles: _MolPlusFingerprint(Chem.MolFromSmiles(smiles)))
RenderImagesInAllDataFrames(images=True)
#frame.to_html = types.MethodType(patchPandasHTMLrepr,frame)
#frame.head = types.MethodType(patchPandasHeadMethod,frame)
def ChangeMoleculeRendering(frame=None, renderer='PNG'):
'''Allows to change the rendering of the molecules between base64 PNG images and string representations.
  This serves two purposes: first, it allows image generation to be avoided when it is not desired and, secondly, it allows image rendering to be enabled for
  a newly created dataframe that already contains molecules, without having to rerun the time-consuming AddMoleculeColumnToFrame. Note: this behaviour is needed because some pandas methods, e.g. head(),
  return a new dataframe instance that uses the default pandas rendering (thus not drawing images for molecules) instead of the monkey-patched one.
'''
if renderer == 'String':
Chem.Mol.__str__ = PrintDefaultMolRep
else:
Chem.Mol.__str__ = PrintAsBase64PNGString
if frame is not None:
frame.to_html = types.MethodType(patchPandasHTMLrepr,frame)
def LoadSDF(filename, idName='ID',molColName = 'ROMol',includeFingerprints=False, isomericSmiles=False, smilesName=None, embedProps=False):
""" Read file in SDF format and return as Pandas data frame. If embedProps=True all properties also get embedded in Mol objects in the molecule column. """
df = None
if isinstance(filename, string_types):
if filename.lower()[-3:] == ".gz":
import gzip
f = gzip.open(filename, "rb")
else:
f = open(filename, 'rb')
close = f.close
else:
f = filename
close = None # don't close an open file that was passed in
records = []
indices = []
for i, mol in enumerate(Chem.ForwardSDMolSupplier(f)):
if mol is None: continue
row = dict((k, mol.GetProp(k)) for k in mol.GetPropNames())
if not embedProps:
for prop in mol.GetPropNames():
mol.ClearProp(prop)
if mol.HasProp('_Name'): row[idName] = mol.GetProp('_Name')
if smilesName is not None:
row[smilesName] = Chem.MolToSmiles(mol, isomericSmiles=isomericSmiles)
if not includeFingerprints:
row[molColName] = mol
else:
row[molColName] = _MolPlusFingerprint(mol)
records.append(row)
indices.append(i)
if close is not None: close()
RenderImagesInAllDataFrames(images=True)
return pd.DataFrame(records, index=indices)
from rdkit.Chem import SDWriter
def WriteSDF(df, out, molColName='ROMol', idName=None, properties=None, allNumeric=False):
'''Write an SD file for the molecules in the dataframe. Dataframe columns can be exported as SDF tags if specified in the "properties" list. "properties=list(df.columns)" would export all columns.
The "allNumeric" flag allows to automatically include all numeric columns in the output. User has to make sure that correct data type is assigned to column.
"idName" can be used to select a column to serve as molecule title. It can be set to "RowID" to use the dataframe row key as title.
'''
close = None
if isinstance(out, string_types):
if out.lower()[-3:] == ".gz":
import gzip
out = gzip.open(out, "wb")
close = out.close
writer = SDWriter(out)
if properties is None:
properties=[]
else:
properties=list(properties)
if allNumeric:
properties.extend([dt for dt in df.dtypes.keys() if (np.issubdtype(df.dtypes[dt],float) or np.issubdtype(df.dtypes[dt],int))])
if molColName in properties:
properties.remove(molColName)
if idName in properties:
properties.remove(idName)
writer.SetProps(properties)
for row in df.iterrows():
# make a local copy I can modify
mol = Chem.Mol(row[1][molColName])
if idName is not None:
if idName == 'RowID':
mol.SetProp('_Name',str(row[0]))
else:
mol.SetProp('_Name',str(row[1][idName]))
for p in properties:
cell_value = row[1][p]
# Make sure float does not get formatted in E notation
if np.issubdtype(type(cell_value),float):
s = '{:f}'.format(cell_value).rstrip("0") # "f" will show 7.0 as 7.00000
if s[-1] == ".":
s += "0" # put the "0" back on if it's something like "7."
mol.SetProp(p, s)
else:
mol.SetProp(p,str(cell_value))
writer.write(mol)
writer.close()
if close is not None: close()
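# Hedged usage sketch for WriteSDF; the frame and column names follow the
# LoadSDF example in the module docstring and are illustrative only:
# WriteSDF(frame, 'first_200.out.sdf', molColName='Molecule', idName='ID',
#          properties=['AMW', 'CLOGP'])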
_saltRemover = None
def RemoveSaltsFromFrame(frame, molCol = 'ROMol'):
'''
Removes salts from mols in pandas DataFrame's ROMol column
'''
global _saltRemover
if _saltRemover is None:
from rdkit.Chem import SaltRemover
_saltRemover = SaltRemover.SaltRemover()
frame[molCol] = frame.apply(lambda x: _saltRemover.StripMol(x[molCol]), axis = 1)
def SaveSMILESFromFrame(frame, outFile, molCol='ROMol', NamesCol='', isomericSmiles=False):
'''
  Saves a SMILES (.smi) file. SMILES are generated from the column containing RDKit molecules. The column with names is optional.
'''
w = Chem.SmilesWriter(outFile, isomericSmiles=isomericSmiles)
if NamesCol != '':
for m,n in zip(frame[molCol], map(str,frame[NamesCol])):
m.SetProp('_Name',n)
w.write(m)
w.close()
else:
for m in frame[molCol]:
w.write(m)
w.close()
import numpy as np
import os
from rdkit.six.moves import cStringIO as StringIO
def SaveXlsxFromFrame(frame, outFile, molCol='ROMol', size=(300,300)):
"""
  Saves a pandas DataFrame as an xlsx file with embedded images.
It maps numpy data types to excel cell types:
int, float -> number
datetime -> datetime
  object -> string (limited to 32k characters - an xlsx limitation)
Cells with compound images are a bit larger than images due to excel.
Column width weirdness explained (from xlsxwriter docs):
The width corresponds to the column width value that is specified in Excel.
It is approximately equal to the length of a string in the default font of Calibri 11.
Unfortunately, there is no way to specify "AutoFit" for a column in the Excel file format.
This feature is only available at runtime from within Excel.
"""
import xlsxwriter # don't want to make this a RDKit dependency
cols = list(frame.columns)
cols.remove(molCol)
dataTypes = dict(frame.dtypes)
workbook = xlsxwriter.Workbook(outFile) # New workbook
worksheet = workbook.add_worksheet() # New work sheet
worksheet.set_column('A:A', size[0]/6.) # column width
# Write first row with column names
c2 = 1
for x in cols:
worksheet.write_string(0, c2, x)
c2 += 1
c = 1
for index, row in frame.iterrows():
image_data = StringIO()
img = Draw.MolToImage(row[molCol], size=size)
img.save(image_data, format='PNG')
worksheet.set_row(c, height=size[1]) # looks like height is not in px?
worksheet.insert_image(c, 0, "f", {'image_data': image_data})
c2 = 1
for x in cols:
if str(dataTypes[x]) == "object":
worksheet.write_string(c, c2, str(row[x])[:32000]) # string length is limited in xlsx
elif ('float' in str(dataTypes[x])) or ('int' in str(dataTypes[x])):
        # skip NaN/Inf values, which cannot be written as xlsx numbers (the
        # original "!= np.nan" test was always true)
        if not (np.isnan(row[x]) or np.isinf(row[x])):
          worksheet.write_number(c, c2, row[x])
elif 'datetime' in str(dataTypes[x]):
worksheet.write_datetime(c, c2, row[x])
c2 += 1
c += 1
workbook.close()
image_data.close()
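# --- Added illustrative example (not part of the original module) -----------
# A minimal usage sketch of SaveXlsxFromFrame; the output file name is a
# hypothetical placeholder and xlsxwriter must be installed.
def _exampleSaveXlsx(df):
  SaveXlsxFromFrame(df, 'compounds.xlsx', molCol='ROMol', size=(200, 200))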
def FrameToGridImage(frame, column = 'ROMol', legendsCol=None, **kwargs):
'''
Draw grid image of mols in pandas DataFrame.
'''
if legendsCol:
if legendsCol == frame.index.name:
img = Draw.MolsToGridImage(frame[column], legends=list(map(str, list(frame.index))), **kwargs)
else:
img = Draw.MolsToGridImage(frame[column], legends=list(map(str, list(frame[legendsCol]))), **kwargs)
else:
img = Draw.MolsToGridImage(frame[column], **kwargs)
return img
from rdkit.Chem.Scaffolds import MurckoScaffold
def AddMurckoToFrame(frame, molCol = 'ROMol', MurckoCol = 'Murcko_SMILES', Generic = False):
'''
  Adds a column with the SMILES of the Murcko scaffolds to a pandas DataFrame. Setting Generic to True results in the SMILES of the generic framework.
'''
if Generic:
frame[MurckoCol] = frame.apply(lambda x: Chem.MolToSmiles(MurckoScaffold.MakeScaffoldGeneric(MurckoScaffold.GetScaffoldForMol(x[molCol]))), axis=1)
else:
frame[MurckoCol] = frame.apply(lambda x: Chem.MolToSmiles(MurckoScaffold.GetScaffoldForMol(x[molCol])), axis=1)
from rdkit.Chem import AllChem
def AlignMol(mol,scaffold):
"""
Aligns mol (RDKit mol object) to scaffold (SMILES string)
"""
scaffold = Chem.MolFromSmiles(scaffold)
AllChem.Compute2DCoords(scaffold)
AllChem.GenerateDepictionMatching2DStructure(mol,scaffold)
return mol
def AlignToScaffold(frame, molCol='ROMol', scaffoldCol='Murcko_SMILES'):
'''
Aligns molecules in molCol to scaffolds in scaffoldCol
'''
frame[molCol] = frame.apply(lambda x: AlignMol(x[molCol],x[scaffoldCol]), axis=1)
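# --- Added illustrative example (not part of the original module) -----------
# A minimal sketch combining AddMurckoToFrame and AlignToScaffold: first a
# scaffold column is added, then the depictions are aligned to it.
def _exampleScaffoldAlignment(df):
  AddMurckoToFrame(df, molCol='ROMol', MurckoCol='Murcko_SMILES')
  AlignToScaffold(df, molCol='ROMol', scaffoldCol='Murcko_SMILES')
  return df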
if __name__ == "__main__":
import sys
if pd is None:
print("pandas installation not found, skipping tests", file=sys.stderr)
else:
v = pd.version.version.split('.')
if v[0]=='0' and int(v[1])<10:
print("pandas installation >=0.10 not found, skipping tests",
file=sys.stderr)
else:
import doctest
failed,tried=doctest.testmod(optionflags=doctest.ELLIPSIS+doctest.NORMALIZE_WHITESPACE)
if failed:
sys.exit(failed)
# $Id$
#
# Copyright (c) 2013, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
| soerendip42/rdkit | rdkit/Chem/PandasTools.py | Python | bsd-3-clause | 21,266 | [
"RDKit"
] | 98f197ed91e4a6a3cedce01738a883c07e3bab1ecc21cd554a798ea1cbc5a26e |
import os.path
import requests
urls = []
names = []
def download_archive(url, local_filename):
if os.path.isfile(local_filename):
print "reusing",local_filename,"for",url
return
r = requests.get(url, stream=True)
if (r.status_code != 200):
print r.status_code, url
url = "https://www.bioconductor.org/packages/release/bioc/src/contrib/" + local_filename
r = requests.get(url, stream=True)
if (r.status_code != 200):
print r.status_code, url
url = "http://bioconductor.org/packages/release/data/annotation/src/contrib/" + local_filename
r = requests.get(url, stream=True)
if (r.status_code != 200):
print r.status_code, url
print "Giving up can not find!"
exit(-1)
    print local_filename,"copying from",url
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
def process_url(url):
local_filename = url.split('/')[-1]
name = local_filename[:local_filename.find("_")]
if name in names:
print "repeat", name
else:
download_archive(url, local_filename)
github = "https://github.com/Christian-B/galaxy_shedtools/raw/master/r_extended/" + local_filename # ?raw=true
urls.append(github)
names.append(name)
def process_line(line):
if len(line) < 10:
return
end = line.find("</package")
if line.startswith("<package>"):
url = line[9:end]
elif line.startswith("<!--package>"):
url = line[12:end]
else:
print "opps", line
return
process_url(url)
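# --- Added illustrative note (not part of the original script) ---------------
# process_line() expects lines shaped like Galaxy tool_dependencies package
# entries; the URLs below are hypothetical placeholders:
#
#   <package>https://cran.example.org/src/contrib/foo_1.0.tar.gz</package>
#   <!--package>https://cran.example.org/src/contrib/bar_2.1.tar.gz</package-->
#
# Active entries start with "<package>", commented-out ones with "<!--package>";
# anything else is reported and skipped.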
if __name__ == '__main__':
file_name = "package_list.txt"
with open(file_name, 'r') as f:
for line in f:
process_line(line)
PACKAGE_XML_TEMPLATE = " <package>%s</package>\n"
with open("short_package_list.txt", 'w') as f:
for url in urls:
f.write(PACKAGE_XML_TEMPLATE % url )
LIBRARY_TEMPLATE = "library(%s)\n"
with open("packages.R", 'w') as f:
for name in names:
f.write(LIBRARY_TEMPLATE % name )
f.write("\nargs<-commandArgs(TRUE)\n")
f.write("writeLines(capture.output(sessionInfo()), args[1])\n")
f.write("sessionInfo()\n")
| Christian-B/galaxy_shedtools | r_extended/merger.py | Python | gpl-2.0 | 2,403 | [
"Bioconductor"
] | c2557727935c0e9fc554a18017c48c5d799506f5ebe26982726184b547b22f6d |
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2012, Luis Pedro Coelho <[email protected]>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# License: MIT. See COPYING.MIT file in the milk distribution
from __future__ import division
from .classifier import normaliselabels, ctransforms_model
from collections import deque
import numpy
import numpy as np
import random
from . import _svm
__all__ = [
'rbf_kernel',
'polynomial_kernel',
'precomputed_kernel',
'dot_kernel',
'svm_raw',
'svm_binary',
'svm_to_binary',
'svm_sigmoidal_correction',
'sigma_value_fisher',
'fisher_tuned_rbf_svm',
]
def _svm_apply(SVM, q):
'''
f_i = _svm_apply(SVM, q)
@internal: This is mostly used for testing
'''
X,Y,Alphas,b,C,kernel=SVM
N = len(X)
s = 0.0
for i in xrange(N):
s += Alphas[i] * Y[i] * kernel(q, X[i])
return s - b
def svm_learn_smo(X,Y,kernel,C,eps=1e-4,tol=1e-2,cache_size=(1<<20)):
'''
Learn a svm classifier
X: data
Y: labels in SVM format (ie Y[i] in (1,-1))
This is a very raw interface. In general, you should use a class
like svm_classifier.
Implements the Sequential Minimum Optimisation Algorithm from Platt's
"Fast training of support vector machines using sequential minimal optimization"
in Advances in kernel methods: support vector learning
Pages: 185 - 208
Year of Publication: 1999
ISBN:0-262-19416-3
'''
assert numpy.all(numpy.abs(Y) == 1)
assert len(X) == len(Y)
N = len(Y)
Y = Y.astype(numpy.int32)
params = numpy.array([0,C,1e-3,1e-5],numpy.double)
Alphas0 = numpy.zeros(N, numpy.double)
_svm.eval_SMO(X,Y,Alphas0,params,kernel,cache_size)
return Alphas0, params[0]
def svm_learn_libsvm(features, labels, kernel, C, eps=1e-4, tol=1e-2, cache_size=(1<<20), alphas=None):
'''
Learn a svm classifier using LIBSVM optimiser
This is a very raw interface. In general, you should use a class
like svm_classifier.
This uses the LIBSVM optimisation algorithm
Parameters
----------
X : ndarray
data
Y : ndarray
labels in SVM format (ie Y[i] in (1,-1))
kernel : kernel
C : float
eps : float, optional
tol : float, optional
cache_size : int, optional
alphas : ndarray, optional
Returns
-------
alphas : ndarray
b : float
'''
if not np.all(np.abs(labels) == 1):
raise ValueError('milk.supervised.svm.svm_learn_libsvm: Y[i] != (-1,+1)')
assert len(features) == len(labels)
n = len(labels)
labels = labels.astype(np.int32)
p = -np.ones(n, np.double)
params = np.array([0,C,eps,tol], dtype=np.double)
if alphas is None:
alphas = np.zeros(n, np.double)
elif alphas.dtype != np.double or len(alphas) != n:
raise ValueError('milk.supervised.svm_learn_libsvm: alphas is in wrong format')
_svm.eval_LIBSVM(features, labels, alphas, p, params, kernel, cache_size)
return alphas, params[0]
class preprocessed_rbf_kernel(object):
def __init__(self, X, sigma, beta):
self.X = X
self.Xsum = (X**2).sum(1)
self.sigma = sigma
self.beta = beta
def __call__(self, q):
minus_d2_sigma = np.dot(self.X,q)
minus_d2_sigma *= 2.
minus_d2_sigma -= self.Xsum
minus_d2_sigma -= np.dot(q,q)
minus_d2_sigma /= self.sigma
return self.beta * np.exp(minus_d2_sigma)
class rbf_kernel(object):
'''
kernel = rbf_kernel(sigma,beta=1)
Radial Basis Function kernel
Returns a kernel (ie, a function that implements)
        beta * exp( - ||x1 - x2||^2 / sigma)
'''
def __init__(self, sigma, beta=1):
self.sigma = sigma
self.beta = beta
self.kernel_nr_ = 0
self.kernel_arg_ = float(sigma)
def __call__(self, x1, x2):
d2 = x1 - x2
d2 **= 2
d2 = d2.sum()
res = self.beta*np.exp(-d2/self.sigma)
return res
def preprocess(self, X):
return preprocessed_rbf_kernel(X, self.sigma, self.beta)
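# --- Added illustrative example (not part of the original module) -----------
# Evaluating the kernel directly shows the beta * exp(-||x1 - x2||**2 / sigma)
# form implemented in __call__ above; the input vectors are arbitrary.
def _example_rbf_kernel():
    k = rbf_kernel(sigma=2.0, beta=1.0)
    x1 = np.array([0.0, 0.0])
    x2 = np.array([1.0, 1.0])
    return k(x1, x2)    # == np.exp(-1.0), since ||x1 - x2||**2 == 2.0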
class polynomial_kernel(object):
'''
kernel = polynomial_kernel(d,c=1)
returns a kernel (ie, a function) that implements:
(<x1,x2>+c)**d
'''
def __init__(self, d, c=1):
self.d = d
self.c = c
def __call__(self,x1,x2):
return (np.dot(x1,x2)+self.c)**self.d
class precomputed_kernel(object):
'''
kernel = precomputed_kernel(kmatrix)
A "fake" kernel which is precomputed.
'''
    def __init__(self, kmatrix, copy=False):
        # keep a reference to the (contiguous) kernel matrix; without this,
        # __call__ would fail with a NameError
        self.kmatrix = np.ascontiguousarray(kmatrix, np.double, copy=copy)
        self.kernel_nr_ = 1
        self.kernel_arg_ = 0.
    def __call__(self, x0, x1):
        return self.kmatrix[x0, x1]
class _call_kernel(object):
def __init__(self, k, svs):
self.svs = svs
self.kernel = k
def __call__(self, q):
return np.array([self.kernel(s, q) for s in self.svs])
class preprocessed_dot_kernel(object):
def __init__(self, svs):
self.svs = svs
def __call__(self, x1):
return np.dot(self.svs, x1)
class dot_kernel(object):
def __init__(self):
self.kernel_nr_ = 2
self.kernel_arg_ = 0.
def __call__(self, x0, x1):
return np.dot(x0, x1)
def preprocess(self, svs):
return preprocessed_dot_kernel(svs)
class svm_raw_model(object):
def __init__(self, svs, Yw, b, kernel):
self.svs = svs
self.Yw = Yw
self.b = b
self.kernel = kernel
try:
self.kernelfunction = self.kernel.preprocess(self.svs)
except AttributeError:
self.kernelfunction = _call_kernel(self.kernel, self.svs)
def apply(self, q):
Q = self.kernelfunction(q)
return np.dot(Q, self.Yw) - self.b
class svm_raw(object):
'''
svm_raw: classifier
classifier = svm_raw(kernel, C, eps=1e-3, tol=1e-8)
Parameters
----------
kernel : the kernel to use.
This should be a function that takes two data arguments
see rbf_kernel and polynomial_kernel.
C : the C parameter
Other Parameters
----------------
eps : the precision to which to solve the problem (default 1e-3)
tol : (|x| < tol) is considered zero
'''
def __init__(self, kernel=None, C=1., eps=1e-3, tol=1e-8):
self.C = C
self.kernel = kernel
self.eps = eps
self.tol = tol
self.algorithm = 'libsvm'
def train(self, features, labels, normalisedlabels=False, **kwargs):
assert self.kernel is not None, 'milk.supervised.svm_raw.train: kernel not set!'
assert self.algorithm in ('libsvm','smo'), 'milk.supervised.svm_raw: unknown algorithm (%s)' % self.algorithm
assert not (np.isinf(self.C) or np.isnan(self.C)), 'milk.supervised.svm_raw: setting C to NaN or Inf causes problems.'
features = np.asanyarray(features)
if normalisedlabels:
Y = labels.copy()
else:
Y,_ = normaliselabels(labels)
assert Y.max() == 1, 'milk.supervised.svm_raw can only handle binary problems'
Y *= 2
Y -= 1
kernel = self.kernel
try:
kernel = (self.kernel.kernel_nr_, self.kernel.kernel_arg_)
features = np.ascontiguousarray(features, np.double)
except AttributeError:
pass
if self.algorithm == 'smo':
alphas,b = svm_learn_smo(features,Y,kernel,self.C,self.eps,self.tol)
else:
alphas,b = svm_learn_libsvm(features,Y,kernel,self.C,self.eps,self.tol)
svsi = (alphas != 0)
svs = features[svsi]
w = alphas[svsi]
Y = Y[svsi]
Yw = w * Y
return svm_raw_model(svs, Yw, b, self.kernel)
def get_params(self):
return self.C, self.eps,self.tol
def set_params(self,params):
self.C,self.eps,self.tol = params
def set_option(self, optname, value):
setattr(self, optname, value)
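# --- Added illustrative example (not part of the original module) -----------
# A minimal usage sketch of svm_raw on a tiny toy problem; the data values are
# arbitrary and only meant to show the train/apply cycle.
def _example_svm_raw():
    features = np.array([[0., 0.], [0., 1.], [2., 2.], [2., 3.]])
    labels = np.array([0, 0, 1, 1])
    learner = svm_raw(kernel=dot_kernel(), C=1.)
    model = learner.train(features, labels)
    return model.apply(np.array([2., 2.5]))    # positive value => second class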
def learn_sigmoid_constants(F,Y,
max_iters=None,
min_step=1e-10,
sigma=1e-12,
eps=1e-5):
'''
A,B = learn_sigmoid_constants(F,Y)
    This is a very low-level interface; for general use, look into the svm_classifier class.
Parameters
----------
F : Values of the function F
Y : Labels (in boolean format, ie, in (0,1))
Other Parameters
----------------
max_iters : Maximum nr. of iterations
min_step : Minimum step
sigma : sigma
eps : A small number
Reference for Implementation
----------------------------
Implements the algorithm from "A Note on Platt's Probabilistic Outputs for
Support Vector Machines" by Lin, Lin, and Weng.
Machine Learning, Vol. 68, No. 3. (23 October 2007), pp. 267-276
'''
# Below we use safe constructs to avoid using the overflown values, but we
# must compute them because of the way numpy works.
errorstate = np.seterr(over='ignore')
# the deci[i] array is called F in this code
F = np.asanyarray(F)
Y = np.asanyarray(Y)
assert len(F) == len(Y)
assert numpy.all( (Y == 1) | (Y == 0) )
if max_iters is None:
max_iters = 1000
prior1 = Y.sum()
prior0 = len(F)-prior1
small_nr = 1e-4
hi_t = (prior1+1.)/(prior1+2.)
lo_t = 1./(prior0+2.)
T = Y*hi_t + (1-Y)*lo_t
A = 0.
B = np.log( (prior0+1.)/(prior1+1.) )
def target(A,B):
fApB = F*A + B
lef = np.log1p(np.exp(fApB))
lemf = np.log1p(np.exp(-fApB))
fvals = np.choose(fApB >= 0, ( T*fApB + lemf, (T-1.)*fApB + lef))
return np.sum(fvals)
fval = target(A,B)
for iter in xrange(max_iters):
fApB = F*A + B
ef = np.exp(fApB)
emf = np.exp(-fApB)
p = np.choose(fApB >= 0, ( emf/(1.+emf), 1./(1.+ef) ))
q = np.choose(fApB >= 0, ( 1/(1.+emf), ef/(1.+ef) ))
d2 = p * q
h11 = np.dot(F*F,d2) + sigma
h22 = np.sum(d2) + sigma
h21 = np.dot(F,d2)
d1 = T - p
g1 = np.dot(F,d1)
g2 = np.sum(d1)
if abs(g1) < eps and abs(g2) < eps: # Stopping criteria
break
det = h11*h22 - h21*h21
dA = - (h22*g1 - h21*g2)/det
dB = - (h21*g1 + h11*g2)/det
gd = g1*dA + g2*dB
stepsize = 1.
while stepsize >= min_step:
newA = A + stepsize*dA
newB = B + stepsize*dB
newf = target(newA,newB)
if newf < fval+eps*stepsize*gd:
A = newA
B = newB
fval = newf
break
stepsize /= 2
else:
print 'Line search fails'
break
np.seterr(**errorstate)
return A,B
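# --- Added illustrative example (not part of the original module) -----------
# A minimal sketch of Platt scaling: fit the sigmoid on toy decision values and
# map them to probabilities. The decision values and labels are arbitrary.
def _example_sigmoid_correction():
    F = np.array([-2.0, -1.0, -0.5, 0.5, 1.0, 2.0])
    Y = np.array([0, 0, 0, 1, 1, 1])
    A, B = learn_sigmoid_constants(F, Y)
    return 1. / (1. + np.exp(F * A + B))    # probabilities in (0, 1), monotone in F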
class svm_binary_model(object):
def __init__(self, classes):
self.classes = classes
def apply(self,f):
return self.classes[f >= 0.]
class svm_binary(object):
'''
classifier = svm_binary()
model = classifier.train(features, labels)
assert model.apply(f) in labels
'''
def train(self, features, labels, normalisedlabels=False, **kwargs):
if normalisedlabels:
return svm_binary_model( (0,1) )
assert len(labels) >= 2, 'Cannot train from a single example'
names = sorted(set(labels))
assert len(names) == 2, 'milk.supervised.svm.svm_binary.train: Can only handle two class problems'
return svm_binary_model(names)
class svm_to_binary(object):
'''
svm_to_binary(base_svm)
A simple wrapper so that
svm_to_binary(base_svm)
is a model that takes the base_svm classifier and then binarises its model output.
NOTE: This class does the same job as::
ctransforms(base_svm, svm_binary())
'''
def __init__(self, svm_base):
'''
binclassifier = svm_to_binary(svm_base)
a classifier that binarises the output of svm_base.
'''
self.base = svm_base
def train(self, features, labels, **kwargs):
model = self.base.train(features, labels, **kwargs)
binary = svm_binary()
binary_model = binary.train(features, labels, **kwargs)
return ctransforms_model([model, binary_model])
def set_option(self, opt, value):
self.base.set_option(opt, value)
class svm_sigmoidal_correction_model(object):
def __init__(self, A, B):
self.A = A
self.B = B
def apply(self,features):
return 1./(1.+numpy.exp(features*self.A+self.B))
class svm_sigmoidal_correction(object):
'''
svm_sigmoidal_correction : a classifier
Sigmoidal approximation for obtaining a probability estimate out of the output
of an SVM.
'''
def __init__(self):
self.max_iters = None
def train(self, features, labels, **kwargs):
A,B = learn_sigmoid_constants(features,labels,self.max_iters)
return svm_sigmoidal_correction_model(A, B)
def get_params(self):
return self.max_iters
def set_params(self,params):
self.max_iters = params
def sigma_value_fisher(features,labels):
'''
f = sigma_value_fisher(features,labels)
value_s = f(s)
    Returns a function that measures how good a given value of sigma
    is for the features. This function should be *minimised* to find a
    good value of sigma.
Parameters
-----------
features : features matrix as 2-ndarray.
Returns
-------
f : a function: float -> float
this function should be minimised for a good `sigma`
Reference
----------
Implements the measure in
"Determination of the spread parameter in the
Gaussian kernel for classification and regression"
by Wenjian Wanga, Zongben Xua, Weizhen Luc, and Xiaoyun Zhanga
'''
features = np.asanyarray(features)
xij = np.dot(features,features.T)
f2 = np.sum(features**2,1)
d = f2-2*xij
d = d.T + f2
N1 = (labels==0).sum()
N2 = (labels==1).sum()
C1 = -d[labels == 0][:,labels == 0]
C2 = -d[labels == 1][:,labels == 1]
C12 = -d[labels == 0][:,labels == 1]
C1 = C1.copy()
C2 = C2.copy()
C12 = C12.copy()
def f(sigma):
sigma = float(sigma)
N1 = C1.shape[0]
N2 = C2.shape[0]
if C12.shape != (N1,N2):
raise ValueError
C1v = np.sum(np.exp(C1/sigma))/N1
C2v = np.sum(np.exp(C2/sigma))/N2
C12v = np.sum(np.exp(C12/sigma))/N1/N2
return (N1 + N2 - C1v - C2v)/(C1v/N1+C2v/N2 - 2.*C12v)
return f
class fisher_tuned_rbf_svm(object):
'''
F = fisher_tuned_rbf_svm(sigmas, base)
Returns a wrapper classifier that uses RBF kernels automatically
tuned using sigma_value_fisher.
'''
def __init__(self, sigmas, base):
self.sigmas = sigmas
self.base = base
def train(self, features, labels, **kwargs):
f = sigma_value_fisher(features, labels)
fs = [f(s) for s in self.sigmas]
self.sigma = self.sigmas[np.argmin(fs)]
self.base.set_option('kernel',rbf_kernel(self.sigma))
return self.base.train(features, labels, **kwargs)
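# --- Added illustrative example (not part of the original module) -----------
# A minimal sketch of wrapping svm_raw in fisher_tuned_rbf_svm so that sigma is
# chosen automatically; the candidate sigmas are arbitrary example values.
def _example_fisher_tuned(features, labels):
    learner = fisher_tuned_rbf_svm(sigmas=2.0 ** np.arange(-4, 5), base=svm_raw(C=1.))
    return learner.train(features, labels)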
| arnaudsj/milk | milk/supervised/svm.py | Python | mit | 15,240 | [
"Gaussian"
] | 826b0062190b1ea0312e558a46fc1451a3c68046c882bbbe5f58efc2d925a5a0 |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from bigdl.orca import init_orca_context, stop_orca_context
from pyspark.sql.types import ArrayType, DoubleType
from pyspark.sql import SparkSession
@pytest.fixture(autouse=True, scope='package')
def orca_context_fixture():
conf = {"spark.python.worker.reuse": "false"}
sc = init_orca_context(cores=8, conf=conf)
def to_array_(v):
return v.toArray().tolist()
def flatten_(v):
result = []
for elem in v:
result.extend(elem.toArray().tolist())
return result
spark = SparkSession(sc)
spark.udf.register("to_array", to_array_, ArrayType(DoubleType()))
spark.udf.register("flatten", flatten_, ArrayType(DoubleType()))
yield
stop_orca_context()
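# --- Added illustrative example (not part of the original conftest) ----------
# Once the fixture has registered the UDFs, a test can unpack Spark ML vectors
# into plain double arrays; the view and column names below are hypothetical.
def _example_use_registered_udfs(spark, df):
    df.createOrReplaceTempView("features_table")
    return spark.sql("SELECT to_array(features) AS values FROM features_table")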
| intel-analytics/BigDL | python/orca/test/bigdl/orca/learn/ray/tf/conftest.py | Python | apache-2.0 | 1,324 | [
"ORCA"
] | b50ca5a0cbee441b26b69514317a1a1749726b1665a84b5dc4b5c02d40ce82a1 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# plugnparse documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import plugnparse
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Plug-n-Parse'
copyright = u"2016, Brian Rossa"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = plugnparse.__version__
# The full version, including alpha/beta/rc tags.
release = plugnparse.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'plugnparsedoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'plugnparse.tex',
u'Plug-n-Parse Documentation',
u'Brian Rossa', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'plugnparse',
u'Plug-n-Parse Documentation',
[u'Brian Rossa'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'plugnparse',
u'Plug-n-Parse Documentation',
u'Brian Rossa',
'plugnparse',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| brianthelion/plugnparse | docs/conf.py | Python | mit | 8,430 | [
"Brian"
] | 9907dc87d35e38f51fa12e1819539a8d8a7b402981a90b007c168e9f4b5ed7c4 |
# Try to import required packages/modules
import os
import netCDF4 as nc4
import warnings
import numpy
warnings.simplefilter('error', UserWarning)
debug = False # Global debugging
def openNetCDFfileForReading(fileName):
"""
Return Dataset type for file to read.
"""
try: rg = nc4.Dataset(fileName, 'r')
except:
if os.path.isfile(fileName): raise Exception('There was a problem opening "'+fileName+'".')
raise Exception('Could not find file "'+fileName+'".')
return rg
def dump(fileName):
"""
A succinct dump of a netCDF4 file.
"""
if isinstance(fileName, nc4.Dataset):
closeWhenDone = False
rg = fileName
else:
closeWhenDone = True
rg = openNetCDFfileForReading(fileName)
dims = rg.dimensions; vars = rg.variables
if not isinstance(fileName,nc4.Dataset): print('Summary of %s:'%fileName)
def allAttributes(obj):
attributes = {}
for a in obj.ncattrs():
o = obj.getncattr(a)
if isinstance(o,str): o = o.encode('ascii','ignore')
attributes[a.encode('ascii','ignore')] = o
if len(attributes): return attributes
return None
print('Attributes:',allAttributes(rg))
print('Dimensions: -------------------------------------')
for dim in dims:
oString = ' '+dim+' ['+str(len( dims[dim] ))+']'
if dim in vars:
n = len( dims[dim] ); obj = rg.variables[dim]
if n>5: oString += ' = '+str(obj[0])+'...'+str(obj[n-1])
else: oString += ' = '+str(obj[:])
if 'long_name' in obj.ncattrs(): oString += ' "'+obj.long_name+'"'
if 'units' in obj.ncattrs(): oString += ' ('+obj.units+')'
print(oString)
print('Variables: --------------------------------------')
for var in vars:
#if var in dims: continue # skip listing dimensions as variables
oString = ' '+var+' [ '; dString = ''
obj = vars[var]; varDims = obj.dimensions
for dim in varDims:
if len(dString)>0: dString += ', '
dString += dim+'['+str(len( dims[dim] ))+']'
oString += dString+' ]'
if 'long_name' in obj.ncattrs(): oString += ' "'+obj.long_name+'"'
if 'units' in obj.ncattrs(): oString += ' ('+obj.units+')'
print(oString)
print(' attributes:',allAttributes(obj))
if closeWhenDone: rg.close()
def readVar(fileName, variableName, *args, **kwargs):
"""
Reads a variable from a netCDF file.
Optional arguments are ranges for each dimension.
Missing ranges fetch the entire dimensions.
Returns: data, dimensions, attributes.
data will be a numpy masked array
dimensions will be a list of numpy 1D vectors
attributes will be a dictionary
Examples:
>>> T,_,_ = nccf.readVar('test.nc','xyz')
>>> T,dims,atts = nccf.readVar('test.nc','xyz',rang(1,4),3)
"""
if isinstance(fileName, nc4.Dataset):
closeWhenDone = False
rg = fileName
else:
closeWhenDone = True
rg = openNetCDFfileForReading(fileName)
if not variableName:
    print('No variable name specified! Specify a variable from the following summary of "'\
+fileName+'":\n')
dump(fileName)
exit(0)
dtype = kwargs.setdefault('dtype','float64')
# Check that the variable is in the file (allowing for case mismatch)
for v in rg.variables:
if variableName.lower() == v.lower(): variableName=v ; break
if not variableName in rg.variables:
raise MyError('Did not find "'+variableName+'" in file "'+fileName+'".')
vh = rg.variables[variableName] # Handle for variable
dimensions = []
for n, d in enumerate(vh.dimensions):
if n < len(args):
if d in rg.variables: dimensions.append( rg.variables[d][args[n]] )
else: dimensions.append( args[n] )
else:
if d in rg.variables: dimensions.append( numpy.asarray(rg.variables[d][:], dtype=dtype) )
else: dimensions.append( list(range( len(rg.dimensions[d] ))) )
attributes = {}
for a in vh.ncattrs():
attributes[a.encode('ascii','ignore')] = vh.getncattr(a)
data = numpy.ma.asarray(vh[args][:], dtype=dtype)
if closeWhenDone: rg.close()
return data, dimensions, attributes
def openNetCDFfileForWriting(fileName):
"""
Return Dataset type for file to write.
"""
try:
if os.path.isfile(fileName): rg = nc4.Dataset(fileName,'a')
else: rg = nc4.Dataset(fileName,'w')
except:
if os.path.isfile(fileName): raise Exception('There was a problem opening "'+fileName+'" for appending.')
raise Exception('There was a problem creating "'+fileName+'".')
return rg
def write(fileName, variableName=None, variable=None, dimensions=None, attributes=None, dataType='f8', fillValue=None, clobber=False, record=None):
"""
Writes a variable to a netCDF file.
Arguments:
fileName name of the file
variableName name of variable to appear in the file
variable a numpy masked array
dimensions a dictionary of dimension names and 1D dimension data
or a list of names, or a list of 1D dimension data
attributes a dictionary of attributes
Optional arguments:
dataType data type for variable (default 'f8')
fillValue the fill value (default None)
clobber if True will remove file before writing
record if present, specifies record number of unlimited dimension to write
Examples:
>>> nccf.write('test.nc','Temp',T)
"""
if isinstance(fileName, nc4.Dataset):
closeWhenDone = False
if clobber: raise Exception('clobber is incompatible with passing a root-group as an argument')
rg = fileName
else:
closeWhenDone = True
if clobber:
try: os.remove(fileName)
except: pass
rg = openNetCDFfileForWriting(fileName)
def createDimIfMissing(rg, name, size):
if name in rg.dimensions:
if not rg.dimensions[name].isunlimited():
if not len(rg.dimensions[name])==size:
raise Exception('Dimension "%s" has size %i in file and differs from provided size %i'
%(name, len(rg.dimensions[name]), size))
else:
rg.createDimension(name, size)
return name
def createDimDataIfMissing(rg, name, data, dataType):
    if data is None: createDimIfMissing(rg, name, None)
else: createDimIfMissing(rg, name, len(data))
if name in rg.variables:
if any(rg.variables[name][:]!=data):
raise Exception('Dimension data "%s" does not match provided data'%name)
else:
rg.createVariable(name, dataType, name)
rg.variables[name][:] = data
return name
def matchingDimsByData(rg, data):
matchingDims = []
for d in rg.dimensions:
if d in rg.variables:
if rg.variables[d].shape==data.shape:
if not any(rg.variables[d][:]!=data): matchingDims.append(d)
if len(matchingDims)>1: raise Exception('Too many dimension-data match the provided data')
elif len(matchingDims)==0: return None
return matchingDims[0]
def dimIsUnlimited(rg, dim):
if dim in rg.dimensions:
if rg.dimensions[dim].isunlimited(): return True
return False
variableDimensions = None
  if dimensions is None:
    if variable is not None and (isinstance(variable, numpy.ma.core.MaskedArray) or isinstance(variable, numpy.ndarray)):
# Create or match some simple dimensions with made up names
variableDimensions = []
for i in range(len(variable.shape)):
matchingDims = [d for d in rg.dimensions if len(rg.dimensions[d])==variable.shape[i] and not d in variableDimensions]
if len(matchingDims)>1: raise Exception(
'Too many matching-length dimensions to choose from. Please provide specific dimensions')
elif len(matchingDims)==1:
variableDimensions.append( createDimIfMissing(rg, matchingDims[0], variable.shape[i]) )
else:
variableDimensions.append( createDimIfMissing(rg, 'dim%i'%i, variable.shape[i]) )
elif isinstance(dimensions, list):
    if variable is not None:
# Create or match dimensions based on names or vectors
variableDimensions = []
if isinstance(dimensions[0], str) and dimIsUnlimited(rg, dimensions[0]):
variable = variable.reshape((1,)+variable.shape)
for i,dim in enumerate(dimensions):
if isinstance(dim, str):
variableDimensions.append( createDimIfMissing(rg, dim, variable.shape[i]) )
elif isinstance(dim, numpy.ndarray):
dName = matchingDimsByData(rg, dim)
if dName==None: dName = 'dim%i'%i
variableDimensions.append( createDimDataIfMissing(rg, dName, dim, dataType) )
elif len(numpy.atleast_1d(dim))==1: print('Ignoring singleton dimension with value',dim)
else: print('******* Not sure what to do with dimension =',dim)
elif isinstance(dimensions, dict):
# Create dimensions from dictionary provided
variableDimensions = []
for n in dimensions:
variableDimensions.append( createDimDataIfMissing(rg, n, dimensions[n], dataType) )
else: raise Exception('Not sure what to do with the dimensions argument!')
if not variableName==None:
if variableName in rg.variables: vh = rg.variables[variableName]
elif not variableDimensions==None: vh = rg.createVariable(variableName, dataType, variableDimensions, fill_value=fillValue)
else: vh = None
if not attributes==None:
if not vh==None:
for a in attributes:
if not a in ['_FillValue']:
vh.setncattr(a,attributes[a])
else:
for a in attributes:
rg.setncattr(a,attributes[a])
  if variable is not None and vh is not None:
if not record==None:
if len(vh.shape)==1: vh[record] = variable
else: vh[record,:] = variable
else: vh[:] = variable
  if closeWhenDone: rg.close()
def testNCCF():
"""
A simple test of writing a netcdf file
"""
import nccf
testFile = 'baseline.1900-1909.salt_temp_e.nc'
dump(testFile)
print('======= dump finished') ; print()
T, d, a = nccf.readVar(testFile,'Temp',0,4,list(range(580,593)),list(range(40,50)))
print('T=',T)
print('d=',d)
print('a=',a)
print('======= read T finished') ; print()
os.remove('q.nc')
print('Testing creation with dictionary dimensions')
nccf.write('q.nc', 'w1', -T, dimensions={'y':d[-2],'x':d[-1]})
dump('q.nc')
print('Testing creation with just data dimensions')
nccf.write('q.nc', 'w1', T, dimensions=d)
dump('q.nc')
print('Testing creation with just named dimensions')
nccf.write('q.nc', 'w1', -T, dimensions=['y','x'])
dump('q.nc')
print('Testing creation with no dimensions')
nccf.write('q.nc', 'w1', T)
dump('q.nc')
print('Testing creation with just attributes')
nccf.write('q.nc', 'w1', attributes=a)
dump('q.nc')
print('======= write T finished') ; print()
print('Testing creation with global attributes and clobber')
nccf.write('q.nc', attributes={'testAtt':-1.23, 'stringAtt':'qwerty'}, clobber=True)
dump('q.nc')
print('======= clobber finished') ; print()
print('Testing creating unlimited dimension with attributes')
rg = openNetCDFfileForWriting('q.nc')
nccf.write(rg, 'time', dimensions={'time':None}, attributes={'axis':'T', 'long_name':'Time in seconds', 'units':'seconds'})
nccf.write(rg, 'it', d[-1], dimensions=['it'])
nccf.write(rg, 'jt', d[-2], dimensions=['jt'])
nccf.write(rg, 'Temp', T, dimensions=['time','jt','it'])
nccf.write(rg, 'time', 43200., record=0)
nccf.write(rg, 'time', 86400., record=1)
nccf.write(rg, 'Temp', T, dimensions=['time','jt','it'], record=1)
dump(rg)
rg.close()
print('======= unlimited finished') ; print()
def enableDebugging(newValue=True):
"""
  Sets the global parameter "debug" to control debugging information. This function is needed for
  controlling the debugging of routines imported from gplot.py in other scripts.
"""
global debug
debug = newValue
# Invoke parseCommandLine(), the top-level prodedure
if __name__ == '__main__': testNCCF()
| adcroft/pyGVtools | nccf.py | Python | gpl-2.0 | 11,789 | [
"NetCDF"
] | eb9979203a9fba98acad544eba2aa9aac8291003185b65a34aac2e9b16646dc1 |
# Copyright (C) 2013,2014,2015,2016 The ESPResSo project
# Copyright (C) 2012 Olaf Lenz
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This module parses the feature definition file features.def
#
import fileinput, string, re
class SyntaxError:
def __init__(self, message, instead):
self.message = message
self.filename = fileinput.filename()
self.lineno = fileinput.filelineno()
self.instead = instead
def __str__(self):
return '%s: %2d: %s in the following line:\n%s' % \
(self.filename, self.lineno, self.message, self.instead)
def toCPPExpr(expr):
expr = expr.replace('and', ' && ')
expr = expr.replace('or', ' || ')
expr = expr.replace('not', ' !')
expr = re.sub('([A-Z0-9_]+)', 'defined(\\1)', expr)
return expr
class defs:
def __init__(self, filename):
# complete set of all defined features
allfeatures = set()
# allfeatures minus externals and derived
features = set()
# list of implications (pairs of feature -> implied feature)
implications = list()
# list of requirements (pairs of feature -> requirement expr)
requirements = list()
# set of derived features
derived = set()
# list of derivations (pairs of feature -> derivation expr)
derivations = list()
# list of external features
externals = set()
# list of features that are to be tested
notestfeatures = set()
for line in fileinput.input(filename):
line = line.strip()
# Ignore empty and comment lines
if len(line) == 0 or line.startswith('#') \
or line.startswith('//') or line.startswith('/*'): continue
# Tokenify line
tokens = line.split(None, 2)
# Register the feature
feature = tokens.pop(0)
allfeatures.add(feature)
# get the keyword
if len(tokens) > 0:
keyword = tokens.pop(0)
if len(tokens) == 0:
rest = None
else:
rest = tokens[0]
# derived
if keyword == 'equals':
if rest is None:
raise SyntaxError("<feature> equals <expr>", line)
if feature in derived:
raise SyntaxError("Derived feature is already defined above:", line);
if feature in externals:
raise SyntaxError("Derived feature is already defined as external above:", line);
derived.add(feature)
derivations.append((feature, rest, toCPPExpr(rest)))
# externals
elif keyword == 'external':
if rest is not None:
raise SyntaxError("<feature> external", line)
if feature in derived:
raise SyntaxError("External feature is already defined as derived above:", line);
implied = set(map((lambda x_y:x_y[1]), implications))
if feature in implied:
raise SyntaxError("External feature is implied above:", line);
externals.add(feature)
# implications
elif keyword == 'implies':
if rest is None:
raise SyntaxError("<feature> implies [<feature>...]", line)
tokens = rest.split()
for implied in tokens:
if implied.endswith(','): implied = implied[:-1]
if implied in externals:
raise SyntaxError("Implied feature %s is already defined as external above:" % feature, line);
implications.append((feature, implied))
# requires
elif keyword == 'requires':
if rest is None:
raise SyntaxError("<feature> requires <expr>", line)
requirements.append((feature, rest, toCPPExpr(rest)))
elif keyword == 'notest':
if rest is not None:
raise SyntaxError("<feature> notest", line)
notestfeatures.add(feature)
features = allfeatures.difference(derived)
features = features.difference(externals)
self.allfeatures = allfeatures
self.features = features
self.requirements = requirements
self.implications = implications
self.derived = derived
self.derivations = derivations
self.externals = externals
self.notestfeatures = notestfeatures
def check_validity(self, activated):
"""Check whether a set of features is valid.
Returns None if it is not and the set of features including implied features if it is.
"""
newset = activated.copy()
# print "Verifying: " + str(activated) + "..."
# handle implications
for feature, implied in self.implications:
# print feature, ' -> ', implied
if feature in newset and not implied in newset:
newset.add(implied)
# print 'Implied set: ' + str(newset)
# handle requirements
featurevars=dict()
derived = list(map((lambda x_y_z:x_y_z[0]), self.derivations))
allfeatures = self.features.union(derived, self.externals)
for feature in allfeatures:
featurevars[feature] = feature in newset
for feature, expr, undef in self.requirements:
# print 'Requirement: ', feature, ' -> ', expr
if feature in newset:
if not eval(expr, featurevars):
return None
# print 'Resulting set: ' + str(newset)
return newset
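# --- Added illustrative example (not part of the original module) -----------
# A minimal sketch of parsing a tiny feature definition file and validating a
# feature set against it; the feature names and the temporary file are
# hypothetical.
def _example_check_validity():
    import os, tempfile
    handle, path = tempfile.mkstemp(suffix='.def')
    with os.fdopen(handle, 'w') as f:
        f.write("A\nB\nC implies A\nD requires A and B\n")
    d = defs(path)
    implied = d.check_validity(set(['C']))    # -> set(['A', 'C'])
    invalid = d.check_validity(set(['D']))    # -> None, requirement not met
    os.remove(path)
    return implied, invalid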
# Test whether all implied features or features in an expression are defined
| Marcello-Sega/espresso | src/featuredefs.py | Python | gpl-3.0 | 6,617 | [
"ESPResSo"
] | f73c45035f93894f5b55667a862639b65a8f6be3c547017a77e5f3a51d6463f4 |
# This code is licensed under the New BSD License
# 2009, Alexander Artemenko <[email protected]>
# For other contacts, visit http://aartemenko.com
import os
import sys
try:
from elixir import metadata, setup_all, create_all, session
from models import *
from sqlalchemy.sql import not_
except ImportError:
"""Ignore this imports because they may fail
during __version__ import."""
from utils import get_or_create, make_list
from pdb import set_trace
from exceptions import *
__version__ = '0.1.3'
__all__ = ['GTD']
class GTD(object):
def __init__(self, filename):
db_url = 'sqlite:///' + filename
metadata.bind = db_url
#metadata.bind.echo = True
setup_all()
if not os.path.exists(filename):
create_all()
def addTask(self, title, note = None, tags = [], priority = 1):
task = Task(
title = title,
note = note,
tags = self._createTags(tags),
priority = priority)
session.commit()
return task
def getTaskById(self, task_id):
return Task.query.get(task_id)
def getTasks(self, tags = [], without_tags = [], show = 'open'):
assert(show in ('all', 'open', 'closed'))
query = Task.query
tags = make_list(tags)
for tag in tags:
query = query.filter(Task.tags.any(title = tag))
without_tags = make_list(without_tags)
for tag in without_tags:
query = query.filter(not_(Task.tags.any(title = tag)))
if show != 'all':
query = query.filter_by(done = (show == 'closed'))
return query.all()
def getTags(self):
return Tag.query.all()
def deleteTag(self, tag):
"""Delete tag by id or name"""
if isinstance(tag, basestring):
tag = Tag.query.filter_by(title = tag).one()
else:
tag = Tag.query.filter_by(id = tag).one()
tag.delete()
session.commit()
def getTagsRelated(self, tags):
tags = make_list(tags)
tasks = session.query(Task.id).filter(
Task.tags.any(Tag.title.in_(tags)))
task_ids = [t[0] for t in tasks]
new_tags = Tag.query.filter(Tag.tasks.any(Task.id.in_(task_ids))) \
.filter(not_(Tag.title.in_(tags)))
return new_tags.all()
def removeAll(self):
for task in Task.query.all():
task.delete()
Tag.query.delete()
session.commit()
def save(self, obj):
'''Updates object and commit session.'''
obj.update()
session.commit()
def closeTask(self, task_id):
task = self.getTaskById(task_id)
if task is None:
raise TaskNotFound
task.done = True
self.save(task)
def deleteTask(self, task_id):
task = self.getTaskById(task_id)
session.delete(task)
session.commit()
def _createTags(self, tags):
return [get_or_create(Tag, title = tag) for tag in tags]
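# --- Added illustrative example (not part of the original package) -----------
# A minimal usage sketch: open a database file, add a task and query it back.
# The database path is a hypothetical placeholder; task.id is the primary key
# assigned by elixir after the commit.
def _example_usage():
    gtd = GTD('/tmp/example-gtdzen.sqlite')
    task = gtd.addTask(u'Write report', tags=[u'work', u'urgent'], priority=2)
    open_tasks = gtd.getTasks(tags=[u'work'], show='open')
    gtd.closeTask(task.id)
    return open_tasks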
| svetlyak40wt/gtdzen | gtdzen/__init__.py | Python | bsd-3-clause | 3,063 | [
"VisIt"
] | 1799bbdc4291a95b449e78684b88f70bc91f1bbb4ceac188273ecfb94ebf201f |
#!/usr/bin/env python
# This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import cmd
import glob
import os
import time
import sys
import subprocess
import codecs
import argparse
import locale
import logging
import traceback
from . import printcore
from printrun_utils import install_locale, run_command, \
format_time, format_duration, RemainingTimeEstimator, \
get_home_pos, parse_build_dimensions
install_locale('pronterface')
# from printrun import gcoder
from functools import wraps
if os.name == "nt":
try:
import _winreg
except:
pass
READLINE = True
try:
import readline
try:
readline.rl.mode.show_all_if_ambiguous = "on" # config pyreadline on windows
except:
pass
except:
READLINE = False # neither readline module is available
def dosify(name):
return os.path.split(name)[1].split(".")[0][:8] + ".g"
def setting_add_tooltip(func):
@wraps(func)
def decorator(self, *args, **kwargs):
widget = func(self, *args, **kwargs)
helptxt = self.help or ""
sep, deftxt = "", ""
if len(helptxt):
sep = "\n"
if helptxt.find("\n") >= 0:
sep = "\n\n"
        if self.default != "":
deftxt = _("Default: ")
resethelp = _("(Control-doubleclick to reset to default value)")
if len(repr(self.default)) > 10:
deftxt += "\n " + repr(self.default).strip("'") + "\n" + resethelp
else:
deftxt += repr(self.default) + " " + resethelp
helptxt += sep + deftxt
if len(helptxt):
widget.SetToolTipString(helptxt)
return widget
return decorator
class Setting(object):
DEFAULT_GROUP = "Printer"
hidden = False
def __init__(self, name, default, label = None, help = None, group = None):
self.name = name
self.default = default
self._value = default
self.label = label
self.help = help
self.group = group if group else Setting.DEFAULT_GROUP
def _get_value(self):
return self._value
def _set_value(self, value):
raise NotImplementedError
value = property(_get_value, _set_value)
def set_default(self, e):
import wx
        if e.CmdDown() and e.ButtonDClick() and self.default != "":
confirmation = wx.MessageDialog(None, _("Are you sure you want to reset the setting to the default value: {0!r} ?").format(self.default), _("Confirm set default"), wx.ICON_EXCLAMATION | wx.YES_NO | wx.NO_DEFAULT)
if confirmation.ShowModal() == wx.ID_YES:
self._set_value(self.default)
else:
e.Skip()
@setting_add_tooltip
def get_label(self, parent):
import wx
widget = wx.StaticText(parent, -1, self.label or self.name)
widget.set_default = self.set_default
return widget
@setting_add_tooltip
def get_widget(self, parent):
return self.get_specific_widget(parent)
def get_specific_widget(self, parent):
raise NotImplementedError
def update(self):
raise NotImplementedError
def __str__(self):
return self.name
def __repr__(self):
return self.name
class HiddenSetting(Setting):
hidden = True
def _set_value(self, value):
self._value = value
value = property(Setting._get_value, _set_value)
class wxSetting(Setting):
widget = None
def _set_value(self, value):
self._value = value
if self.widget:
self.widget.SetValue(value)
value = property(Setting._get_value, _set_value)
def update(self):
self.value = self.widget.GetValue()
class StringSetting(wxSetting):
def get_specific_widget(self, parent):
import wx
self.widget = wx.TextCtrl(parent, -1, str(self.value))
return self.widget
class ComboSetting(wxSetting):
def __init__(self, name, default, choices, label = None, help = None, group = None):
super(ComboSetting, self).__init__(name, default, label, help, group)
self.choices = choices
def get_specific_widget(self, parent):
import wx
self.widget = wx.ComboBox(parent, -1, str(self.value), choices = self.choices, style = wx.CB_DROPDOWN)
return self.widget
class SpinSetting(wxSetting):
def __init__(self, name, default, min, max, label = None, help = None, group = None):
super(SpinSetting, self).__init__(name, default, label, help, group)
self.min = min
self.max = max
def get_specific_widget(self, parent):
import wx
self.widget = wx.SpinCtrl(parent, -1, min = self.min, max = self.max)
self.widget.SetValue(self.value)
return self.widget
class FloatSpinSetting(SpinSetting):
def get_specific_widget(self, parent):
from wx.lib.agw.floatspin import FloatSpin
self.widget = FloatSpin(parent, -1, value = self.value, min_val = self.min, max_val = self.max, digits = 2)
return self.widget
class BooleanSetting(wxSetting):
def _get_value(self):
return bool(self._value)
def _set_value(self, value):
self._value = value
if self.widget:
self.widget.SetValue(bool(value))
value = property(_get_value, _set_value)
def get_specific_widget(self, parent):
import wx
self.widget = wx.CheckBox(parent, -1)
self.widget.SetValue(bool(self.value))
return self.widget
class StaticTextSetting(wxSetting):
def __init__(self, name, label = " ", text = "", help = None, group = None):
super(StaticTextSetting, self).__init__(name, "", label, help, group)
self.text = text
def update(self):
pass
def _get_value(self):
return ""
def _set_value(self, value):
pass
def get_specific_widget(self, parent):
import wx
self.widget = wx.StaticText(parent, -1, self.text)
return self.widget
class BuildDimensionsSetting(wxSetting):
widgets = None
def _set_value(self, value):
self._value = value
if self.widgets:
self._set_widgets_values(value)
value = property(wxSetting._get_value, _set_value)
def _set_widgets_values(self, value):
build_dimensions_list = parse_build_dimensions(value)
for i in range(len(self.widgets)):
self.widgets[i].SetValue(build_dimensions_list[i])
def get_widget(self, parent):
from wx.lib.agw.floatspin import FloatSpin
import wx
build_dimensions = parse_build_dimensions(self.value)
self.widgets = []
w = lambda val, m, M: self.widgets.append(FloatSpin(parent, -1, value = val, min_val = m, max_val = M, digits = 2))
addlabel = lambda name, pos: self.widget.Add(wx.StaticText(parent, -1, name), pos = pos, flag = wx.ALIGN_CENTER_VERTICAL | wx.RIGHT, border = 5)
addwidget = lambda *pos: self.widget.Add(self.widgets[-1], pos = pos, flag = wx.RIGHT, border = 5)
self.widget = wx.GridBagSizer()
addlabel(_("Width"), (0, 0))
w(build_dimensions[0], 0, 2000)
addwidget(0, 1)
addlabel(_("Depth"), (0, 2))
w(build_dimensions[1], 0, 2000)
addwidget(0, 3)
addlabel(_("Height"), (0, 4))
w(build_dimensions[2], 0, 2000)
addwidget(0, 5)
addlabel(_("X offset"), (1, 0))
w(build_dimensions[3], -2000, 2000)
addwidget(1, 1)
addlabel(_("Y offset"), (1, 2))
w(build_dimensions[4], -2000, 2000)
addwidget(1, 3)
addlabel(_("Z offset"), (1, 4))
w(build_dimensions[5], -2000, 2000)
addwidget(1, 5)
addlabel(_("X home pos."), (2, 0))
w(build_dimensions[6], -2000, 2000)
self.widget.Add(self.widgets[-1], pos = (2, 1))
addlabel(_("Y home pos."), (2, 2))
w(build_dimensions[7], -2000, 2000)
self.widget.Add(self.widgets[-1], pos = (2, 3))
addlabel(_("Z home pos."), (2, 4))
w(build_dimensions[8], -2000, 2000)
self.widget.Add(self.widgets[-1], pos = (2, 5))
return self.widget
def update(self):
values = [float(w.GetValue()) for w in self.widgets]
self.value = "%.02fx%.02fx%.02f%+.02f%+.02f%+.02f%+.02f%+.02f%+.02f" % tuple(values)
class Settings(object):
#def _temperature_alias(self): return {"pla":210, "abs":230, "off":0}
#def _temperature_validate(self, v):
# if v < 0: raise ValueError("You cannot set negative temperatures. To turn the hotend off entirely, set its temperature to 0.")
#def _bedtemperature_alias(self): return {"pla":60, "abs":110, "off":0}
def _baudrate_list(self): return ["2400", "9600", "19200", "38400", "57600", "115200", "250000"]
def __init__(self):
# defaults here.
# the initial value determines the type
self._add(StringSetting("port", "", _("Serial port"), _("Port used to communicate with printer")))
self._add(ComboSetting("baudrate", 115200, self._baudrate_list(), _("Baud rate"), _("Communications Speed")))
self._add(SpinSetting("bedtemp_abs", 110, 0, 400, _("Bed temperature for ABS"), _("Heated Build Platform temp for ABS (deg C)"), "Printer"))
self._add(SpinSetting("bedtemp_pla", 60, 0, 400, _("Bed temperature for PLA"), _("Heated Build Platform temp for PLA (deg C)"), "Printer"))
self._add(SpinSetting("temperature_abs", 230, 0, 400, _("Extruder temperature for ABS"), _("Extruder temp for ABS (deg C)"), "Printer"))
self._add(SpinSetting("temperature_pla", 185, 0, 400, _("Extruder temperature for PLA"), _("Extruder temp for PLA (deg C)"), "Printer"))
self._add(SpinSetting("xy_feedrate", 3000, 0, 50000, _("X && Y manual feedrate"), _("Feedrate for Control Panel Moves in X and Y (mm/min)"), "Printer"))
self._add(SpinSetting("z_feedrate", 200, 0, 50000, _("Z manual feedrate"), _("Feedrate for Control Panel Moves in Z (mm/min)"), "Printer"))
self._add(SpinSetting("e_feedrate", 100, 0, 1000, _("E manual feedrate"), _("Feedrate for Control Panel Moves in Extrusions (mm/min)"), "Printer"))
self._add(StringSetting("slicecommand", "python skeinforge/skeinforge_application/skeinforge_utilities/skeinforge_craft.py $s", _("Slice command"), _("Slice command"), "External"))
self._add(StringSetting("sliceoptscommand", "python skeinforge/skeinforge_application/skeinforge.py", _("Slicer options command"), _("Slice settings command"), "External"))
self._add(StringSetting("final_command", "", _("Final command"), _("Executable to run when the print is finished"), "External"))
self._add(StringSetting("error_command", "", _("Error command"), _("Executable to run when an error occurs"), "External"))
self._add(HiddenSetting("project_offset_x", 0.0))
self._add(HiddenSetting("project_offset_y", 0.0))
self._add(HiddenSetting("project_interval", 2.0))
self._add(HiddenSetting("project_pause", 2.5))
self._add(HiddenSetting("project_scale", 1.0))
self._add(HiddenSetting("project_x", 1024))
self._add(HiddenSetting("project_y", 768))
self._add(HiddenSetting("project_projected_x", 150.0))
self._add(HiddenSetting("project_direction", "Top Down"))
self._add(HiddenSetting("project_overshoot", 3.0))
self._add(HiddenSetting("project_z_axis_rate", 200))
self._add(HiddenSetting("project_layer", 0.1))
self._add(HiddenSetting("project_prelift_gcode", ""))
self._add(HiddenSetting("project_postlift_gcode", ""))
self._add(HiddenSetting("pause_between_prints", True))
self._add(HiddenSetting("default_extrusion", 5.0))
self._add(HiddenSetting("last_extrusion", 5.0))
_settings = []
def __setattr__(self, name, value):
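        # Setting objects are stored under "_<name>" (and listed in _settings unless hidden);
        # assigning a plain value to an existing setting updates its .value, while assigning
        # to an unknown name wraps the value in a new StringSetting.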
if name.startswith("_"):
return object.__setattr__(self, name, value)
if isinstance(value, Setting):
if not value.hidden:
self._settings.append(value)
object.__setattr__(self, "_" + name, value)
elif hasattr(self, "_" + name):
getattr(self, "_" + name).value = value
else:
setattr(self, name, StringSetting(name = name, default = value))
def __getattr__(self, name):
if name.startswith("_"):
return object.__getattribute__(self, name)
return getattr(self, "_" + name).value
def _add(self, setting, callback = None):
setattr(self, setting.name, setting)
if callback:
setattr(self, "_" + setting.name + "_cb", callback)
def _set(self, key, value):
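        # Resolution order: optional _<key>_alias mapping, optional _<key>_validate check,
        # coercion to the current attribute's type, then an optional _<key>_cb callback.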
try:
value = getattr(self, "_%s_alias" % key)()[value]
except KeyError:
pass
except AttributeError:
pass
try:
getattr(self, "_%s_validate" % key)(value)
except AttributeError:
pass
t = type(getattr(self, key))
if t == bool and value == "False": setattr(self, key, False)
else: setattr(self, key, t(value))
try:
cb = None
try:
cb = getattr(self, "_%s_cb" % key)
except AttributeError:
pass
if cb is not None: cb(key, value)
except:
logging.warning((_("Failed to run callback after setting \"%s\":") % key) +
"\n" + traceback.format_exc())
return value
def _tabcomplete(self, key):
try:
return getattr(self, "_%s_list" % key)()
except AttributeError:
pass
try:
return getattr(self, "_%s_alias" % key)().keys()
except AttributeError:
pass
return []
def _all_settings(self):
return self._settings
class Status:
def __init__(self):
self.extruder_temp = 0
self.extruder_temp_target = 0
self.bed_temp = 0
self.bed_temp_target = 0
self.print_job = None
self.print_job_progress = 1.0
def update_tempreading(self, tempstr):
r = tempstr.split()
# eg. r = ["ok", "T:20.5", "/0.0", "B:0.0", "/0.0", "@:0"]
if len(r) == 6:
self.extruder_temp = float(r[1][2:])
self.extruder_temp_target = float(r[2][1:])
self.bed_temp = float(r[3][2:])
self.bed_temp_target = float(r[4][1:])
@property
def bed_enabled(self):
return self.bed_temp != 0
@property
def extruder_enabled(self):
return self.extruder_temp != 0
class pronsole(cmd.Cmd):
def __init__(self):
cmd.Cmd.__init__(self)
if not READLINE:
self.completekey = None
self.status = Status()
self.dynamic_temp = False
self.compute_eta = None
self.p = printcore.printcore()
self.p.recvcb = self.recvcb
self.p.startcb = self.startcb
self.p.endcb = self.endcb
self.p.layerchangecb = self.layer_change_cb
self.recvlisteners = []
self.in_macro = False
self.p.onlinecb = self.online
self.p.errorcb = self.logError
self.fgcode = None
self.listing = 0
self.sdfiles = []
self.paused = False
self.sdprinting = 0
self.temps = {"pla": "185", "abs": "230", "off": "0"}
self.bedtemps = {"pla": "60", "abs": "110", "off": "0"}
self.percentdone = 0
self.tempreadings = ""
self.macros = {}
self.rc_loaded = False
self.processing_rc = False
self.processing_args = False
self.settings = Settings()
self.settings._add(BuildDimensionsSetting("build_dimensions", "200x200x100+0+0+0+0+0+0", _("Build dimensions"), _("Dimensions of Build Platform\n & optional offset of origin\n & optional switch position\n\nExamples:\n XXXxYYY\n XXX,YYY,ZZZ\n XXXxYYYxZZZ+OffX+OffY+OffZ\nXXXxYYYxZZZ+OffX+OffY+OffZ+HomeX+HomeY+HomeZ"), "Printer"), self.update_build_dimensions)
self.settings._port_list = self.scanserial
self.settings._temperature_abs_cb = self.set_temp_preset
self.settings._temperature_pla_cb = self.set_temp_preset
self.settings._bedtemp_abs_cb = self.set_temp_preset
self.settings._bedtemp_pla_cb = self.set_temp_preset
self.update_build_dimensions(None, self.settings.build_dimensions)
self.monitoring = 0
self.starttime = 0
self.extra_print_time = 0
self.silent = False
self.commandprefixes = 'MGT$'
self.promptstrs = {"offline": "%(bold)suninitialized>%(normal)s ",
"fallback": "%(bold)sPC>%(normal)s ",
"macro": "%(bold)s..>%(normal)s ",
"online": "%(bold)sT:%(extruder_temp_fancy)s %(progress_fancy)s >%(normal)s "}
def confirm(self):
y_or_n = raw_input("y/n: ")
if y_or_n == "y":
return True
elif y_or_n != "n":
return self.confirm()
return False
def log(self, *msg):
print u"".join(unicode(i) for i in msg)
def logError(self, *msg):
msg = u"".join(unicode(i) for i in msg)
logging.error(msg)
if not self.settings.error_command:
return
run_command(self.settings.error_command,
{"$m": msg},
stderr = subprocess.STDOUT, stdout = subprocess.PIPE,
blocking = False)
def promptf(self):
"""A function to generate prompts so that we can do dynamic prompts. """
if self.in_macro:
promptstr = self.promptstrs["macro"]
elif not self.p.online:
promptstr = self.promptstrs["offline"]
elif self.status.extruder_enabled:
promptstr = self.promptstrs["online"]
else:
promptstr = self.promptstrs["fallback"]
if not "%" in promptstr:
return promptstr
else:
specials = {}
specials["extruder_temp"] = str(int(self.status.extruder_temp))
specials["extruder_temp_target"] = str(int(self.status.extruder_temp_target))
if self.status.extruder_temp_target == 0:
specials["extruder_temp_fancy"] = str(int(self.status.extruder_temp))
else:
specials["extruder_temp_fancy"] = "%s/%s" % (str(int(self.status.extruder_temp)), str(int(self.status.extruder_temp_target)))
if self.p.printing:
progress = int(1000 * float(self.p.queueindex) / len(self.p.mainqueue)) / 10
elif self.sdprinting:
progress = self.percentdone
else:
progress = 0.0
specials["progress"] = str(progress)
if self.p.printing or self.sdprinting:
specials["progress_fancy"] = str(progress) + "%"
else:
specials["progress_fancy"] = "?%"
specials["bold"] = "\033[01m"
specials["normal"] = "\033[00m"
return promptstr % specials
def postcmd(self, stop, line):
""" A hook we override to generate prompts after
each command is executed, for the next prompt.
We also use it to send M105 commands so that
temp info gets updated for the prompt."""
if self.p.online and self.dynamic_temp:
self.p.send_now("M105")
self.prompt = self.promptf()
return stop
def set_temp_preset(self, key, value):
if not key.startswith("bed"):
self.temps["pla"] = str(self.settings.temperature_pla)
self.temps["abs"] = str(self.settings.temperature_abs)
self.log("Hotend temperature presets updated, pla:%s, abs:%s" % (self.temps["pla"], self.temps["abs"]))
else:
self.bedtemps["pla"] = str(self.settings.bedtemp_pla)
self.bedtemps["abs"] = str(self.settings.bedtemp_abs)
self.log("Bed temperature presets updated, pla:%s, abs:%s" % (self.bedtemps["pla"], self.bedtemps["abs"]))
def scanserial(self):
"""scan for available ports. return a list of device names."""
baselist = []
if os.name == "nt":
try:
key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, "HARDWARE\\DEVICEMAP\\SERIALCOMM")
i = 0
while(1):
baselist += [_winreg.EnumValue(key, i)[1]]
i += 1
except:
pass
for g in ['/dev/ttyUSB*', '/dev/ttyACM*', "/dev/tty.*", "/dev/cu.*", "/dev/rfcomm*"]:
baselist += glob.glob(g)
return filter(self._bluetoothSerialFilter, baselist)
def _bluetoothSerialFilter(self, serial):
return not ("Bluetooth" in serial or "FireFly" in serial)
def online(self):
self.log("\rPrinter is now online")
self.write_prompt()
def write_prompt(self):
sys.stdout.write(self.promptf())
sys.stdout.flush()
def help_help(self, l = ""):
self.do_help("")
def do_gcodes(self, l = ""):
self.help_gcodes()
def help_gcodes(self):
self.log("Gcodes are passed through to the printer as they are")
def complete_macro(self, text, line, begidx, endidx):
if (len(line.split()) == 2 and line[-1] != " ") or (len(line.split()) == 1 and line[-1] == " "):
return [i for i in self.macros.keys() if i.startswith(text)]
elif(len(line.split()) == 3 or (len(line.split()) == 2 and line[-1] == " ")):
return [i for i in ["/D", "/S"] + self.completenames(text) if i.startswith(text)]
else:
return []
def hook_macro(self, l):
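        # Temporary replacement for onecmd() while a multi-line macro is being entered:
        # indented lines are appended to the definition, the first unindented line ends it.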
l = l.rstrip()
ls = l.lstrip()
ws = l[:len(l) - len(ls)] # just leading whitespace
if len(ws) == 0:
self.end_macro()
# pass the unprocessed line to regular command processor to not require empty line in .pronsolerc
return self.onecmd(l)
self.cur_macro_def += l + "\n"
def end_macro(self):
if "onecmd" in self.__dict__: del self.onecmd # remove override
self.in_macro = False
self.prompt = self.promptf()
if self.cur_macro_def != "":
self.macros[self.cur_macro_name] = self.cur_macro_def
macro = self.compile_macro(self.cur_macro_name, self.cur_macro_def)
setattr(self.__class__, "do_" + self.cur_macro_name, lambda self, largs, macro = macro: macro(self, *largs.split()))
setattr(self.__class__, "help_" + self.cur_macro_name, lambda self, macro_name = self.cur_macro_name: self.subhelp_macro(macro_name))
if not self.processing_rc:
self.log("Macro '" + self.cur_macro_name + "' defined")
# save it
if not self.processing_args:
macro_key = "macro " + self.cur_macro_name
macro_def = macro_key
if "\n" in self.cur_macro_def:
macro_def += "\n"
else:
macro_def += " "
macro_def += self.cur_macro_def
self.save_in_rc(macro_key, macro_def)
else:
self.logError("Empty macro - cancelled")
del self.cur_macro_name, self.cur_macro_def
def parseusercmd(self, line):
pass
def compile_macro_line(self, line):
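        # Compiles one macro source line into Python: e.g. a macro line "G28 {0}" is turned
        # into self.parseusercmd("G28 {0}".format(*arg)) followed by self.onecmd(...) with the
        # same argument, while lines starting with "!" are emitted as raw Python code.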
line = line.rstrip()
ls = line.lstrip()
ws = line[:len(line) - len(ls)] # just leading whitespace
if ls == "" or ls.startswith('#'): return "" # no code
if ls.startswith('!'):
return ws + ls[1:] + "\n" # python mode
else:
ls = ls.replace('"', '\\"') # need to escape double quotes
ret = ws + 'self.parseusercmd("' + ls + '".format(*arg))\n' # parametric command mode
return ret + ws + 'self.onecmd("' + ls + '".format(*arg))\n'
def compile_macro(self, macro_name, macro_def):
if macro_def.strip() == "":
self.logError("Empty macro - cancelled")
return
macro = None
pycode = "def macro(self,*arg):\n"
if "\n" not in macro_def.strip():
pycode += self.compile_macro_line(" " + macro_def.strip())
else:
lines = macro_def.split("\n")
for l in lines:
pycode += self.compile_macro_line(l)
exec pycode
return macro
def start_macro(self, macro_name, prev_definition = "", suppress_instructions = False):
if not self.processing_rc and not suppress_instructions:
self.logError("Enter macro using indented lines, end with empty line")
self.cur_macro_name = macro_name
self.cur_macro_def = ""
self.onecmd = self.hook_macro # override onecmd temporarily
self.in_macro = False
self.prompt = self.promptf()
def delete_macro(self, macro_name):
if macro_name in self.macros.keys():
delattr(self.__class__, "do_" + macro_name)
del self.macros[macro_name]
self.log("Macro '" + macro_name + "' removed")
if not self.processing_rc and not self.processing_args:
self.save_in_rc("macro " + macro_name, "")
else:
self.logError("Macro '" + macro_name + "' is not defined")
def do_macro(self, args):
if args.strip() == "":
self.print_topics("User-defined macros", map(str, self.macros.keys()), 15, 80)
return
arglist = args.split(None, 1)
macro_name = arglist[0]
if macro_name not in self.macros and hasattr(self.__class__, "do_" + macro_name):
self.logError("Name '" + macro_name + "' is being used by built-in command")
return
if len(arglist) == 2:
macro_def = arglist[1]
if macro_def.lower() == "/d":
self.delete_macro(macro_name)
return
if macro_def.lower() == "/s":
self.subhelp_macro(macro_name)
return
self.cur_macro_def = macro_def
self.cur_macro_name = macro_name
self.end_macro()
return
if macro_name in self.macros:
self.start_macro(macro_name, self.macros[macro_name])
else:
self.start_macro(macro_name)
def help_macro(self):
self.log("Define single-line macro: macro <name> <definition>")
self.log("Define multi-line macro: macro <name>")
self.log("Enter macro definition in indented lines. Use {0} .. {N} to substitute macro arguments")
self.log("Enter python code, prefixed with ! Use arg[0] .. arg[N] to substitute macro arguments")
self.log("Delete macro: macro <name> /d")
self.log("Show macro definition: macro <name> /s")
self.log("'macro' without arguments displays list of defined macros")
def subhelp_macro(self, macro_name):
if macro_name in self.macros.keys():
macro_def = self.macros[macro_name]
if "\n" in macro_def:
self.log("Macro '" + macro_name + "' defined as:")
self.log(self.macros[macro_name] + "----------------")
else:
self.log("Macro '" + macro_name + "' defined as: '" + macro_def + "'")
else:
self.logError("Macro '" + macro_name + "' is not defined")
def set(self, var, str):
try:
t = type(getattr(self.settings, var))
value = self.settings._set(var, str)
if not self.processing_rc and not self.processing_args:
self.save_in_rc("set " + var, "set %s %s" % (var, value))
except AttributeError:
logging.warning("Unknown variable '%s'" % var)
except ValueError, ve:
self.logError("Bad value for variable '%s', expecting %s (%s)" % (var, repr(t)[1:-1], ve.args[0]))
def do_set(self, argl):
args = argl.split(None, 1)
if len(args) < 1:
for k in [kk for kk in dir(self.settings) if not kk.startswith("_")]:
self.log("%s = %s" % (k, str(getattr(self.settings, k))))
return
if len(args) < 2:
try:
self.log("%s = %s" % (args[0], getattr(self.settings, args[0])))
except AttributeError:
logging.warning("Unknown variable '%s'" % args[0])
return
self.set(args[0], args[1])
def help_set(self):
self.log("Set variable: set <variable> <value>")
self.log("Show variable: set <variable>")
self.log("'set' without arguments displays all variables")
def complete_set(self, text, line, begidx, endidx):
if (len(line.split()) == 2 and line[-1] != " ") or (len(line.split()) == 1 and line[-1] == " "):
return [i for i in dir(self.settings) if not i.startswith("_") and i.startswith(text)]
elif(len(line.split()) == 3 or (len(line.split()) == 2 and line[-1] == " ")):
return [i for i in self.settings._tabcomplete(line.split()[1]) if i.startswith(text)]
else:
return []
def postloop(self):
self.p.disconnect()
cmd.Cmd.postloop(self)
def load_rc(self, rc_filename):
self.processing_rc = True
try:
rc = codecs.open(rc_filename, "r", "utf-8")
self.rc_filename = os.path.abspath(rc_filename)
for rc_cmd in rc:
if not rc_cmd.lstrip().startswith("#"):
self.onecmd(rc_cmd)
rc.close()
if hasattr(self, "cur_macro_def"):
self.end_macro()
self.rc_loaded = True
finally:
self.processing_rc = False
def load_default_rc(self, rc_filename = ".pronsolerc"):
if rc_filename == ".pronsolerc" and hasattr(sys, "frozen") and sys.frozen in ["windows_exe", "console_exe"]:
rc_filename = "printrunconf.ini"
try:
try:
self.load_rc(os.path.join(os.path.expanduser("~"), rc_filename))
except IOError:
self.load_rc(rc_filename)
except IOError:
# make sure the filename is initialized
self.rc_filename = os.path.abspath(os.path.join(os.path.expanduser("~"), rc_filename))
def save_in_rc(self, key, definition):
"""
Saves or updates macro or other definitions in .pronsolerc
key is prefix that determines what is being defined/updated (e.g. 'macro foo')
definition is the full definition (that is written to file). (e.g. 'macro foo move x 10')
Set key as empty string to just add (and not overwrite)
Set definition as empty string to remove it from .pronsolerc
To delete line from .pronsolerc, set key as the line contents, and definition as empty string
Only first definition with given key is overwritten.
Updates are made in the same file position.
Additions are made to the end of the file.
"""
rci, rco = None, None
if definition != "" and not definition.endswith("\n"):
definition += "\n"
try:
written = False
if os.path.exists(self.rc_filename):
import shutil
shutil.copy(self.rc_filename, self.rc_filename + "~bak")
rci = codecs.open(self.rc_filename + "~bak", "r", "utf-8")
rco = codecs.open(self.rc_filename, "w", "utf-8")
if rci is not None:
overwriting = False
for rc_cmd in rci:
l = rc_cmd.rstrip()
ls = l.lstrip()
ws = l[:len(l) - len(ls)] # just leading whitespace
if overwriting and len(ws) == 0:
overwriting = False
if not written and key != "" and rc_cmd.startswith(key) and (rc_cmd + "\n")[len(key)].isspace():
overwriting = True
written = True
rco.write(definition)
if not overwriting:
rco.write(rc_cmd)
if not rc_cmd.endswith("\n"): rco.write("\n")
if not written:
rco.write(definition)
if rci is not None:
rci.close()
rco.close()
#if definition != "":
# self.log("Saved '"+key+"' to '"+self.rc_filename+"'")
#else:
# self.log("Removed '"+key+"' from '"+self.rc_filename+"'")
except Exception, e:
self.logError("Saving failed for ", key + ":", str(e))
finally:
del rci, rco
def preloop(self):
self.log("Welcome to the printer console! Type \"help\" for a list of available commands.")
self.prompt = self.promptf()
cmd.Cmd.preloop(self)
def do_connect(self, l):
a = l.split()
p = self.scanserial()
port = self.settings.port
if (port == "" or port not in p) and len(p) > 0:
port = p[0]
baud = self.settings.baudrate or 115200
if len(a) > 0:
port = a[0]
if len(a) > 1:
try:
baud = int(a[1])
except:
self.log("Bad baud value '" + a[1] + "' ignored")
if len(p) == 0 and not port:
self.log("No serial ports detected - please specify a port")
return
if len(a) == 0:
self.log("No port specified - connecting to %s at %dbps" % (port, baud))
if port != self.settings.port:
self.settings.port = port
self.save_in_rc("set port", "set port %s" % port)
if baud != self.settings.baudrate:
self.settings.baudrate = baud
self.save_in_rc("set baudrate", "set baudrate %d" % baud)
self.p.connect(port, baud)
def help_connect(self):
self.log("Connect to printer")
self.log("connect <port> <baudrate>")
self.log("If port and baudrate are not specified, connects to first detected port at 115200bps")
ports = self.scanserial()
if(len(ports)):
self.log("Available ports: ", " ".join(ports))
else:
self.log("No serial ports were automatically found.")
def complete_connect(self, text, line, begidx, endidx):
if (len(line.split()) == 2 and line[-1] != " ") or (len(line.split()) == 1 and line[-1] == " "):
return [i for i in self.scanserial() if i.startswith(text)]
elif(len(line.split()) == 3 or (len(line.split()) == 2 and line[-1] == " ")):
return [i for i in ["2400", "9600", "19200", "38400", "57600", "115200"] if i.startswith(text)]
else:
return []
def do_disconnect(self, l):
self.p.disconnect()
def help_disconnect(self):
self.log("Disconnects from the printer")
def do_load(self, filename):
self._do_load(filename)
def _do_load(self, filename):
if not filename:
self.logError("No file name given.")
return
self.logError("Loading file: " + filename)
if not os.path.exists(filename):
self.logError("File not found!")
return
self.load_gcode(filename)
self.log(_("Loaded %s, %d lines.") % (filename, len(self.fgcode)))
self.log(_("Estimated duration: %s") % self.fgcode.estimate_duration())
def load_gcode(self, filename):
self.fgcode = gcoder.GCode(open(filename, "rU"),
get_home_pos(self.build_dimensions_list))
self.fgcode.estimate_duration()
self.filename = filename
def complete_load(self, text, line, begidx, endidx):
s = line.split()
if len(s) > 2:
return []
if (len(s) == 1 and line[-1] == " ") or (len(s) == 2 and line[-1] != " "):
if len(s) > 1:
return [i[len(s[1]) - len(text):] for i in glob.glob(s[1] + "*/") + glob.glob(s[1] + "*.g*")]
else:
return glob.glob("*/") + glob.glob("*.g*")
def help_load(self):
self.log("Loads a gcode file (with tab-completion)")
def do_upload(self, l):
names = l.split()
if len(names) == 2:
filename = names[0]
targetname = names[1]
else:
self.logError(_("Please enter target name in 8.3 format."))
return
if not self.p.online:
self.logError(_("Not connected to printer."))
return
self._do_load(filename)
self.log(_("Uploading as %s") % targetname)
self.log(_("Uploading %s") % self.filename)
self.p.send_now("M28 " + targetname)
self.log(_("Press Ctrl-C to interrupt upload."))
self.p.startprint(self.fgcode)
try:
sys.stdout.write(_("Progress: ") + "00.0%")
sys.stdout.flush()
time.sleep(1)
while self.p.printing:
time.sleep(1)
sys.stdout.write("\b\b\b\b\b%04.1f%%" % (100 * float(self.p.queueindex) / len(self.p.mainqueue),))
sys.stdout.flush()
self.p.send_now("M29 " + targetname)
            time.sleep(0.2)
self.p.clear = 1
self._do_ls(False)
self.log("\b\b\b\b\b100%.")
self.log(_("Upload completed. %s should now be on the card.") % targetname)
return
except:
self.logError(_("...interrupted!"))
self.p.pause()
self.p.send_now("M29 " + targetname)
time.sleep(0.2)
self.p.clear = 1
self.p.startprint(None)
self.logError(_("A partial file named %s may have been written to the sd card.") % targetname)
def complete_upload(self, text, line, begidx, endidx):
s = line.split()
if len(s) > 2:
return []
if (len(s) == 1 and line[-1] == " ") or (len(s) == 2 and line[-1] != " "):
if len(s) > 1:
return [i[len(s[1]) - len(text):] for i in glob.glob(s[1] + "*/") + glob.glob(s[1] + "*.g*")]
else:
return glob.glob("*/") + glob.glob("*.g*")
def help_upload(self):
self.log("Uploads a gcode file to the sd card")
def help_print(self):
if not self.fgcode:
self.log(_("Send a loaded gcode file to the printer. Load a file with the load command first."))
else:
self.log(_("Send a loaded gcode file to the printer. You have %s loaded right now.") % self.filename)
def do_print(self, l):
if not self.fgcode:
self.logError(_("No file loaded. Please use load first."))
return
if not self.p.online:
self.logError(_("Not connected to printer."))
return
self.log(_("Printing %s") % self.filename)
self.log(_("You can monitor the print with the monitor command."))
self.p.startprint(self.fgcode)
def do_pause(self, l):
if self.sdprinting:
self.p.send_now("M25")
else:
if not self.p.printing:
self.logError(_("Not printing, cannot pause."))
return
self.p.pause()
self.paused = True
def help_pause(self):
self.log(_("Pauses a running print"))
def pause(self, event):
return self.do_pause(None)
def do_resume(self, l):
if not self.paused:
self.logError(_("Not paused, unable to resume. Start a print first."))
return
self.paused = False
if self.sdprinting:
self.p.send_now("M24")
return
else:
self.p.resume()
def help_resume(self):
self.log(_("Resumes a paused print."))
def emptyline(self):
pass
def do_shell(self, l):
exec(l)
def listfiles(self, line, echo = False):
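        # Receive-callback for the M20 file listing: collects the names printed between
        # "Begin file list" and "End file list" into self.sdfiles (lowercased).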
if "Begin file list" in line:
self.listing = 1
elif "End file list" in line:
self.listing = 0
self.recvlisteners.remove(self.listfiles)
if echo:
self.log(_("Files on SD card:"))
self.log("\n".join(self.sdfiles))
elif self.listing:
self.sdfiles.append(line.strip().lower())
def _do_ls(self, echo):
# FIXME: this was 2, but I think it should rather be 0 as in do_upload
self.listing = 0
self.sdfiles = []
self.recvlisteners.append(lambda l: self.listfiles(l, echo))
self.p.send_now("M20")
def do_ls(self, l):
if not self.p.online:
self.logError(_("Printer is not online. Please connect to it first."))
return
self._do_ls(True)
def help_ls(self):
self.log(_("Lists files on the SD card"))
def waitforsdresponse(self, l):
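        # Receive-callback used around SD printing: starts the print (M24) once the file is
        # selected, clears state on "Done printing file", and parses M27 "SD printing byte x/y"
        # reports into self.percentdone.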
if "file.open failed" in l:
self.logError(_("Opening file failed."))
self.recvlisteners.remove(self.waitforsdresponse)
return
if "File opened" in l:
self.log(l)
if "File selected" in l:
self.log(_("Starting print"))
self.p.send_now("M24")
self.sdprinting = 1
#self.recvlisteners.remove(self.waitforsdresponse)
return
if "Done printing file" in l:
self.log(l)
self.sdprinting = 0
self.recvlisteners.remove(self.waitforsdresponse)
return
if "SD printing byte" in l:
#M27 handler
try:
resp = l.split()
vals = resp[-1].split("/")
self.percentdone = 100.0 * int(vals[0]) / int(vals[1])
except:
pass
def do_reset(self, l):
self.p.reset()
def help_reset(self):
self.log(_("Resets the printer."))
def do_sdprint(self, l):
if not self.p.online:
self.log(_("Printer is not online. Please connect to it first."))
return
self._do_ls(False)
while self.listfiles in self.recvlisteners:
time.sleep(0.1)
if l.lower() not in self.sdfiles:
self.log(_("File is not present on card. Please upload it first."))
return
self.recvlisteners.append(self.waitforsdresponse)
self.p.send_now("M23 " + l.lower())
self.log(_("Printing file: %s from SD card.") % l.lower())
self.log(_("Requesting SD print..."))
time.sleep(1)
def help_sdprint(self):
self.log(_("Print a file from the SD card. Tab completes with available file names."))
self.log(_("sdprint filename.g"))
def complete_sdprint(self, text, line, begidx, endidx):
if not self.sdfiles and self.p.online:
self._do_ls(False)
while self.listfiles in self.recvlisteners:
time.sleep(0.1)
if (len(line.split()) == 2 and line[-1] != " ") or (len(line.split()) == 1 and line[-1] == " "):
return [i for i in self.sdfiles if i.startswith(text)]
def recvcb(self, l):
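        # Main receive callback: records temperature reports, echoes non-"ok" lines unless a
        # listing or monitor loop is active, and fans every line out to self.recvlisteners.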
if "T:" in l:
self.tempreadings = l
self.status.update_tempreading(l)
tstring = l.rstrip()
if tstring != "ok" and not self.listing and not self.monitoring:
if tstring[:5] == "echo:":
tstring = tstring[5:].lstrip()
if self.silent is False: print "\r" + tstring.ljust(15)
sys.stdout.write(self.promptf())
sys.stdout.flush()
for i in self.recvlisteners:
i(l)
def startcb(self, resuming = False):
self.starttime = time.time()
if resuming:
print _("Print resumed at: %s") % format_time(self.starttime)
else:
print _("Print started at: %s") % format_time(self.starttime)
self.compute_eta = RemainingTimeEstimator(self.fgcode)
def endcb(self):
if self.p.queueindex == 0:
print_duration = int(time.time() - self.starttime + self.extra_print_time)
print _("Print ended at: %(end_time)s and took %(duration)s") % {"end_time": format_time(time.time()),
"duration": format_duration(print_duration)}
self.p.runSmallScript(self.endScript)
if not self.settings.final_command:
return
run_command(self.settings.final_command,
{"$s": str(self.filename),
"$t": format_duration(print_duration)},
stderr = subprocess.STDOUT, stdout = subprocess.PIPE,
blocking = False)
def layer_change_cb(self, newlayer):
if self.compute_eta:
secondselapsed = int(time.time() - self.starttime + self.extra_print_time)
self.compute_eta.update_layer(newlayer, secondselapsed)
def do_eta(self, l):
if not self.p.printing:
self.logError(_("Printer is not currently printing. No ETA available."))
else:
secondselapsed = int(time.time() - self.starttime + self.extra_print_time)
secondsremain, secondsestimate = self.compute_eta(self.p.queueindex, secondselapsed)
eta = _("Est: %s of %s remaining") % (format_duration(secondsremain),
format_duration(secondsestimate))
self.log(eta.strip())
def help_eta(self):
self.log(_("Displays estimated remaining print time."))
def help_shell(self):
self.log("Executes a python command. Example:")
self.log("! os.listdir('.')")
def default(self, l):
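        # Unknown commands starting with M, G, T or $ (upper or lower case) are sent to the
        # printer verbatim (lowercase is upcased first); an "@" prefix sends the rest of the
        # line as-is; anything else falls through to cmd.Cmd.default.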
if l[0] in self.commandprefixes.upper():
if self.p and self.p.online:
if not self.p.loud:
self.log("SENDING:" + l)
self.p.send_now(l)
else:
self.logError(_("Printer is not online."))
return
elif l[0] in self.commandprefixes.lower():
if self.p and self.p.online:
if not self.p.loud:
self.log("SENDING:" + l.upper())
self.p.send_now(l.upper())
else:
self.logError(_("Printer is not online."))
return
elif l[0] == "@":
if self.p and self.p.online:
if not self.p.loud:
self.log("SENDING:" + l[1:])
self.p.send_now(l[1:])
else:
self.logError(_("Printer is not online."))
return
else:
cmd.Cmd.default(self, l)
def tempcb(self, l):
if "T:" in l:
self.log(l.strip().replace("T", "Hotend").replace("B", "Bed").replace("ok ", ""))
def do_gettemp(self, l):
if "dynamic" in l:
self.dynamic_temp = True
if self.p.online:
self.p.send_now("M105")
time.sleep(0.75)
if not self.status.bed_enabled:
print "Hotend: %s/%s" % (self.status.extruder_temp, self.status.extruder_temp_target)
else:
print "Hotend: %s/%s" % (self.status.extruder_temp, self.status.extruder_temp_target)
print "Bed: %s/%s" % (self.status.bed_temp, self.status.bed_temp_target)
def help_gettemp(self):
self.log(_("Read the extruder and bed temperature."))
def do_settemp(self, l):
l = l.lower().replace(", ", ".")
for i in self.temps.keys():
l = l.replace(i, self.temps[i])
try:
f = float(l)
except:
self.logError(_("You must enter a temperature."))
return
if f >= 0:
if f > 250:
print _("%s is a high temperature to set your extruder to. Are you sure you want to do that?") % f
if not self.confirm():
return
if self.p.online:
self.p.send_now("M104 S" + l)
self.log(_("Setting hotend temperature to %s degrees Celsius.") % f)
else:
self.logError(_("Printer is not online."))
else:
self.logError(_("You cannot set negative temperatures. To turn the hotend off entirely, set its temperature to 0."))
def help_settemp(self):
self.log(_("Sets the hotend temperature to the value entered."))
self.log(_("Enter either a temperature in celsius or one of the following keywords"))
self.log(", ".join([i + "(" + self.temps[i] + ")" for i in self.temps.keys()]))
def complete_settemp(self, text, line, begidx, endidx):
if (len(line.split()) == 2 and line[-1] != " ") or (len(line.split()) == 1 and line[-1] == " "):
return [i for i in self.temps.keys() if i.startswith(text)]
def do_bedtemp(self, l):
f = None
try:
l = l.lower().replace(", ", ".")
for i in self.bedtemps.keys():
l = l.replace(i, self.bedtemps[i])
f = float(l)
except:
self.logError(_("You must enter a temperature."))
if f is not None and f >= 0:
if self.p.online:
self.p.send_now("M140 S" + l)
self.log(_("Setting bed temperature to %s degrees Celsius.") % f)
else:
self.logError(_("Printer is not online."))
else:
self.logError(_("You cannot set negative temperatures. To turn the bed off entirely, set its temperature to 0."))
def help_bedtemp(self):
self.log(_("Sets the bed temperature to the value entered."))
self.log(_("Enter either a temperature in celsius or one of the following keywords"))
self.log(", ".join([i + "(" + self.bedtemps[i] + ")" for i in self.bedtemps.keys()]))
def complete_bedtemp(self, text, line, begidx, endidx):
if (len(line.split()) == 2 and line[-1] != " ") or (len(line.split()) == 1 and line[-1] == " "):
return [i for i in self.bedtemps.keys() if i.startswith(text)]
def do_tool(self, l):
tool = None
try:
tool = int(l.lower().strip())
except:
self.logError(_("You must specify the tool index as an integer."))
if tool is not None and tool >= 0:
if self.p.online:
self.p.send_now("T%d" % tool)
self.log(_("Using tool %d.") % tool)
else:
self.logError(_("Printer is not online."))
else:
self.logError(_("You cannot set negative tool numbers."))
def help_tool(self):
self.log(_("Switches to the specified tool (e.g. doing tool 1 will emit a T1 G-Code)."))
def do_move(self, l):
if(len(l.split()) < 2):
self.logError(_("No move specified."))
return
if self.p.printing:
self.logError(_("Printer is currently printing. Please pause the print before you issue manual commands."))
return
if not self.p.online:
self.logError(_("Printer is not online. Unable to move."))
return
l = l.split()
if(l[0].lower() == "x"):
feed = self.settings.xy_feedrate
axis = "X"
elif(l[0].lower() == "y"):
feed = self.settings.xy_feedrate
axis = "Y"
elif(l[0].lower() == "z"):
feed = self.settings.z_feedrate
axis = "Z"
elif(l[0].lower() == "e"):
feed = self.settings.e_feedrate
axis = "E"
else:
self.logError(_("Unknown axis."))
return
try:
float(l[1]) # check if distance can be a float
except:
self.logError(_("Invalid distance"))
return
try:
feed = int(l[2])
except:
pass
self.p.send_now("G91")
self.p.send_now("G1 " + axis + str(l[1]) + " F" + str(feed))
self.p.send_now("G90")
def help_move(self):
self.log(_("Move an axis. Specify the name of the axis and the amount. "))
self.log(_("move X 10 will move the X axis forward by 10mm at %s mm/min (default XY speed)") % self.settings.xy_feedrate)
self.log(_("move Y 10 5000 will move the Y axis forward by 10mm at 5000mm/min"))
self.log(_("move Z -1 will move the Z axis down by 1mm at %s mm/min (default Z speed)") % self.settings.z_feedrate)
self.log(_("Common amounts are in the tabcomplete list."))
def complete_move(self, text, line, begidx, endidx):
if (len(line.split()) == 2 and line[-1] != " ") or (len(line.split()) == 1 and line[-1] == " "):
return [i for i in ["X ", "Y ", "Z ", "E "] if i.lower().startswith(text)]
elif(len(line.split()) == 3 or (len(line.split()) == 2 and line[-1] == " ")):
base = line.split()[-1]
rlen = 0
if base.startswith("-"):
rlen = 1
if line[-1] == " ":
base = ""
return [i[rlen:] for i in ["-100", "-10", "-1", "-0.1", "100", "10", "1", "0.1", "-50", "-5", "-0.5", "50", "5", "0.5", "-200", "-20", "-2", "-0.2", "200", "20", "2", "0.2"] if i.startswith(base)]
else:
return []
def do_extrude(self, l, override = None, overridefeed = 300):
length = self.settings.default_extrusion # default extrusion length
feed = self.settings.e_feedrate # default speed
if not self.p.online:
self.logError("Printer is not online. Unable to extrude.")
return
if self.p.printing:
self.logError(_("Printer is currently printing. Please pause the print before you issue manual commands."))
return
ls = l.split()
if len(ls):
try:
length = float(ls[0])
except:
self.logError(_("Invalid length given."))
if len(ls) > 1:
try:
feed = int(ls[1])
except:
self.logError(_("Invalid speed given."))
if override is not None:
length = override
feed = overridefeed
if length > 0:
self.log(_("Extruding %fmm of filament.") % (length,))
elif length < 0:
self.log(_("Reversing %fmm of filament.") % (-length,))
else:
self.log(_("Length is 0, not doing anything."))
self.p.send_now("G91")
self.p.send_now("G1 E" + str(length) + " F" + str(feed))
self.p.send_now("G90")
def help_extrude(self):
self.log(_("Extrudes a length of filament, 5mm by default, or the number of mm given as a parameter"))
self.log(_("extrude - extrudes 5mm of filament at 300mm/min (5mm/s)"))
self.log(_("extrude 20 - extrudes 20mm of filament at 300mm/min (5mm/s)"))
self.log(_("extrude -5 - REVERSES 5mm of filament at 300mm/min (5mm/s)"))
self.log(_("extrude 10 210 - extrudes 10mm of filament at 210mm/min (3.5mm/s)"))
def do_reverse(self, l):
length = self.settings.default_extrusion # default extrusion length
feed = self.settings.e_feedrate # default speed
if not self.p.online:
self.logError(_("Printer is not online. Unable to reverse."))
return
if self.p.printing:
self.logError(_("Printer is currently printing. Please pause the print before you issue manual commands."))
return
ls = l.split()
if len(ls):
try:
length = float(ls[0])
except:
self.logError(_("Invalid length given."))
if len(ls) > 1:
try:
feed = int(ls[1])
except:
self.logError(_("Invalid speed given."))
self.do_extrude("", -length, feed)
def help_reverse(self):
self.log(_("Reverses the extruder, 5mm by default, or the number of mm given as a parameter"))
self.log(_("reverse - reverses 5mm of filament at 300mm/min (5mm/s)"))
self.log(_("reverse 20 - reverses 20mm of filament at 300mm/min (5mm/s)"))
self.log(_("reverse 10 210 - extrudes 10mm of filament at 210mm/min (3.5mm/s)"))
self.log(_("reverse -5 - EXTRUDES 5mm of filament at 300mm/min (5mm/s)"))
def do_exit(self, l):
if self.status.extruder_temp_target != 0:
print "Setting extruder temp to 0"
self.p.send_now("M104 S0.0")
if self.status.bed_enabled:
            if self.status.bed_temp_target != 0:
print "Setting bed temp to 0"
self.p.send_now("M140 S0.0")
self.log("Disconnecting from printer...")
if self.p.printing:
print "Are you sure you want to exit while printing?"
print "(this will terminate the print)."
if not self.confirm():
return
self.log(_("Exiting program. Goodbye!"))
self.p.disconnect()
sys.exit()
def help_exit(self):
self.log(_("Disconnects from the printer and exits the program."))
def do_monitor(self, l):
interval = 5
if not self.p.online:
self.logError(_("Printer is not online. Please connect to it first."))
return
if not (self.p.printing or self.sdprinting):
self.logError(_("Printer is not printing. Please print something before monitoring."))
return
self.log(_("Monitoring printer, use ^C to interrupt."))
if len(l):
try:
interval = float(l)
except:
self.logError(_("Invalid period given."))
self.log(_("Updating values every %f seconds.") % (interval,))
self.monitoring = 1
prev_msg_len = 0
try:
while True:
self.p.send_now("M105")
if self.sdprinting:
self.p.send_now("M27")
time.sleep(interval)
#print (self.tempreadings.replace("\r", "").replace("T", "Hotend").replace("B", "Bed").replace("\n", "").replace("ok ", ""))
if self.p.printing:
preface = _("Print progress: ")
progress = 100 * float(self.p.queueindex) / len(self.p.mainqueue)
elif self.sdprinting:
preface = _("Print progress: ")
progress = self.percentdone
prev_msg = preface + "%.1f%%" % progress
if self.silent is False:
sys.stdout.write("\r" + prev_msg.ljust(prev_msg_len))
sys.stdout.flush()
prev_msg_len = len(prev_msg)
except KeyboardInterrupt:
if self.silent is False: print _("Done monitoring.")
self.monitoring = 0
def help_monitor(self):
self.log(_("Monitor a machine's temperatures and an SD print's status."))
self.log(_("monitor - Reports temperature and SD print status (if SD printing) every 5 seconds"))
self.log(_("monitor 2 - Reports temperature and SD print status (if SD printing) every 2 seconds"))
def expandcommand(self, c):
return c.replace("$python", sys.executable)
def do_skein(self, l):
l = l.split()
if len(l) == 0:
self.logError(_("No file name given."))
return
settings = 0
if l[0] == "set":
settings = 1
else:
self.log(_("Skeining file: %s") % l[0])
if not(os.path.exists(l[0])):
self.logError(_("File not found!"))
return
try:
if settings:
command = self.settings.sliceoptscommand
self.log(_("Entering slicer settings: %s") % command)
run_command(command, blocking = True)
else:
command = self.settings.slicecommand
self.log(_("Slicing: ") % command)
stl_name = l[0]
gcode_name = stl_name.replace(".stl", "_export.gcode").replace(".STL", "_export.gcode")
run_command(command,
{"$s": stl_name,
"$o": gcode_name},
blocking = True)
self.log(_("Loading sliced file."))
self.do_load(l[0].replace(".stl", "_export.gcode"))
except Exception, e:
self.logError(_("Slicing failed: %s") % e)
def complete_skein(self, text, line, begidx, endidx):
s = line.split()
if len(s) > 2:
return []
if (len(s) == 1 and line[-1] == " ") or (len(s) == 2 and line[-1] != " "):
if len(s) > 1:
return [i[len(s[1]) - len(text):] for i in glob.glob(s[1] + "*/") + glob.glob(s[1] + "*.stl")]
else:
return glob.glob("*/") + glob.glob("*.stl")
def help_skein(self):
self.log(_("Creates a gcode file from an stl model using the slicer (with tab-completion)"))
self.log(_("skein filename.stl - create gcode file"))
self.log(_("skein filename.stl view - create gcode file and view using skeiniso"))
self.log(_("skein set - adjust slicer settings"))
def do_home(self, l):
if not self.p.online:
self.logError(_("Printer is not online. Unable to move."))
return
if self.p.printing:
self.logError(_("Printer is currently printing. Please pause the print before you issue manual commands."))
return
if "x" in l.lower():
self.p.send_now("G28 X0")
if "y" in l.lower():
self.p.send_now("G28 Y0")
if "z" in l.lower():
self.p.send_now("G28 Z0")
if "e" in l.lower():
self.p.send_now("G92 E0")
if not len(l):
self.p.send_now("G28")
self.p.send_now("G92 E0")
def help_home(self):
self.log(_("Homes the printer"))
self.log(_("home - homes all axes and zeroes the extruder(Using G28 and G92)"))
self.log(_("home xy - homes x and y axes (Using G28)"))
self.log(_("home z - homes z axis only (Using G28)"))
self.log(_("home e - set extruder position to zero (Using G92)"))
self.log(_("home xyze - homes all axes and zeroes the extruder (Using G28 and G92)"))
def do_off(self, l):
self.off()
def off(self, ignore = None):
if self.p.online:
if self.p.printing: self.pause(None)
self.log(_("; Motors off"))
self.onecmd("M84")
self.log(_("; Extruder off"))
self.onecmd("M104 S0")
self.log(_("; Heatbed off"))
self.onecmd("M140 S0")
self.log(_("; Fan off"))
self.onecmd("M107")
self.log(_("; Power supply off"))
self.onecmd("M81")
else:
self.logError(_("Printer is not online. Unable to turn it off."))
def help_off(self):
self.log(_("Turns off everything on the printer"))
def add_cmdline_arguments(self, parser):
parser.add_argument('-c', '--conf', '--config', help = _("load this file on startup instead of .pronsolerc ; you may chain config files, if so settings auto-save will use the last specified file"), action = "append", default = [])
parser.add_argument('-e', '--execute', help = _("executes command after configuration/.pronsolerc is loaded ; macros/settings from these commands are not autosaved"), action = "append", default = [])
parser.add_argument('filename', nargs='?', help = _("file to load"))
def process_cmdline_arguments(self, args):
for config in args.conf:
self.load_rc(config)
if not self.rc_loaded:
self.load_default_rc()
self.processing_args = True
for command in args.execute:
self.onecmd(command)
self.processing_args = False
if args.filename:
filename = args.filename.decode(locale.getpreferredencoding())
self.cmdline_filename_callback(filename)
def cmdline_filename_callback(self, filename):
self.do_load(filename)
def parse_cmdline(self, args):
parser = argparse.ArgumentParser(description = 'Printrun 3D printer interface')
self.add_cmdline_arguments(parser)
args = [arg for arg in args if not arg.startswith("-psn")]
args = parser.parse_args(args = args)
self.process_cmdline_arguments(args)
def update_build_dimensions(self, param, value):
self.build_dimensions_list = parse_build_dimensions(value)
#self.p.analyzer.home_pos = get_home_pos(self.build_dimensions_list)
    # We replace this function, defined in cmd.py.
    # Its default behavior with regards to Ctrl-C
    # and Ctrl-D doesn't make much sense...
def cmdloop(self, intro=None):
"""Repeatedly issue a prompt, accept input, parse an initial prefix
off the received input, and dispatch to action methods, passing them
the remainder of the line as argument.
"""
self.preloop()
if self.use_rawinput and self.completekey:
try:
import readline
self.old_completer = readline.get_completer()
readline.set_completer(self.complete)
readline.parse_and_bind(self.completekey + ": complete")
except ImportError:
pass
try:
if intro is not None:
self.intro = intro
if self.intro:
self.stdout.write(str(self.intro) + "\n")
stop = None
while not stop:
if self.cmdqueue:
line = self.cmdqueue.pop(0)
else:
if self.use_rawinput:
try:
line = raw_input(self.prompt)
except EOFError:
print ""
self.do_exit("")
except KeyboardInterrupt:
print ""
line = ""
else:
self.stdout.write(self.prompt)
self.stdout.flush()
line = self.stdin.readline()
if not len(line):
line = ""
else:
line = line.rstrip('\r\n')
line = self.precmd(line)
stop = self.onecmd(line)
stop = self.postcmd(stop, line)
self.postloop()
finally:
if self.use_rawinput and self.completekey:
try:
import readline
readline.set_completer(self.old_completer)
except ImportError:
pass
| madsbuch/GridDispenser | dispenserrun/printrun/printrun/pronsole.py | Python | gpl-2.0 | 67,927 | [
"Firefly"
] | f9bb5089631fc628670b16909f39f1a32375016e36fb06774b7409ffa96eb16d |
import vtk, qt, ctk, slicer
import numpy
import time
class PickAndPaint:
def __init__(self, parent):
parent.title = "Pick 'n Paint "
parent.dependencies = []
parent.contributors = ["Lucie Macron"]
parent.helpText = """
"""
parent.acknowledgementText = """
This module was developed by Lucie Macron, University of Michigan
"""
self.parent = parent
class PickAndPaintWidget:
class fiducialState(object):
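        # Per-landmark state: label, glyph scale, ROI radius, index of the closest surface
        # point, scalar array name, on-surface snapping flag and propagation flag.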
def __init__(self):
self.fiducialLabel = None
self.fiducialScale = 2.0
self.radiusROI = 0.0
self.indexClosestPoint = -1
self.arrayName = None
self.mouvementSurfaceStatus = True
self.propagatedBool = False
class inputState (object):
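        # Per-input-model state: the associated markups fiducial node and its observer tags,
        # plus dictionaries of landmark states and propagated models.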
def __init__(self):
self.inputModelNode = None
self.fidNodeID = None
self.MarkupAddedEventTag = None
self.PointModifiedEventTag = None
self.dictionaryLandmark = dict() # Key = ID of markups
self.dictionaryLandmark.clear()
# ------------------------- PROPAGATION ------------------------
self.dictionaryPropInput = dict() # Key = ID of Propagated Model Node
self.dictionaryPropInput.clear()
self.propagationType = 0 # Type of propagation
# 0: No type specified
# 1: Correspondent Shapes
# 2: Non Correspondent Shapes
def __init__(self, parent=None):
self.developerMode = True
if not parent:
self.parent = slicer.qMRMLWidget()
self.parent.setLayout(qt.QVBoxLayout())
self.parent.setMRMLScene(slicer.mrmlScene)
else:
self.parent = parent
self.layout = self.parent.layout()
if not parent:
self.setup()
self.parent.show()
def setup(self):
print " ----- SetUp ------"
if self.developerMode:
self.reloadButton = qt.QPushButton("Reload")
self.reloadButton.toolTip = "Reload this module"
self.reloadButton.name = "SurfaceToolbox Reload"
self.layout.addWidget(self.reloadButton)
self.reloadButton.connect('clicked()', self.onReload)
# ------------------------------------------------------------------------------------
# Global Variables
# ------------------------------------------------------------------------------------
self.logic = PickAndPaintLogic()
self.dictionaryInput = dict()
self.dictionaryInput.clear()
self.propInputID = -1
# ------ REVIEW PROPAGATED MESHES --------------
self.propMarkupsNode = slicer.vtkMRMLMarkupsFiducialNode()
self.propMarkupsNode.SetName('PropagationMarkupsNode')
self.PropPointModifiedEventTag = None
self.propLandmarkIndex = -1
self.refLandmarkID = None
#-------------------------------------------------------------------------------------
# Interaction with 3D Scene
selectionNode = slicer.mrmlScene.GetNodeByID("vtkMRMLSelectionNodeSingleton")
selectionNode.SetReferenceActivePlaceNodeClassName("vtkMRMLMarkupsFiducialNode")
self.interactionNode = slicer.mrmlScene.GetNodeByID("vtkMRMLInteractionNodeSingleton")
#-------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------
# Input Selection
# ------------------------------------------------------------------------------------
inputLabel = qt.QLabel("Model of Reference: ")
self.inputModelSelector = slicer.qMRMLNodeComboBox()
self.inputModelSelector.objectName = 'inputFiducialsNodeSelector'
self.inputModelSelector.nodeTypes = ['vtkMRMLModelNode']
self.inputModelSelector.selectNodeUponCreation = False
self.inputModelSelector.addEnabled = False
self.inputModelSelector.removeEnabled = False
self.inputModelSelector.noneEnabled = True
self.inputModelSelector.showHidden = False
self.inputModelSelector.showChildNodeTypes = False
self.inputModelSelector.setMRMLScene(slicer.mrmlScene)
inputModelSelectorFrame = qt.QFrame(self.parent)
inputModelSelectorFrame.setLayout(qt.QHBoxLayout())
inputModelSelectorFrame.layout().addWidget(inputLabel)
inputModelSelectorFrame.layout().addWidget(self.inputModelSelector)
# ------------------------------------------------------------------------------------
# BUTTONS
# ------------------------------------------------------------------------------------
# ------------------------------- Add Fiducials Group --------------------------------
# Fiducials Scale
self.fiducialsScaleWidget = ctk.ctkSliderWidget()
self.fiducialsScaleWidget.singleStep = 0.1
self.fiducialsScaleWidget.minimum = 0.1
self.fiducialsScaleWidget.maximum = 20.0
self.fiducialsScaleWidget.value = 2.0
fiducialsScaleLayout = qt.QFormLayout()
fiducialsScaleLayout.addRow("Scale: ", self.fiducialsScaleWidget)
# Add Fiducials Button
self.addFiducialsButton = qt.QPushButton(" Add ")
self.addFiducialsButton.enabled = True
# Movements on the surface
self.surfaceDeplacementCheckBox = qt.QCheckBox("On Surface")
self.surfaceDeplacementCheckBox.setChecked(True)
# Layouts
scaleAndAddFiducialLayout = qt.QHBoxLayout()
scaleAndAddFiducialLayout.addWidget(self.addFiducialsButton)
scaleAndAddFiducialLayout.addLayout(fiducialsScaleLayout)
scaleAndAddFiducialLayout.addWidget(self.surfaceDeplacementCheckBox)
# Add Fiducials GroupBox
addFiducialBox = qt.QGroupBox()
addFiducialBox.title = " Landmarks "
addFiducialBox.setLayout(scaleAndAddFiducialLayout)
# ----------------------------------- ROI Group ------------------------------------
# ROI GroupBox
self.roiGroupBox = qt.QGroupBox()
self.roiGroupBox.title = "Region of interest"
self.fiducialComboBoxROI = qt.QComboBox()
self.radiusDefinitionWidget = ctk.ctkSliderWidget()
self.radiusDefinitionWidget.singleStep = 1.0
self.radiusDefinitionWidget.minimum = 0.0
self.radiusDefinitionWidget.maximum = 20.0
self.radiusDefinitionWidget.value = 0.0
self.radiusDefinitionWidget.tracking = False
roiBoxLayout = qt.QFormLayout()
roiBoxLayout.addRow("Select a Fiducial:", self.fiducialComboBoxROI)
roiBoxLayout.addRow("Value of radius", self.radiusDefinitionWidget)
self.roiGroupBox.setLayout(roiBoxLayout)
self.ROICollapsibleButton = ctk.ctkCollapsibleButton()
self.ROICollapsibleButton.setText("Selection Region of Interest: ")
self.parent.layout().addWidget(self.ROICollapsibleButton)
ROICollapsibleButtonLayout = qt.QVBoxLayout()
ROICollapsibleButtonLayout.addWidget(inputModelSelectorFrame)
ROICollapsibleButtonLayout.addWidget(addFiducialBox)
ROICollapsibleButtonLayout.addWidget(self.roiGroupBox)
self.ROICollapsibleButton.setLayout(ROICollapsibleButtonLayout)
self.ROICollapsibleButton.checked = True
self.ROICollapsibleButton.enabled = True
# ----------------------------- Propagate Button ----------------------------------
self.propagationCollapsibleButton = ctk.ctkCollapsibleButton()
self.propagationCollapsibleButton.setText(" Propagation: ")
self.parent.layout().addWidget(self.propagationCollapsibleButton)
self.shapesLayout = qt.QHBoxLayout()
self.correspondentShapes = qt.QRadioButton('Correspondent Meshes')
self.correspondentShapes.setChecked(True)
self.nonCorrespondentShapes = qt.QRadioButton('Non Correspondent Meshes')
self.nonCorrespondentShapes.setChecked(False)
self.shapesLayout.addWidget(self.correspondentShapes)
self.shapesLayout.addWidget(self.nonCorrespondentShapes)
self.propagationInputComboBox = slicer.qMRMLCheckableNodeComboBox()
self.propagationInputComboBox.nodeTypes = ['vtkMRMLModelNode']
self.propagationInputComboBox.setMRMLScene(slicer.mrmlScene)
self.propagateButton = qt.QPushButton("Propagate")
self.propagateButton.enabled = True
propagationBoxLayout = qt.QVBoxLayout()
propagationBoxLayout.addLayout(self.shapesLayout)
propagationBoxLayout.addWidget(self.propagationInputComboBox)
propagationBoxLayout.addWidget(self.propagateButton)
self.propagationCollapsibleButton.setLayout(propagationBoxLayout)
self.propagationCollapsibleButton.checked = False
self.propagationCollapsibleButton.enabled = True
self.layout.addStretch(1)
# ------------------------------------------------------------------------------------
# CONNECTIONS
# ------------------------------------------------------------------------------------
self.inputModelSelector.connect('currentNodeChanged(vtkMRMLNode*)', self.onCurrentNodeChanged)
self.addFiducialsButton.connect('clicked()', self.onAddButton)
self.fiducialsScaleWidget.connect('valueChanged(double)', self.onFiducialsScaleChanged)
self.surfaceDeplacementCheckBox.connect('stateChanged(int)', self.onSurfaceDeplacementStateChanged)
self.fiducialComboBoxROI.connect('currentIndexChanged(QString)', self.onFiducialComboBoxROIChanged)
self.radiusDefinitionWidget.connect('valueChanged(double)', self.onRadiusValueChanged)
self.radiusDefinitionWidget.connect('valueIsChanging(double)', self.onRadiusValueIsChanging)
self.propagationInputComboBox.connect('checkedNodesChanged()', self.onPropagationInputComboBoxCheckedNodesChanged)
self.propagateButton.connect('clicked()', self.onPropagateButton)
def onCloseScene(obj, event):
# initialize Parameters
globals()["PickAndPaint"] = slicer.util.reloadScriptedModule("PickAndPaint")
slicer.mrmlScene.AddObserver(slicer.mrmlScene.EndCloseEvent, onCloseScene)
def UpdateInterface(self):
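        # Syncs the scale / radius / on-surface widgets with the landmark currently selected
        # in the ROI combo box, then asks the logic to refresh the 3D view.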
print " OnUpdateInterface "
if self.inputModelSelector.currentNode():
activeInputID = self.inputModelSelector.currentNode().GetID()
selectedFidReflID = self.logic.findIDFromLabel(self.dictionaryInput[activeInputID].dictionaryLandmark,
self.fiducialComboBoxROI.currentText)
if activeInputID != -1:
# Reset all Values
if self.dictionaryInput[activeInputID].dictionaryLandmark and selectedFidReflID:
activeDictLandmarkValue = self.dictionaryInput[activeInputID].dictionaryLandmark[selectedFidReflID]
self.fiducialsScaleWidget.value = activeDictLandmarkValue.fiducialScale
self.radiusDefinitionWidget.value = activeDictLandmarkValue.radiusROI
if activeDictLandmarkValue.mouvementSurfaceStatus:
self.surfaceDeplacementCheckBox.setChecked(True)
else:
self.surfaceDeplacementCheckBox.setChecked(False)
else:
self.radiusDefinitionWidget.value = 0.0
self.fiducialsScaleWidget.value = 2.0
self.logic.UpdateThreeDView(self.inputModelSelector.currentNode(),
self.dictionaryInput,
self.fiducialComboBoxROI.currentText,
"UpdateInterface")
def onCurrentNodeChanged(self):
print " ------------------------------------ onCurrentNodeChanged ------------------------------------"
if self.inputModelSelector.currentNode():
activeInputID = self.inputModelSelector.currentNode().GetID()
if activeInputID:
if not self.dictionaryInput.has_key(activeInputID):
self.dictionaryInput[activeInputID] = self.inputState()
# self.dictionaryInput[activeInputID].inputModelNode = activeInput
fidNode = slicer.vtkMRMLMarkupsFiducialNode()
slicer.mrmlScene.AddNode(fidNode)
self.dictionaryInput[activeInputID].fidNodeID = fidNode.GetID()
# Observers Fiducials Node:
self.dictionaryInput[activeInputID].MarkupAddedEventTag = \
fidNode.AddObserver(fidNode.MarkupAddedEvent, self.onMarkupAddedEvent)
self.dictionaryInput[activeInputID].PointModifiedEventTag = \
fidNode.AddObserver(fidNode.PointModifiedEvent, self.onPointModifiedEvent)
else:
print "Key already exists"
slicer.modules.markups.logic().SetActiveListID(slicer.mrmlScene.GetNodeByID(self.dictionaryInput[activeInputID].fidNodeID))
# Update Fiducial ComboBox and PropFidComboBox
self.fiducialComboBoxROI.clear()
fidNode = slicer.app.mrmlScene().GetNodeByID(self.dictionaryInput[activeInputID].fidNodeID)
if fidNode:
numOfFid = fidNode.GetNumberOfMarkups()
if numOfFid > 0:
if self.fiducialComboBoxROI.count == 0:
for i in range(0, numOfFid):
landmarkLabel = fidNode.GetNthMarkupLabel(i)
self.fiducialComboBoxROI.addItem(landmarkLabel)
for node in self.propagationInputComboBox.checkedNodes():
print node.GetName()
self.propagationInputComboBox.setCheckState(node, 0)
self.logic.UpdateThreeDView(self.inputModelSelector.currentNode(),
self.dictionaryInput,
self.fiducialComboBoxROI.currentText,
'onCurrentNodeChanged')
else:
print ' Input chosen: None! '
def onAddButton(self):
self.interactionNode.SetCurrentInteractionMode(1)
def onFiducialsScaleChanged(self):
print " ------------------------------------ onFiducialsScaleChanged ------------------------------------ "
if self.inputModelSelector.currentNode():
activeInput = self.inputModelSelector.currentNode()
fidNode = slicer.app.mrmlScene().GetNodeByID(self.dictionaryInput[activeInput.GetID()].fidNodeID)
if activeInput:
for value in self.dictionaryInput[activeInput.GetID()].dictionaryLandmark.itervalues():
value.fiducialScale = self.fiducialsScaleWidget.value
print value.fiducialScale
if fidNode:
displayFiducialNode = fidNode.GetMarkupsDisplayNode()
disabledModify = displayFiducialNode.StartModify()
displayFiducialNode.SetGlyphScale(self.fiducialsScaleWidget.value)
displayFiducialNode.SetTextScale(self.fiducialsScaleWidget.value)
displayFiducialNode.EndModify(disabledModify)
else:
print "Error with fiducialNode"
def onSurfaceDeplacementStateChanged(self):
print " ------------------------------------ onSurfaceDeplacementStateChanged ------------------------------------"
if self.inputModelSelector.currentNode():
activeInput = self.inputModelSelector.currentNode()
fidNode = slicer.app.mrmlScene().GetNodeByID(self.dictionaryInput[activeInput.GetID()].fidNodeID)
selectedFidReflID = self.logic.findIDFromLabel(self.dictionaryInput[activeInput.GetID()].dictionaryLandmark,
self.fiducialComboBoxROI.currentText)
if selectedFidReflID:
if self.surfaceDeplacementCheckBox.isChecked():
self.dictionaryInput[activeInput.GetID()].dictionaryLandmark[selectedFidReflID].mouvementSurfaceStatus = True
for key, value in self.dictionaryInput[activeInput.GetID()].dictionaryLandmark.iteritems():
markupsIndex = fidNode.GetMarkupIndexByID(key)
if value.mouvementSurfaceStatus:
value.indexClosestPoint = self.logic.getClosestPointIndex(fidNode,
slicer.util.getNode(activeInput.GetID()),
markupsIndex)
self.logic.replaceLandmark(slicer.util.getNode(activeInput.GetID()),
fidNode,
markupsIndex,
value.indexClosestPoint)
else:
self.dictionaryInput[activeInput.GetID()].dictionaryLandmark[selectedFidReflID].mouvementSurfaceStatus = False
def onFiducialComboBoxROIChanged(self):
print "-------- ComboBox changement --------"
self.UpdateInterface()
def onRadiusValueIsChanging(self):
print " ------------------------------------ onRadiusValueIsChanging ------------------------------------"
def onRadiusValueChanged(self):
print " ------------------------------------ onRadiusValueChanged ---------------------------------------"
if self.inputModelSelector.currentNode():
activeInput = self.inputModelSelector.currentNode()
selectedFidReflID = self.logic.findIDFromLabel(self.dictionaryInput[activeInput.GetID()].dictionaryLandmark,
self.fiducialComboBoxROI.currentText)
if selectedFidReflID and self.radiusDefinitionWidget.value != 0:
activeLandmarkState = self.dictionaryInput[activeInput.GetID()].dictionaryLandmark[selectedFidReflID]
activeLandmarkState.radiusROI = self.radiusDefinitionWidget.value
# if self.activeDictionaryInputKey:
if not activeLandmarkState.mouvementSurfaceStatus:
self.surfaceDeplacementCheckBox.setChecked(True)
activeLandmarkState.mouvementSurfaceStatus = True
self.radiusDefinitionWidget.setEnabled(False)
listID = self.logic.defineNeighbor(activeInput,
activeLandmarkState.indexClosestPoint,
activeLandmarkState.radiusROI)
self.logic.addArrayFromIdList(listID, activeInput, activeLandmarkState.arrayName)
self.logic.displayROI(activeInput, activeLandmarkState.arrayName)
self.radiusDefinitionWidget.setEnabled(True)
self.radiusDefinitionWidget.tracking = False
def onPropagationInputComboBoxCheckedNodesChanged(self):
if self.inputModelSelector.currentNode():
activeInput = self.inputModelSelector.currentNode()
if activeInput:
self.dictionaryInput[activeInput.GetID()].dictionaryPropInput.clear()
list = self.propagationInputComboBox.checkedNodes()
for model in list:
if model.GetID() != activeInput.GetID():
self.dictionaryInput[activeInput.GetID()].dictionaryPropInput[model.GetID()] = dict()
print self.dictionaryInput[activeInput.GetID()].dictionaryPropInput
def onPropagateButton(self):
print " ------------------------------------ onPropagateButton -------------------------------------- "
if self.inputModelSelector.currentNode():
activeInput = self.inputModelSelector.currentNode()
if self.correspondentShapes.isChecked():
# print "CorrespondentShapes"
self.dictionaryInput[activeInput.GetID()].propagationType = 1
for value in self.dictionaryInput[activeInput.GetID()].dictionaryLandmark.itervalues():
arrayName = value.arrayName
value.propagatedBool = True
for IDModel in self.dictionaryInput[activeInput.GetID()].dictionaryPropInput.iterkeys():
model = slicer.mrmlScene.GetNodeByID(IDModel)
self.logic.propagateCorrespondent(activeInput, model, arrayName)
else:
# print "nonCorrespondentShapes"
self.dictionaryInput[activeInput.GetID()].propagationType = 2
for fiducialID, fiducialState in self.dictionaryInput[activeInput.GetID()].dictionaryLandmark.iteritems():
fiducialState.propagatedBool = True
for IDModel, dict in self.dictionaryInput[activeInput.GetID()].dictionaryPropInput.iteritems():
model = slicer.mrmlScene.GetNodeByID(IDModel)
self.logic.propagateNonCorrespondent(self.dictionaryInput[activeInput.GetID()].fidNodeID,
fiducialID,
fiducialState,
model)
self.UpdateInterface()
def onMarkupAddedEvent (self, obj, event):
if self.inputModelSelector.currentNode():
print" ------------------------------------ onMarkupAddedEvent --------------------------------------"
activeInput = self.inputModelSelector.currentNode()
# print " Number of Fiducial ", obj.GetNumberOfMarkups()
numOfMarkups = obj.GetNumberOfMarkups()
markupID = obj.GetNthMarkupID(numOfMarkups-1)
self.dictionaryInput[activeInput.GetID()].dictionaryLandmark[markupID] = self.fiducialState()
fiducialLabel = ' ' + str(numOfMarkups)
self.dictionaryInput[activeInput.GetID()].dictionaryLandmark[markupID].fiducialLabel = fiducialLabel
obj.SetNthFiducialLabel(numOfMarkups-1, fiducialLabel)
arrayName = activeInput.GetName()+'_'+str(numOfMarkups)+"_ROI"
self.dictionaryInput[activeInput.GetID()].dictionaryLandmark[markupID].arrayName = arrayName
#
self.fiducialComboBoxROI.addItem(fiducialLabel)
self.fiducialComboBoxROI.setCurrentIndex(self.fiducialComboBoxROI.count-1)
self.UpdateInterface()
def onPointModifiedEvent ( self, obj, event):
print " ------------------------------------ onPointModifiedEvent -------------------------------------- "
if self.inputModelSelector.currentNode():
activeInput = self.inputModelSelector.currentNode()
fidNode = slicer.app.mrmlScene().GetNodeByID(self.dictionaryInput[activeInput.GetID()].fidNodeID)
# remove observer to make sure, the callback function won't be disturbed
fidNode.RemoveObserver(self.dictionaryInput[activeInput.GetID()].PointModifiedEventTag)
selectedFiducialID = self.logic.findIDFromLabel(self.dictionaryInput[activeInput.GetID()].dictionaryLandmark,
self.fiducialComboBoxROI.currentText)
activeLandmarkState = self.dictionaryInput[activeInput.GetID()].dictionaryLandmark[selectedFiducialID]
markupsIndex = fidNode.GetMarkupIndexByID(selectedFiducialID)
if activeLandmarkState.mouvementSurfaceStatus:
activeLandmarkState.indexClosestPoint = self.logic.getClosestPointIndex(fidNode,
slicer.util.getNode(activeInput.GetID()),
markupsIndex)
self.logic.replaceLandmark(slicer.util.getNode(activeInput.GetID()),
fidNode,
markupsIndex,
activeLandmarkState.indexClosestPoint)
# Moving the region if we move the fiducial
if activeLandmarkState.radiusROI > 0 and activeLandmarkState.radiusROI != 0:
listID = self.logic.defineNeighbor(activeInput,
activeLandmarkState.indexClosestPoint,
activeLandmarkState.radiusROI)
self.logic.addArrayFromIdList(listID, activeInput, activeLandmarkState.arrayName)
self.logic.displayROI(activeInput, activeLandmarkState.arrayName)
# Moving the region on propagated models if the region has been propagated before
if self.dictionaryInput[activeInput.GetID()].dictionaryPropInput and activeLandmarkState.propagatedBool:
if self.correspondentShapes.isChecked():
print " self.correspondentShapes.isChecked "
for nodeID in self.dictionaryInput[activeInput.GetID()].dictionaryPropInput.iterkeys():
print nodeID
node = slicer.mrmlScene.GetNodeByID(nodeID)
self.logic.propagateCorrespondent(activeInput, node, activeLandmarkState.arrayName)
else:
print " Not Checked "
for nodeID in self.dictionaryInput[activeInput.GetID()].dictionaryPropInput.iterkeys():
print nodeID
node = slicer.mrmlScene.GetNodeByID(nodeID)
self.logic.propagateNonCorrespondent(self.dictionaryInput[activeInput.GetID()].fidNodeID,
selectedFiducialID,
activeLandmarkState,
node)
time.sleep(0.08)
self.dictionaryInput[activeInput.GetID()].PointModifiedEventTag = \
fidNode.AddObserver(fidNode.PointModifiedEvent, self.onPointModifiedEvent)
def onReload(self, moduleName="PickAndPaint"):
"""Generic reload method for any scripted module.
    ModuleWizard will substitute correct default moduleName.
"""
print " --------------------- RELOAD ------------------------ \n"
globals()[moduleName] = slicer.util.reloadScriptedModule(moduleName)
class PickAndPaintLogic:
def __init__(self):
pass
def findIDFromLabel(self, activeInputLandmarkDict, fiducialLabel):
print " findIDFromLabel "
fiducialID = None
for ID, value in activeInputLandmarkDict.iteritems():
if activeInputLandmarkDict[ID].fiducialLabel == fiducialLabel:
fiducialID = ID
break
return fiducialID
def UpdateThreeDView(self, activeInput, dictionaryInput, landmarkLabel = None, functionCaller = None):
print " UpdateThreeDView() "
activeInputID = activeInput.GetID()
if functionCaller == 'onCurrentNodeChanged':
# Fiducial Visibility
for keyInput, valueInput in dictionaryInput.iteritems():
fidNode = slicer.app.mrmlScene().GetNodeByID(valueInput.fidNodeID)
if keyInput != activeInputID:
if valueInput.dictionaryLandmark:
for landID in valueInput.dictionaryLandmark.iterkeys():
print "ID=", landID
landmarkIndex = fidNode.GetMarkupIndexByID(landID)
print "Index= ", landmarkIndex
fidNode.SetNthFiducialVisibility(landmarkIndex, False)
else:
if valueInput.dictionaryLandmark:
for landID in valueInput.dictionaryLandmark.iterkeys():
landmarkIndex = fidNode.GetMarkupIndexByID(landID)
fidNode.SetNthFiducialVisibility(landmarkIndex, True)
if functionCaller == 'UpdateInterface' and landmarkLabel:
selectedFidReflID = self.findIDFromLabel(dictionaryInput[activeInput.GetID()].dictionaryLandmark,
landmarkLabel)
fidNode = slicer.app.mrmlScene().GetNodeByID(dictionaryInput[activeInputID].fidNodeID)
for key in dictionaryInput[activeInputID].dictionaryLandmark.iterkeys():
markupsIndex = fidNode.GetMarkupIndexByID(key)
if key != selectedFidReflID:
fidNode.SetNthMarkupLocked(markupsIndex, True)
else:
fidNode.SetNthMarkupLocked(markupsIndex, False)
displayNode = activeInput.GetModelDisplayNode()
displayNode.SetScalarVisibility(False)
if dictionaryInput[activeInput.GetID()].dictionaryPropInput:
for nodeID in dictionaryInput[activeInput.GetID()].dictionaryPropInput:
node = slicer.mrmlScene.GetNodeByID(nodeID)
node.GetDisplayNode().SetScalarVisibility(False)
if selectedFidReflID:
if dictionaryInput[activeInput.GetID()].dictionaryLandmark[selectedFidReflID].radiusROI > 0:
displayNode.SetActiveScalarName(dictionaryInput[activeInput.GetID()].dictionaryLandmark[selectedFidReflID].arrayName)
displayNode.SetScalarVisibility(True)
for nodeID in dictionaryInput[activeInput.GetID()].dictionaryPropInput:
node = slicer.mrmlScene.GetNodeByID(nodeID)
arrayID = self.findArray(node.GetPolyData().GetPointData(),
dictionaryInput[activeInput.GetID()].dictionaryLandmark[selectedFidReflID].arrayName)
if arrayID != -1:
node.GetDisplayNode().SetActiveScalarName(dictionaryInput[activeInput.GetID()].dictionaryLandmark[selectedFidReflID].arrayName)
node.GetDisplayNode().SetScalarVisibility(True)
def replaceLandmark(self, inputModel, fidNode, fiducialID, indexClosestPoint):
print " --- replaceLandmark --- "
polyData = inputModel.GetPolyData()
fiducialCoord = numpy.zeros(3)
polyData.GetPoints().GetPoint(indexClosestPoint, fiducialCoord)
fidNode.SetNthFiducialPosition(fiducialID,
fiducialCoord[0],
fiducialCoord[1],
fiducialCoord[2])
def getClosestPointIndex(self, fidNode, input, fiducialID):
print " --- getClosestPointIndex --- "
fiducialCoord = numpy.zeros(3)
fidNode.GetNthFiducialPosition(fiducialID, fiducialCoord)
polyData = input.GetPolyData()
pointLocator = vtk.vtkPointLocator()
pointLocator.SetDataSet(polyData)
pointLocator.AutomaticOn()
pointLocator.BuildLocator()
indexClosestPoint = pointLocator.FindClosestPoint(fiducialCoord)
return indexClosestPoint
def displayROI(self, inputModelNode, scalarName):
print " --- displayROI --- "
polyData = inputModelNode.GetPolyData()
polyData.Modified()
displayNode = inputModelNode.GetModelDisplayNode()
disabledModify = displayNode.StartModify()
displayNode.SetActiveScalarName(scalarName)
displayNode.SetScalarVisibility(True)
displayNode.EndModify(disabledModify)
def findArray(self, pointData, arrayName):
print " --- findArray --- "
arrayID = -1
if pointData.HasArray(arrayName) == 1:
for i in range(0, pointData.GetNumberOfArrays()):
if pointData.GetArrayName(i) == arrayName:
arrayID = i
break
return arrayID
def addArrayFromIdList(self, connectedIdList, inputModelNode, arrayName):
print " --- addArrayFromIdList --- "
polyData = inputModelNode.GetPolyData()
pointData = polyData.GetPointData()
numberofIds = connectedIdList.GetNumberOfIds()
hasArrayInt = pointData.HasArray(arrayName)
if hasArrayInt == 1: # ROI Array found
print " MODIFIED "
pointData.RemoveArray(arrayName)
print " CREATED "
arrayToAdd = vtk.vtkDoubleArray()
arrayToAdd.SetName(arrayName)
for i in range(0, polyData.GetNumberOfPoints()):
arrayToAdd.InsertNextValue(0.0)
for i in range(0, numberofIds):
arrayToAdd.SetValue(connectedIdList.GetId(i), 1.0)
lut = vtk.vtkLookupTable()
tableSize = 2
lut.SetNumberOfTableValues(tableSize)
lut.Build()
lut.SetTableValue(0, 0.23, 0.11, 0.8, 1)
# lut.SetTableValue(1, 0.8, 0.4, 0.9, 1)
lut.SetTableValue(1, 0.8, 0.3, 0.7, 1)
arrayToAdd.SetLookupTable(lut)
pointData.AddArray(arrayToAdd)
polyData.Modified()
return True
def GetConnectedVertices(self, polyData, pointID):
# print " --- GetConnectedVertices --- "
cellList = vtk.vtkIdList()
idsList = vtk.vtkIdList()
idsList.InsertNextId(pointID)
# Get cells that vertex 'pointID' belongs to
polyData.GetPointCells(pointID, cellList)
numberOfIds = cellList.GetNumberOfIds()
for i in range(0, numberOfIds):
# Get points which compose all cells
pointIdList = vtk.vtkIdList()
polyData.GetCellPoints(cellList.GetId(i), pointIdList)
for i in range(0, pointIdList.GetNumberOfIds()):
if pointIdList.GetId(i) != pointID:
idsList.InsertUniqueId(pointIdList.GetId(i))
return idsList
def defineNeighbor(self, inputModelNode, indexClosestPoint , distance):
print" --- defineNeighbor --- "
def add2IdLists(list1, list2):
for i in range(0, list2.GetNumberOfIds()):
list1.InsertUniqueId(list2.GetId(i))
return list1
polyData = inputModelNode.GetPolyData()
connectedVerticesList = self.GetConnectedVertices(polyData, indexClosestPoint)
if distance > 1:
for dist in range(1, int(distance)):
for i in range(0, connectedVerticesList.GetNumberOfIds()):
connectedList = self.GetConnectedVertices(polyData, connectedVerticesList.GetId(i))
verticesListTemp = add2IdLists(connectedVerticesList, connectedList)
connectedVerticesList = verticesListTemp
return connectedVerticesList
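    # Illustrative use (mirrors onRadiusValueChanged above; 'logic' is a
    # PickAndPaintLogic instance, 'model' an input model node, and the point
    # index / radius values are placeholders):
    #   ids = logic.defineNeighbor(model, indexClosestPoint, radiusROI)
    #   logic.addArrayFromIdList(ids, model, "model_1_ROI")
    #   logic.displayROI(model, "model_1_ROI")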
def propagateCorrespondent(self, referenceInputModel, propagatedInputModel, arrayName):
print " ---- propagateCorrespondent ---- "
referencePointData = referenceInputModel.GetPolyData().GetPointData()
propagatedPointData = propagatedInputModel.GetPolyData().GetPointData()
arrayIDReference = self.findArray(referencePointData, arrayName)
arrayIDPropagated = self.findArray(propagatedPointData, arrayName)
if arrayIDReference != -1:
arrayToPropagate = referencePointData.GetArray(arrayIDReference)
if arrayIDPropagated != -1:
propagatedPointData.RemoveArray(arrayIDPropagated)
propagatedPointData.AddArray(arrayToPropagate)
self.displayROI(propagatedInputModel, arrayName)
else:
print " NO ROI ARRAY FOUND. PLEASE DEFINE ONE BEFORE."
pass
def propagateNonCorrespondent(self, fidNodeID, fiducialID, fiducialState, propagatedInput):
fidNode = slicer.app.mrmlScene().GetNodeByID(fidNodeID)
index = fidNode.GetMarkupIndexByID(fiducialID)
indexClosestPoint = self.getClosestPointIndex(fidNode, propagatedInput, index)
listID = self.defineNeighbor(propagatedInput, indexClosestPoint, fiducialState.radiusROI)
self.addArrayFromIdList(listID, propagatedInput, fiducialState.arrayName)
self.displayROI(propagatedInput, fiducialState.arrayName)
| luciemac/PickAndPaintProject | PickAndPaint.py | Python | apache-2.0 | 37,251 | ["VTK"] | 8be09c8cb1f2175e50cf1cc0b788ddbad7cb29bb20c3965c40aea5dab452b09e |
#!/usr/bin/env python3
# -*- mode: python; indent-tabs-mode: nil; c-basic-offset: 4; tab-width: 4; -*-
# vim: set shiftwidth=4 softtabstop=4 expandtab:
"""Support for reading from an NCAR EOL RAF PostgreSQL database of
real-time flight data.
2014 Copyright University Corporation for Atmospheric Research
This file is part of the "django-ncharts" package.
The license and distribution terms for this file may be found in the
file LICENSE in this package.
"""
from datetime import datetime
import logging
import sys
import threading
import pytz
import numpy as np
import psycopg2
from ncharts import exceptions as nc_exc
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
_logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class RAFDatabase(object):
"""Support for reading time series from NCAR EOL RAF PostgreSQL database.
"""
__cached_connections = {}
__cache_lock = threading.Lock()
@staticmethod
def get_connection(
database="real-time-GV",
user="ads",
host="eol-rt-data.fl-ext.ucar.edu",
port=5432,
password=None):
"""Return a psycopg2 database connection.
The returned connection can be shared between threads.
If the connection is kept open, then for a given
database, user, host and port, this method
will always return the same connection.
Args:
database, user, host, port password: Parameters needed
to establish a connection to the PostgreSQL database.
Returns:
A psycopg2.connection
Raises:
psycopg2.Error
"""
hashval = hash(database + user + host + str(port))
with RAFDatabase.__cache_lock:
conn = None
if hashval in RAFDatabase.__cached_connections:
conn = RAFDatabase.__cached_connections[hashval]
# connection closed: nonzero if it is closed or broken.
# Mainly just checking here if it is broken, in which
# case, close and attempt a re-connect.
if conn.closed:
try:
conn.rollback()
except psycopg2.Error as exc:
_logger.warning("%s rollback: %s", conn, exc)
try:
conn.close()
except psycopg2.Error as exc:
_logger.warning("%s close: %s", conn, exc)
del RAFDatabase.__cached_connections[hashval]
conn = None
if not conn:
conn = psycopg2.connect(
database=database, user=user,
host=host, port=port, password=password)
conn.set_session(
isolation_level="READ COMMITTED",
readonly=True)
RAFDatabase.__cached_connections[hashval] = conn
return conn
@staticmethod
def close_connection(conn):
"""Close a psycopg2 database connection.
Args:
conn: connection to close.
Raises:
nothing
According to http://initd.org/psycopg/docs/connection.html:
Changed in version 2.5: if the connection is used in a with
statement, the (rollback) method is automatically called if
an exception is raised in the with block.
All connections here are used in a with statement, so we
don't have to call rollback() before close.
"""
with RAFDatabase.__cache_lock:
for (hashval, cconn) in RAFDatabase.__cached_connections.items():
if conn == cconn:
try:
conn.close()
except psycopg2.Error as exc:
_logger.warning("%s close: %s", conn, exc)
del RAFDatabase.__cached_connections[hashval]
break
def __init__(
self,
database="real-time-GV",
user="ads",
host="eol-rt-data.fl-ext.ucar.edu",
port=5432,
password=None,
table="raf_lrt"):
"""Construct an instance of RAF database connection.
Args:
database, user, host, port, password: Usual parameters
needed to create a PostgreSQL connection.
table: name of table in the database which contains
the time-series data to be read.
Raises:
nc_exc.NoDataException
"""
try:
self.conn = RAFDatabase.get_connection(
database=database, user=user,
host=host, port=port, password=password)
self.database = database
self.user = user
self.host = host
self.port = port
self.password = password
self.table = table
except psycopg2.Error as exc:
raise nc_exc.NoDataException(
"Database not available: {}".format(exc))
def get_variables(self):
"""Fetch pertinent fields from the 'variable_list' table in
the RAF database, such as the list of variable names, their units, and
missing values.
Raises:
nc_exc.NoDataException
"""
try:
with self.conn as conn:
with conn.cursor() as cur:
cur.execute("\
SELECT name, units, long_name, ndims, dims, missing_value from variable_list;")
variables = {}
for var in cur:
dimnames = ["time"]
# make a bold assumption that a second dimension
# is a particle-probe bin number
if var[3] > 1:
dimnames.append("bin")
variables[var[0]] = {
"units": var[1],
"long_name": var[2],
"dimnames": dimnames,
"shape": var[4]
}
return variables
except psycopg2.Error as exc:
# psycopg.connections are thread safe
RAFDatabase.close_connection(conn)
raise nc_exc.NoDataException(
"No variables found: {}".format(exc))
def read_times(
self,
start_time=pytz.utc.localize(datetime.min),
end_time=pytz.utc.localize(datetime.max)):
"""Read datetimes from the table within a range.
Raises:
nc_exc.NoDataException
"""
start_time = start_time.replace(tzinfo=None)
end_time = end_time.replace(tzinfo=None)
# _logger.debug("read_times, table=%s", self.table)
vname = "datetime"
try:
with self.conn as conn:
with conn.cursor() as cur:
# datetimes in database are returned to python as timezone naive.
cur.execute(
"SELECT {} FROM {} WHERE {} >= %s AND {} < %s;"
.format(vname, self.table, vname, vname),
(start_time, end_time))
return [pytz.utc.localize(x[0]).timestamp() for x in cur]
except psycopg2.Error as exc:
RAFDatabase.close_connection(conn)
raise nc_exc.NoDataException(
"read {}: {}".format(vname, exc))
def get_start_time(self):
        Read the first datetime from the database table.
Raises:
nc_exc.NoDataException
"""
vname = "datetime"
try:
with self.conn as conn:
with conn.cursor() as cur:
# datetimes in database are returned to python as timezone naive.
cur.execute(
"SELECT {} FROM {} FETCH FIRST 1 ROW ONLY;"
.format(vname, self.table))
start_time = cur.fetchone()
if not start_time:
_logger.warning("%s: read %s: no data", conn, vname)
raise nc_exc.NoDataException("read {}".format(vname))
return pytz.utc.localize(start_time[0])
except psycopg2.Error as exc:
_logger.warning("%s: read %s: %s", conn, vname, exc)
RAFDatabase.close_connection(conn)
raise nc_exc.NoDataException("read {}: {}".format(vname, exc))
def read_time_series(
self,
variables=(),
start_time=pytz.utc.localize(datetime.min),
end_time=pytz.utc.localize(datetime.max),
size_limit=1000 * 1000 * 1000):
"""Read times and variables from the table within a time period.
For each variable, its missing_value will be read from the
variable_list table. Values read from the time series table
which match the missing_value will be set to float('nan').
Args:
variables: list or tuple of variable names to read.
start_time: starting datetime of data to be read.
end_time: ending datetime of data to be read.
size_limit: attempt to screen outrageous requests.
Returns:
A one element dict, compatible with that returned by
netcdf.read_time_series(), containing for a series_name of '':
{
'time' : list of UTC timestamps,
'data': lists of numpy.ndarray containing
the data for each variable,
'vmap': dict by variable name,
containing the index into the series data for the variable,
'dim2': dict by variable name, of values for second
dimension of the data, such as height.
}
Raises:
nc_exc.NoDataException
"""
total_size = 0
start_time = start_time.replace(tzinfo=None)
end_time = end_time.replace(tzinfo=None)
vtime = self.read_times(start_time=start_time, end_time=end_time)
# _logger.debug("read_times, len=%d", len(vtime))
total_size += sys.getsizeof(vtime)
if total_size > size_limit:
raise nc_exc.TooMuchDataException(
"too many time values requested, size={0} MB".\
format(total_size/(1000 * 1000)))
vdata = []
vmap = {}
vdim2 = {}
try:
with self.conn as conn:
with conn.cursor() as cur:
for vname in variables:
operation = "read variable_list"
# _logger.debug("vname=%s",vname)
cur.execute(
"SELECT dims, missing_value from variable_list where name=%s;",
(vname,))
vinfo = cur.fetchall()
# _logger.debug("vinfo=%s",vinfo)
dims = vinfo[0][0]
dims[0] = len(vtime)
missval = vinfo[0][1]
if len(dims) > 1:
# In initial CSET data, dims for CUHSAS_RWOOU
# in variable_list was [1,99]
# Seems that the 99 should have been 100,
# which is what is returned by this:
operation = "read dimension of {}".format(vname)
cur.execute("\
SELECT array_upper({},1) FROM {} FETCH FIRST 1 ROW ONLY;\
".format(vname, self.table))
dimsx = cur.fetchall()[0]
dims[1] = dimsx[0]
# _logger.debug("vname=%s, dims=%s, dimsx=%s", vname, dims, dimsx)
operation = "read {}".format(vname)
cur.execute("\
SELECT {} FROM {} WHERE datetime >= %s AND datetime < %s;\
".format(vname, self.table), (start_time, end_time))
cdata = np.ma.masked_values(np.ndarray(
shape=dims, buffer=np.array(
[v for v in cur], dtype=float)), value=missval)
if isinstance(cdata, np.ma.core.MaskedArray):
# _logger.debug("is MaskedArray")
cdata = cdata.filled(fill_value=float('nan'))
total_size += sys.getsizeof(cdata)
if total_size > size_limit:
raise nc_exc.TooMuchDataException(
"too many values requested, size={0} MB".\
format(total_size/(1000 * 1000)))
vindex = len(vdata)
vdata.append(cdata)
vmap[vname] = vindex
if len(dims) > 1:
vdim2[vname] = {
"data": [i for i in range(dims[1])],
"name": "bin",
"units": ""
}
return {
'': {
'time': vtime,
'data': vdata,
'vmap': vmap,
'dim2': vdim2,
}
}
except psycopg2.Error as exc:
RAFDatabase.close_connection(conn)
raise nc_exc.NoDataException(
(operation + ": {}").format(exc))
def test_func():
""" """
db = RAFDatabase(
database="real-time-GV", user="ads",
host="eol-rt-data.fl-ext.ucar.edu",
port=5432,
table="raf_lrt")
variables = db.get_variables()
time0 = db.get_start_time()
_logger.debug("time0=%s", time0)
# times = db.read_times()
# _logger.debug("all times=%s",times)
t1 = pytz.utc.localize(datetime(2015, 6, 29, 15, 10, 0))
t2 = pytz.utc.localize(datetime(2015, 6, 29, 15, 11, 0))
times = db.read_times(start_time=t1, end_time=t2)
_logger.debug("times=%s", times)
data = db.read_time_series(("TASX",), start_time=t1, end_time=t2)
_logger.debug("data=%s", data)
data = db.read_time_series(("CUHSAS_RWOOU",), start_time=t1, end_time=t2)
_logger.debug("data=%s", data)
RAFDatabase.close_connection(db)
if __name__ == '__main__':
test_func()
| ncareol/ncharts | ncharts/raf_database.py | Python | bsd-2-clause | 14,670 | ["NetCDF"] | b817ae6f0ebcb954efad74ab73f1ae19d34c00cd2cd47ec42931d0c865f39af6 |
#!/usr/bin/env python
"""
Read a file of our JSON BLAST output (which has one JSON object per
line) from stdin and pretty print it to stdout.
"""
from __future__ import print_function
from json import dumps, loads
import sys
for line in sys.stdin:
s = dumps(loads(line[:-1]), sort_keys=True, indent=2)
print('\n'.join([l.rstrip() for l in s.splitlines()]))
| bamueh/dark-matter | bin/cat-json-blast-records.py | Python | mit | 369 | ["BLAST"] | 44d3a6ce412a32d88f9ad77c97b7894b06272c9ada4edd622b54cdc4619f304c |
#! /usr/bin/env python
"circFind.py -- "
import sys, os, ConfigParser, argparse
parser = argparse.ArgumentParser(description='circFind is a pipeline to find circular RNA')
parser.add_argument('config_file', help='')
parser.add_argument('-s', '--step', choices=range(8), type=int, help='0:all steps, 1:ori_reads_genome_align, 2:ori_reads_trans_align, 3:split_anchors, 4:anchors_align, 5:split_breakpoints, 6:breakpoints_align, 7:find_circrna')
parser.add_argument('-r', '--remove-temp', dest='rm', action='store_true', help='')
args = parser.parse_args()
#=========================================================
def ori_reads_genome_align(output_dir, genome_idx, fa_file, run):
print '=== ori_reads_genome_align ==='
output = {'genome_unalign': output_dir + 'genome_unalign.fa', 'genome_align': output_dir + 'genome_align.bwt'}
cmd = 'bowtie -f -v3 -p4 --un %s %s %s %s' % (output['genome_unalign'], genome_idx, fa_file, output['genome_align'])
if run == True:
print cmd
os.system(cmd)
return(output)
def ori_reads_trans_align(output_dir, trans_idx, fa_file, run):
print '=== ori_reads_trans_align ==='
output = {'trans_unalign': output_dir + 'trans_unalign.fa', 'trans_align': output_dir + 'trans_align.bwt'}
cmd = 'bowtie -f -v3 -p4 --un %s %s %s %s' % (output['trans_unalign'], trans_idx, fa_file, output['trans_align'])
if run == True:
print cmd
os.system(cmd)
return(output)
def split_anchors(output_dir, fa_file, run):
print '=== split anchors ==='
output = {'anchors': output_dir + 'anchors.fa'}
cmd = 'split_anchors.py %s %s' % (fa_file, output['anchors'])
if run == True:
print cmd
os.system(cmd)
return(output)
def anchors_align(output_dir, genome_idx, fa_file, run):
print '=== anchors align ==='
output = {'anchors_align': output_dir + 'anchors.align.bwt'}
cmd = 'bowtie -f -v3 --best --strata -k11 -m10 -p4 %s %s %s' % (genome_idx, fa_file, output['anchors_align'])
if run == True:
print cmd
os.system(cmd)
return(output)
def split_breakpoints(output_dir, bwt, run):
print '=== split reads at breakpoints ==='
output = {'breakpoints': output_dir + 'breakpoints.fa'}
cmd = 'split_breakpoints.py %s %s' % (bwt, output['breakpoints'])
if run == True:
print cmd
os.system(cmd)
return(output)
def breakpoints_align(output_dir, genome_idx, fa_file, run):
print '=== breakpoints mapping ==='
output = {'breakpoints_align': output_dir + 'breakpoints.align.bwt'}
cmd = 'bowtie -f -v3 --best --strata -k11 -m10 -p4 %s %s %s' % (genome_idx, fa_file, output['breakpoints_align'])
if run == True:
print cmd
os.system(cmd)
return(output)
def find_circrna(output_dir, sample_name, genome_fa, bwt, run):
print "=== find circRNAs ==="
output = {'circ': output_dir + sample_name + '.circ.txt'}
cmds = ['find_circ.py %s %s %s.circ' % (bwt, genome_fa, bwt),
'cat %s.circ|sort -k1,1 -k2,2n|uniq > %s' % (bwt, output['circ']),
]
for cmd in cmds:
if run == True:
print cmd
os.system(cmd)
#================================================================
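# A hypothetical config_file illustrating the fields read below; the section
# name is arbitrary and one section is processed per sample:
#
#   [sample1]
#   sample_name  = sample1
#   reads_file   = /path/to/sample1.fa
#   genome_index = /path/to/bowtie_genome_index
#   genome_seq   = /path/to/genome.fa
#   trans_index  = /path/to/bowtie_transcriptome_index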
cf = ConfigParser.ConfigParser()
cf.read(args.config_file)
for sec in cf.sections():
sample_name = cf.get(sec, 'sample_name')
reads_file = cf.get(sec, 'reads_file')
genome_index = cf.get(sec, 'genome_index')
genome_seq = cf.get(sec, 'genome_seq')
trans_index = cf.get(sec, 'trans_index')
temp_dir = './' + sample_name + '/temp/'
output_dir = './' + sample_name + '/output/'
os.system('mkdir -p ' + temp_dir)
os.system('mkdir -p ' + output_dir)
tag = [False, False, False, False, False, False, False, False, False]
if args.step > 0:
tag[args.step] = True
elif args.step == 0:
tag = [True, True, True, True, True, True, True, True, True]
else:
print 'please specify the -s/--step'
sys.exit()
ori_reads_genome_align_out = ori_reads_genome_align(temp_dir, genome_index, reads_file, tag[1])
ori_reads_trans_align_out = ori_reads_trans_align(temp_dir, trans_index, ori_reads_genome_align_out['genome_unalign'], tag[2])
split_anchors_out = split_anchors(temp_dir, ori_reads_trans_align_out['trans_unalign'], tag[3])
anchors_align_out = anchors_align(temp_dir, genome_index, split_anchors_out['anchors'], tag[4])
split_breakpoints_out = split_breakpoints(temp_dir, anchors_align_out['anchors_align'], tag[5])
breakpoints_align_out = breakpoints_align(temp_dir, genome_index, split_breakpoints_out['breakpoints'], tag[6])
find_circrna_out = find_circrna(output_dir, sample_name, genome_seq, breakpoints_align_out['breakpoints_align'], tag[7])
if args.rm == True:
print "=== clean the temp directory ==="
os.system('rm -r %s' % temp_dir)
| bioxfu/circRNAFinder | src/circRNAFind.py | Python | gpl-3.0 | 4,985 | ["Bowtie"] | 41ddabc441eb1c6fb20cb2d6509ef92a9cd4ee6e0e5848033109cea368a3cc31 |
""" Test the FTS3Utilities"""
from DIRAC.DataManagementSystem.Client.FTS3File import FTS3File
from DIRAC import S_OK, S_ERROR
__RCSID__ = "$Id $"
import unittest
import mock
import datetime
from DIRAC.DataManagementSystem.private.FTS3Utilities import FTS3JSONDecoder, \
FTS3Serializable, \
groupFilesByTarget, \
generatePossibleTransfersBySources, \
selectUniqueSourceforTransfers, \
FTS3ServerPolicy
import json
class FakeClass( FTS3Serializable ):
""" Just a fake class"""
_attrToSerialize = ['string', 'date', 'dic', 'sub']
def __init__( self ):
self.string = ''
self.date = None
self.dic = {}
class TestFTS3Serialization( unittest.TestCase ):
""" Test the FTS3 JSON serialization mechanizme with FTS3JSONEncoder,
FTS3JSONDecoder, FTS3Serializable"""
def test_01_basic( self ):
""" Basic json transfer"""
obj = FakeClass()
obj.string = 'tata'
obj.date = datetime.datetime.utcnow().replace( microsecond = 0 )
obj.dic = { 'a' : 1}
obj.notSerialized = 'Do not'
obj2 = json.loads( obj.toJSON(), cls = FTS3JSONDecoder )
self.assertTrue( obj.string == obj2.string )
self.assertTrue( obj.date == obj2.date )
self.assertTrue( obj.dic == obj2.dic )
self.assertTrue( not hasattr( obj2, 'notSerialized' ) )
def test_02_subobjects( self ):
""" Try setting as attribute an object """
class NonSerializable( object ):
""" Fake class not inheriting from FTS3Serializable"""
pass
obj = FakeClass()
obj.sub = NonSerializable()
with self.assertRaises( TypeError ):
obj.toJSON()
obj.sub = FakeClass()
obj.sub.string = 'pipo'
obj2 = json.loads( obj.toJSON(), cls = FTS3JSONDecoder )
self.assertTrue( obj.sub.string == obj2.sub.string )
def mock__checkSourceReplicas( ftsFiles ):
succ = {}
failed = {}
for ftsFile in ftsFiles:
if hasattr( ftsFile, 'fakeAttr_possibleSources' ):
succ[ ftsFile.lfn] = dict.fromkeys( getattr( ftsFile, 'fakeAttr_possibleSources' ) )
else:
failed[ftsFile.lfn] = 'No such file or directory'
return S_OK( {'Successful':succ, 'Failed':failed} )
class TestFileGrouping( unittest.TestCase ):
""" Testing all the grouping functions of FTS3Utilities
"""
def setUp( self ):
self.f1 = FTS3File()
self.f1.fakeAttr_possibleSources = ['Src1', 'Src2']
self.f1.lfn = 'f1'
self.f1.targetSE = 'target1'
self.f2 = FTS3File()
self.f2.fakeAttr_possibleSources = ['Src2', 'Src3']
self.f2.lfn = 'f2'
self.f2.targetSE = 'target2'
self.f3 = FTS3File()
self.f3.fakeAttr_possibleSources = ['Src4']
self.f3.lfn = 'f3'
self.f3.targetSE = 'target1'
# File does not exist :-)
self.f4 = FTS3File()
self.f4.lfn = 'f4'
self.f4.targetSE = 'target3'
self.allFiles = [self.f1, self.f2, self.f3, self.f4 ]
def test_01_groupFilesByTarget( self ):
# empty input
self.assertTrue( groupFilesByTarget( [] )['Value'] == {} )
res = groupFilesByTarget( self.allFiles )
self.assertTrue(res['OK'])
groups = res['Value']
self.assertTrue( self.f1 in groups['target1'] )
self.assertTrue( self.f2 in groups['target2'] )
self.assertTrue( self.f3 in groups['target1'] )
self.assertTrue( self.f4 in groups['target3'] )
@mock.patch( 'DIRAC.DataManagementSystem.private.FTS3Utilities._checkSourceReplicas', side_effect = mock__checkSourceReplicas )
def test_02_generatePossibleTransfersBySources( self, _mk_checkSourceReplicas ):
""" Get all the possible sources"""
# We assume here that they all go to the same target
res = generatePossibleTransfersBySources( self.allFiles )
self.assertTrue(res['OK'])
groups = res['Value']
self.assertTrue( self.f1 in groups['Src1'] )
self.assertTrue( self.f1 in groups['Src2'] )
self.assertTrue( self.f2 in groups['Src2'] )
self.assertTrue( self.f2 in groups['Src3'] )
self.assertTrue( self.f3 in groups['Src4'] )
self.assertTrue( self.f2 in groups['Src3'] )
@mock.patch( 'DIRAC.DataManagementSystem.private.FTS3Utilities._checkSourceReplicas', side_effect = mock__checkSourceReplicas )
def test_03_selectUniqueSourceforTransfers( self, _mk_checkSourceReplicas ):
""" Suppose they all go to the same target """
groupBySource = generatePossibleTransfersBySources( self.allFiles )['Value']
res = selectUniqueSourceforTransfers( groupBySource )
self.assertTrue(res['OK'])
uniqueSources = res['Value']
# Src1 and Src2 should not be here because f1 and f2 should be taken from Src2
self.assertTrue( sorted( uniqueSources.keys() ) == sorted( ['Src2', 'Src4'] ) )
self.assertTrue( self.f1 in uniqueSources['Src2'] )
self.assertTrue( self.f2 in uniqueSources['Src2'] )
self.assertTrue( self.f3 in uniqueSources['Src4'] )
fakeFTS3Server = "https://fts-fake.cern.ch:8446"
def mock__failoverServerPolicy(_attempt):
return fakeFTS3Server
def mock__randomServerPolicy(_attempt):
return fakeFTS3Server
def mock__sequenceServerPolicy(_attempt):
return fakeFTS3Server
def mock__OKFTSServerStatus(ftsServer):
return S_OK( ftsServer )
def mock__ErrorFTSServerStatus(ftsServer):
return S_ERROR( ftsServer )
class TestFTS3ServerPolicy ( unittest.TestCase ):
""" Testing FTS3 ServerPolicy selection """
def setUp(self):
self.fakeServerList = ["server_0", "server_1", "server_2"]
@mock.patch( 'DIRAC.DataManagementSystem.private.FTS3Utilities.FTS3ServerPolicy._getFTSServerStatus', side_effect = mock__OKFTSServerStatus )
@mock.patch( 'DIRAC.DataManagementSystem.private.FTS3Utilities.FTS3ServerPolicy._sequenceServerPolicy', side_effect = mock__sequenceServerPolicy )
@mock.patch( 'DIRAC.DataManagementSystem.private.FTS3Utilities.FTS3ServerPolicy._randomServerPolicy', side_effect = mock__randomServerPolicy )
@mock.patch( 'DIRAC.DataManagementSystem.private.FTS3Utilities.FTS3ServerPolicy._failoverServerPolicy', side_effect = mock__failoverServerPolicy )
def testCorrectServerPolicyIsUsed( self, mockFailoverFunc, mockRandomFunc, mockSequenceFunc, mockFTSServerStatus ):
" Test correct server policy method is called "
obj = FTS3ServerPolicy(self.fakeServerList, "Sequence")
obj.chooseFTS3Server()
self.assertTrue(mockSequenceFunc.called)
obj = FTS3ServerPolicy(self.fakeServerList, "Random")
obj.chooseFTS3Server()
self.assertTrue(mockRandomFunc.called)
obj = FTS3ServerPolicy(self.fakeServerList, "Failover")
obj.chooseFTS3Server()
self.assertTrue(mockFailoverFunc.called)
# random policy should be selected for an invalid policy
obj = FTS3ServerPolicy(self.fakeServerList, "InvalidPolicy")
obj.chooseFTS3Server()
self.assertTrue(mockRandomFunc.called)
@mock.patch( 'DIRAC.DataManagementSystem.private.FTS3Utilities.FTS3ServerPolicy._getFTSServerStatus', side_effect = mock__ErrorFTSServerStatus )
def testFailoverServerPolicy( self, mockFTSServerStatus):
""" Test if the failover server policy returns server at a given position"""
obj = FTS3ServerPolicy(self.fakeServerList, "Failover")
for i in range(len(self.fakeServerList)):
self.assertEquals('server_%d'%i, obj._failoverServerPolicy(i))
@mock.patch( 'DIRAC.DataManagementSystem.private.FTS3Utilities.FTS3ServerPolicy._getFTSServerStatus', side_effect = mock__ErrorFTSServerStatus )
def testSequenceServerPolicy( self, mockFTSServerStatus):
""" Test if the sequence server policy selects the servers Sequentially """
obj = FTS3ServerPolicy(self.fakeServerList, "Sequence")
for i in range(len(self.fakeServerList)):
self.assertEquals('server_%d'%i, obj._sequenceServerPolicy(i))
self.assertEquals('server_0',obj._sequenceServerPolicy(i))
@mock.patch( 'DIRAC.DataManagementSystem.private.FTS3Utilities.FTS3ServerPolicy._getFTSServerStatus', side_effect = mock__ErrorFTSServerStatus )
def testRandomServerPolicy( self, mockFTSServerStatus):
""" Test if the random server policy does not selects the same server multiple times """
obj = FTS3ServerPolicy(self.fakeServerList, "Random")
serverSet = set()
for i in range(len(self.fakeServerList)):
serverSet.add(obj._randomServerPolicy(i))
self.assertEquals(len(serverSet), len(self.fakeServerList))
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase( TestFTS3Serialization )
suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( TestFileGrouping ) )
suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( TestFTS3ServerPolicy ) )
unittest.TextTestRunner( verbosity = 2 ).run( suite )
| Andrew-McNab-UK/DIRAC | DataManagementSystem/private/test/Test_FTS3Utilities.py | Python | gpl-3.0 | 8,942 | ["DIRAC"] | 145da4a0e666679e99f0828560ed81c07244ef1e25d46c87a6abe5a70de9805b |
#fcc paracrystal model
#note model title and parameter table are automatically inserted
#note - calculation requires double precision
r"""
.. warning:: This model and this model description are under review following
concerns raised by SasView users. If you need to use this model,
please email [email protected] for the latest situation. *The
SasView Developers. September 2018.*
Definition
----------
Calculates the scattering from a **face-centered cubic lattice** with
paracrystalline distortion. Thermal vibrations are considered to be
negligible, and the size of the paracrystal is infinitely large.
Paracrystalline distortion is assumed to be isotropic and characterized by
a Gaussian distribution.
The scattering intensity $I(q)$ is calculated as
.. math::
I(q) = \frac{\text{scale}}{V_p} V_\text{lattice} P(q) Z(q)
where *scale* is the volume fraction of spheres, $V_p$ is the volume of
the primary particle, $V_\text{lattice}$ is a volume correction for the crystal
structure, $P(q)$ is the form factor of the sphere (normalized), and $Z(q)$
is the paracrystalline structure factor for a face-centered cubic structure.
Equation (1) of the 1990 reference\ [#Matsuoka1990]_ is used to calculate
$Z(q)$, using equations (23)-(25) from the 1987 paper\ [#Matsuoka1987]_ for
$Z1$, $Z2$, and $Z3$.
The lattice correction (the occupied volume of the lattice) for a
face-centered cubic structure of particles of radius $R$ and nearest
neighbor separation $D$ is
.. math::
V_\text{lattice} = \frac{16\pi}{3}\frac{R^3}{\left(D\sqrt{2}\right)^3}
The distortion factor (one standard deviation) of the paracrystal is
included in the calculation of $Z(q)$
.. math::
\Delta a = gD
where $g$ is a fractional distortion based on the nearest neighbor distance.
.. figure:: img/fcc_geometry.jpg
Face-centered cubic lattice.
For a crystal, diffraction peaks appear at reduced q-values given by
.. math::
\frac{qD}{2\pi} = \sqrt{h^2 + k^2 + l^2}
where for a face-centered cubic lattice $h, k , l$ all odd or all
even are allowed and reflections where $h, k, l$ are mixed odd/even
are forbidden. Thus the peak positions correspond to (just the first 5)
.. math::
\begin{array}{cccccc}
q/q_0 & 1 & \sqrt{4/3} & \sqrt{8/3} & \sqrt{11/3} & \sqrt{4} \\
\text{Indices} & (111) & (200) & (220) & (311) & (222)
\end{array}
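For instance, relative to the first (111) peak at $q_0$ each ratio is
$\sqrt{(h^2 + k^2 + l^2)/3}$, so the (200) reflection falls at
$q/q_0 = \sqrt{4/3} \approx 1.15$ and the (220) reflection at
$\sqrt{8/3} \approx 1.63$.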
.. note::
The calculation of $Z(q)$ is a double numerical integral that must be
carried out with a high density of points to properly capture the sharp
peaks of the paracrystalline scattering. So be warned that the calculation
is slow. Fitting of any experimental data must be resolution smeared for
any meaningful fit. This makes a triple integral which may be very slow.
The 2D (Anisotropic model) is based on the reference below where $I(q)$ is
approximated for 1d scattering. Thus the scattering pattern for 2D may not
be accurate particularly at low $q$. For general details of the calculation
and angular dispersions for oriented particles see :ref:`orientation`.
Note that we are not responsible for any incorrectness of the
2D model computation.
.. figure:: img/parallelepiped_angle_definition.png
Orientation of the crystal with respect to the scattering plane, when
$\theta = \phi = 0$ the $c$ axis is along the beam direction (the $z$ axis).
References
----------
.. [#Matsuoka1987] Hideki Matsuoka et. al. *Physical Review B*, 36 (1987)
1754-1765 (Original Paper)
.. [#Matsuoka1990] Hideki Matsuoka et. al. *Physical Review B*, 41 (1990)
3854-3856 (Corrections to FCC and BCC lattice structure calculation)
Authorship and Verification
---------------------------
* **Author:** NIST IGOR/DANSE **Date:** pre 2010
* **Last Modified by:** Paul Butler **Date:** September 29, 2016
* **Last Reviewed by:** Richard Heenan **Date:** March 21, 2016
"""
import numpy as np
from numpy import inf, pi
name = "fcc_paracrystal"
title = "Face-centred cubic lattic with paracrystalline distortion"
description = """
Calculates the scattering from a **face-centered cubic lattice** with
paracrystalline distortion. Thermal vibrations are considered to be
negligible, and the size of the paracrystal is infinitely large.
Paracrystalline distortion is assumed to be isotropic and characterized
by a Gaussian distribution.
"""
category = "shape:paracrystal"
single = False
# pylint: disable=bad-whitespace, line-too-long
# ["name", "units", default, [lower, upper], "type","description"],
parameters = [["dnn", "Ang", 220, [-inf, inf], "", "Nearest neighbour distance"],
["d_factor", "", 0.06, [-inf, inf], "", "Paracrystal distortion factor"],
["radius", "Ang", 40, [0, inf], "volume", "Particle radius"],
["sld", "1e-6/Ang^2", 4, [-inf, inf], "sld", "Particle scattering length density"],
["sld_solvent", "1e-6/Ang^2", 1, [-inf, inf], "sld", "Solvent scattering length density"],
["theta", "degrees", 60, [-360, 360], "orientation", "c axis to beam angle"],
["phi", "degrees", 60, [-360, 360], "orientation", "rotation about beam"],
["psi", "degrees", 60, [-360, 360], "orientation", "rotation about c axis"]
]
# pylint: enable=bad-whitespace, line-too-long
source = ["lib/sas_3j1x_x.c", "lib/gauss150.c", "lib/sphere_form.c", "fcc_paracrystal.c"]
def random():
"""Return a random parameter set for the model."""
# copied from bcc_paracrystal
radius = 10**np.random.uniform(1.3, 4)
d_factor = 10**np.random.uniform(-2, -0.7) # sigma_d in 0.01-0.7
dnn_fraction = np.random.beta(a=10, b=1)
dnn = radius*4/np.sqrt(2)/dnn_fraction
pars = dict(
#sld=1, sld_solvent=0, scale=1, background=1e-32,
dnn=dnn,
d_factor=d_factor,
radius=radius,
)
return pars
# april 10 2017, rkh add unit tests, NOT compared with any other calc method, assume correct!
# TODO: fix the 2d tests
q = 4.*pi/220.
tests = [
[{}, [0.001, q, 0.215268], [0.275164706668, 5.7776842567, 0.00958167119232]],
#[{}, (-0.047, -0.007), 238.103096286],
#[{}, (0.053, 0.063), 0.863609587796],
]
| SasView/sasmodels | sasmodels/models/fcc_paracrystal.py | Python | bsd-3-clause | 6,252 | ["CRYSTAL", "Gaussian"] | feea57174af6b6dd44c13fd54f78f19c0c485bcd9f566b61bc0693ee8b65c254 |
#
# Copyright 2011-2021 Universidad Complutense de Madrid
#
# This file is part of Megara DRP
#
# SPDX-License-Identifier: GPL-3.0+
# License-Filename: LICENSE.txt
#
"""MOS Standard Star Image Recipe for Megara"""
import astropy.io.fits as fits
import astropy.units as u
import astropy.wcs
from astropy import constants as const
from scipy.interpolate import interp1d
from numina.types.datatype import PlainPythonType
from numina.types.datatype import ListOfType
from numina.types.multitype import MultiType
from numina.array.numsplines import AdaptiveLSQUnivariateSpline
from numina.core import Result, Parameter
from numina.core.requirements import Requirement
from numina.core.validator import range_validator
from numina.exceptions import RecipeError
from numina.types.array import ArrayType
from megaradrp.instrument.focalplane import FocalPlaneConf
from megaradrp.ntypes import Point2D
from megaradrp.ntypes import ProcessedRSS, ProcessedFrame, ProcessedSpectrum
from megaradrp.ntypes import ReferenceSpectrumTable, ReferenceExtinctionTable
from megaradrp.ntypes import MasterSensitivity
from megaradrp.processing.extractobj import extract_star, generate_sensitivity
from megaradrp.processing.extractobj import mix_values, compute_broadening
from megaradrp.processing.centroid import calc_centroid_brightest
from megaradrp.recipes.scientific.base import ImageRecipe
class MOSStandardRecipe(ImageRecipe):
"""Process MOS Standard Star Recipe.
This recipe processes a set of images
    obtained in **MOS Standard Star image** mode and returns
the total flux of the star.
See Also
--------
megaradrp.recipes.calibration.lcbstdstar.LCBStandardRecipe
Notes
-----
Images provided by `obresult` are trimmed and corrected
from overscan, bad pixel mask (if `master_bpm` is not None),
bias, dark current (if `master_dark` is not None) and
slit-flat (if `master_slitflat` is not None).
Images thus corrected are then stacked using the median.
The result of the combination is saved as an intermediate result, named
'reduced_image.fits'. This combined image is also returned in the field
`reduced_image` of the recipe result.
The apertures in the 2D image are extracted, using the information in
`master_apertures` and resampled according to the wavelength calibration in
`master_wlcalib`. Then is divided by the `master_fiberflat`.
The resulting RSS is saved as an intermediate
result named 'reduced_rss.fits'. This RSS is also returned in the field
`reduced_rss` of the recipe result.
    The sky is subtracted by combining the fibers marked as `SKY`
    in the fibers configuration. The RSS with sky subtracted is returned in the
    field `final_rss` of the recipe result.
The flux of the star is computed by adding the 7 fibers corresponding to the bundle
containing the star and returned as `star_spectrum`.
"""
position = Requirement(Point2D, "Position of the reference object", optional=True)
# nrings = 1
reference_spectrum = Requirement(ReferenceSpectrumTable, "Spectrum of reference star")
reference_spectrum_velocity = Parameter(0.0, 'Radial velocity of reference spectrum')
reference_extinction = Requirement(ReferenceExtinctionTable, "Reference extinction")
degrade_resolution_target = Parameter('object', 'Spectrum with higher resolution',
choices=['object']
)
# TODO: Implement the possibility of the reference having higher resolution
# degrade_resolution_target = Parameter('object', 'Spectrum with higher resolution',
# choices=['object', 'reference']
# )
degrade_resolution_method = Parameter('fixed', 'Method to degrade the resolution',
choices=['none', 'fixed', 'auto']
)
    sigma_resolution = Parameter(20.0, 'sigma of the Gaussian filter used to degrade the resolution')
smoothing_knots = Requirement(
MultiType(
PlainPythonType(ref=3, validator=range_validator(minval=3)),
ListOfType(PlainPythonType(ref=0.0), nmin=3)
),
description='List of nodes or number of nodes for sensitivity smoothing',
optional=True
)
reduced_image = Result(ProcessedFrame)
final_rss = Result(ProcessedRSS)
reduced_rss = Result(ProcessedRSS)
sky_rss = Result(ProcessedRSS)
star_spectrum = Result(ProcessedSpectrum)
master_sensitivity = Result(MasterSensitivity)
sensitivity_raw = Result(ProcessedSpectrum)
fiber_ids = Result(ArrayType)
sigma = Result(float)
def run(self, rinput):
self.logger.info('starting MOSStandardRecipe reduction')
# Try to guard against receiving here something
# that is not in magAB
# TODO: implement this in ReferenceSpectrumTable
maxm = max(rinput.reference_spectrum[:, 1])
if maxm > 100:
# If the column here has values greater than 100
# this could not be a magnitude
raise RecipeError("the maximum flux of 'reference_spectrum' is > 100, "
"check the flux unit (it has to be magAB)")
reduced2d, rss_data = super(MOSStandardRecipe, self).base_run(rinput)
self.logger.info('start sky subtraction')
final, origin, sky = self.run_sky_subtraction(
rss_data,
sky_rss=rinput.sky_rss,
ignored_sky_bundles=rinput.ignored_sky_bundles
)
self.logger.info('end sky subtraction')
# 1 + 6 for first ring
# 1 + 6 + 12 for second ring
# 1 + 6 + 12 + 18 for third ring
# 1 + 6 * Sum_i=0^n = 1 + 3 * n * (n +1)
# In MOS, only 1 ring around central point
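        # (quick check of that formula: n=1 -> 1 + 3*1*2 = 7 fibers,
        #  n=2 -> 19, n=3 -> 37, matching the sums above)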
# If position is None, find the brightest spaxel
# and use the centroid
if rinput.position is None:
self.logger.info('finding centroid of brightest spaxel')
extraction_region = [1000, 3000]
nrings = rinput.nrings
position = calc_centroid_brightest(final, extraction_region, nrings)
else:
position = rinput.position
self.logger.info('central position is %s', position)
self.logger.debug('adding %d nrings', 1)
npoints = 7
self.logger.debug('adding %d fibers', npoints)
fp_conf = FocalPlaneConf.from_img(final)
spectra_pack = extract_star(final, position, npoints,
fp_conf, logger=self.logger)
spectrum, colids, wl_cover1, wl_cover2 = spectra_pack
star_spectrum = fits.PrimaryHDU(spectrum, header=final[0].header)
rad_vel = rinput.reference_spectrum_velocity * u.km / u.s
factor = 1 + rad_vel / const.c
star_interp = interp1d(rinput.reference_spectrum[:, 0] / factor,
rinput.reference_spectrum[:, 1])
extinc_interp = interp1d(rinput.reference_extinction[:, 0],
rinput.reference_extinction[:, 1])
fiber_ids = [colid + 1 for colid in colids]
wcsl = astropy.wcs.WCS(final[0].header)
wl_aa, response_m, response_r = mix_values(wcsl, spectrum, star_interp)
if rinput.degrade_resolution_method == 'none':
sigma = 0
self.logger.info('no broadening')
elif rinput.degrade_resolution_method == 'fixed':
sigma = rinput.sigma_resolution
self.logger.info('fixed sigma=%3.0f', sigma)
elif rinput.degrade_resolution_method == 'auto':
self.logger.info('compute auto broadening')
offset_broad, sigma_broad = compute_broadening(
response_r.copy(), response_m.copy(), sigmalist=range(1, 101),
remove_mean=False, frac_cosbell=0.10, zero_padding=50,
fminmax=(0.003, 0.3), naround_zero=25, nfit_peak=21
)
sigma = sigma_broad
self.logger.info('computed sigma=%3.0f', sigma)
else:
msg = f"'degrade_resolution_method' has value {rinput.degrade_resolution_method}"
raise ValueError(msg)
sens_raw = generate_sensitivity(final, spectrum, star_interp, extinc_interp, wl_cover1, wl_cover2, sigma)
# Compute smoothed version
self.logger.info('compute smoothed sensitivity')
sens = sens_raw.copy()
i_knots = rinput.smoothing_knots
self.logger.debug(f'using adaptive spline with t={i_knots} interior knots')
spl = AdaptiveLSQUnivariateSpline(x=wl_aa.value, y=sens_raw.data, t=i_knots)
sens.data = spl(wl_aa.value)
if self.intermediate_results:
import matplotlib.pyplot as plt
plt.plot(wl_aa, sens_raw.data, 'b')
plt.plot(wl_aa, sens.data, 'r')
plt.savefig('smoothed.png')
plt.close()
self.logger.info('end MOSStandardRecipe reduction')
return self.create_result(
reduced_image=reduced2d,
final_rss=final,
reduced_rss=origin,
sky_rss=sky,
star_spectrum=star_spectrum,
master_sensitivity=sens,
sensitivity_raw=sens_raw,
fiber_ids=fiber_ids,
sigma=sigma
)
| sergiopasra/megaradrp | megaradrp/recipes/calibration/mosstdstar.py | Python | gpl-3.0 | 9,428 | ["Gaussian"] | 4660465a336799be9e60332fe233ea4e889026cfe92763b0502de0097703ab6b |
"""
This test script is adapted from:
https://github.com/numpy/numpy/blob/main/numpy/tests/test_public_api.py
"""
import pkgutil
import types
import importlib
import warnings
import scipy
def check_dir(module, module_name=None):
"""Returns a mapping of all objects with the wrong __module__ attribute."""
if module_name is None:
module_name = module.__name__
results = {}
for name in dir(module):
item = getattr(module, name)
if (hasattr(item, '__module__') and hasattr(item, '__name__')
and item.__module__ != module_name):
results[name] = item.__module__ + '.' + item.__name__
return results
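# A hypothetical use of check_dir(): list everything reachable from
# scipy.cluster whose __module__ is not exactly "scipy.cluster".
#
#   mismatches = check_dir(scipy.cluster, "scipy.cluster")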
def test_dir_testing():
"""Assert that output of dir has only one "testing/tester"
attribute without duplicate"""
assert len(dir(scipy)) == len(set(dir(scipy)))
# Historically SciPy has not used leading underscores for private submodules
# much. This has resulted in lots of things that look like public modules
# (i.e. things that can be imported as `import scipy.somesubmodule.somefile`),
# but were never intended to be public. The PUBLIC_MODULES list contains
# modules that are either public because they were meant to be, or because they
# contain public functions/objects that aren't present in any other namespace
# for whatever reason and therefore should be treated as public.
PUBLIC_MODULES = ["scipy." + s for s in [
"cluster",
"cluster.vq",
"cluster.hierarchy",
"constants",
"fft",
"fftpack",
"integrate",
"interpolate",
"io",
"io.arff",
"io.matlab",
"io.wavfile",
"linalg",
"linalg.blas",
"linalg.cython_blas",
"linalg.lapack",
"linalg.cython_lapack",
"linalg.interpolative",
"misc",
"ndimage",
"odr",
"optimize",
"signal",
"signal.windows",
"sparse",
"sparse.linalg",
"sparse.csgraph",
"spatial",
"spatial.distance",
"spatial.transform",
"special",
"stats",
"stats.contingency",
"stats.distributions",
"stats.mstats",
"stats.qmc",
"stats.sampling"
]]
# The PRIVATE_BUT_PRESENT_MODULES list contains modules that look public (lack
# of underscores) but should not be used. For many of those modules the
# current status is fine. For others it may make sense to work on making them
# private, to clean up our public API and avoid confusion.
# Support for these private modules will be removed in SciPy v2.0.0.
PRIVATE_BUT_PRESENT_MODULES = [
'scipy.constants.codata',
'scipy.constants.constants',
'scipy.fftpack.basic',
'scipy.fftpack.convolve',
'scipy.fftpack.helper',
'scipy.fftpack.pseudo_diffs',
'scipy.fftpack.realtransforms',
'scipy.integrate.odepack',
'scipy.integrate.quadpack',
'scipy.integrate.dop',
'scipy.integrate.lsoda',
'scipy.integrate.vode',
'scipy.interpolate.dfitpack',
'scipy.interpolate.fitpack',
'scipy.interpolate.fitpack2',
'scipy.interpolate.interpnd',
'scipy.interpolate.interpolate',
'scipy.interpolate.ndgriddata',
'scipy.interpolate.polyint',
'scipy.interpolate.rbf',
'scipy.io.arff.arffread',
'scipy.io.harwell_boeing',
'scipy.io.idl',
'scipy.io.mmio',
'scipy.io.netcdf',
'scipy.io.matlab.byteordercodes',
'scipy.io.matlab.mio',
'scipy.io.matlab.mio4',
'scipy.io.matlab.mio5',
'scipy.io.matlab.mio5_params',
'scipy.io.matlab.mio5_utils',
'scipy.io.matlab.mio_utils',
'scipy.io.matlab.miobase',
'scipy.io.matlab.streams',
'scipy.linalg.basic',
'scipy.linalg.decomp',
'scipy.linalg.decomp_cholesky',
'scipy.linalg.decomp_lu',
'scipy.linalg.decomp_qr',
'scipy.linalg.decomp_schur',
'scipy.linalg.decomp_svd',
'scipy.linalg.flinalg',
'scipy.linalg.matfuncs',
'scipy.linalg.misc',
'scipy.linalg.special_matrices',
'scipy.misc.common',
'scipy.misc.doccer',
'scipy.ndimage.filters',
'scipy.ndimage.fourier',
'scipy.ndimage.interpolation',
'scipy.ndimage.measurements',
'scipy.ndimage.morphology',
'scipy.odr.models',
'scipy.odr.odrpack',
'scipy.optimize.cobyla',
'scipy.optimize.cython_optimize',
'scipy.optimize.lbfgsb',
'scipy.optimize.linesearch',
'scipy.optimize.minpack',
'scipy.optimize.minpack2',
'scipy.optimize.moduleTNC',
'scipy.optimize.nonlin',
'scipy.optimize.optimize',
'scipy.optimize.slsqp',
'scipy.optimize.tnc',
'scipy.optimize.zeros',
'scipy.signal.bsplines',
'scipy.signal.filter_design',
'scipy.signal.fir_filter_design',
'scipy.signal.lti_conversion',
'scipy.signal.ltisys',
'scipy.signal.signaltools',
'scipy.signal.spectral',
'scipy.signal.spline',
'scipy.signal.waveforms',
'scipy.signal.wavelets',
'scipy.signal.windows.windows',
'scipy.sparse.base',
'scipy.sparse.bsr',
'scipy.sparse.compressed',
'scipy.sparse.construct',
'scipy.sparse.coo',
'scipy.sparse.csc',
'scipy.sparse.csr',
'scipy.sparse.data',
'scipy.sparse.dia',
'scipy.sparse.dok',
'scipy.sparse.extract',
'scipy.sparse.lil',
'scipy.sparse.linalg.dsolve',
'scipy.sparse.linalg.eigen',
'scipy.sparse.linalg.interface',
'scipy.sparse.linalg.isolve',
'scipy.sparse.linalg.matfuncs',
'scipy.sparse.sparsetools',
'scipy.sparse.spfuncs',
'scipy.sparse.sputils',
'scipy.spatial.ckdtree',
'scipy.spatial.kdtree',
'scipy.spatial.qhull',
'scipy.spatial.transform.rotation',
'scipy.special.add_newdocs',
'scipy.special.basic',
'scipy.special.cython_special',
'scipy.special.orthogonal',
'scipy.special.sf_error',
'scipy.special.specfun',
'scipy.special.spfun_stats',
'scipy.stats.biasedurn',
'scipy.stats.kde',
'scipy.stats.morestats',
'scipy.stats.mstats_basic',
'scipy.stats.mstats_extras',
'scipy.stats.mvn',
'scipy.stats.statlib',
'scipy.stats.stats',
]
def is_unexpected(name):
"""Check if this needs to be considered."""
if '._' in name or '.tests' in name or '.setup' in name:
return False
if name in PUBLIC_MODULES:
return False
if name in PRIVATE_BUT_PRESENT_MODULES:
return False
return True
SKIP_LIST = [
'scipy.conftest',
'scipy.version',
]
def test_all_modules_are_expected():
"""
Test that we don't add anything that looks like a new public module by
accident. Check is based on filenames.
"""
modnames = []
for _, modname, ispkg in pkgutil.walk_packages(path=scipy.__path__,
prefix=scipy.__name__ + '.',
onerror=None):
if is_unexpected(modname) and modname not in SKIP_LIST:
# We have a name that is new. If that's on purpose, add it to
# PUBLIC_MODULES. We don't expect to have to add anything to
# PRIVATE_BUT_PRESENT_MODULES. Use an underscore in the name!
modnames.append(modname)
if modnames:
raise AssertionError(f'Found unexpected modules: {modnames}')
# Stuff that clearly shouldn't be in the API and is detected by the next test
# below
SKIP_LIST_2 = [
'scipy.char',
'scipy.rec',
'scipy.emath',
'scipy.math',
'scipy.random',
'scipy.ctypeslib',
'scipy.ma'
]
def test_all_modules_are_expected_2():
"""
Method checking all objects. The pkgutil-based method in
`test_all_modules_are_expected` does not catch imports into a namespace,
only filenames.
"""
def find_unexpected_members(mod_name):
members = []
module = importlib.import_module(mod_name)
if hasattr(module, '__all__'):
objnames = module.__all__
else:
objnames = dir(module)
for objname in objnames:
if not objname.startswith('_'):
fullobjname = mod_name + '.' + objname
if isinstance(getattr(module, objname), types.ModuleType):
if is_unexpected(fullobjname) and fullobjname not in SKIP_LIST_2:
members.append(fullobjname)
return members
unexpected_members = find_unexpected_members("scipy")
for modname in PUBLIC_MODULES:
unexpected_members.extend(find_unexpected_members(modname))
if unexpected_members:
raise AssertionError("Found unexpected object(s) that look like "
"modules: {}".format(unexpected_members))
def test_api_importable():
"""
Check that all submodules listed higher up in this file can be imported
Note that if a PRIVATE_BUT_PRESENT_MODULES entry goes missing, it may
simply need to be removed from the list (deprecation may or may not be
needed - apply common sense).
"""
def check_importable(module_name):
try:
importlib.import_module(module_name)
except (ImportError, AttributeError):
return False
return True
module_names = []
for module_name in PUBLIC_MODULES:
if not check_importable(module_name):
module_names.append(module_name)
if module_names:
raise AssertionError("Modules in the public API that cannot be "
"imported: {}".format(module_names))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', category=DeprecationWarning)
warnings.filterwarnings('always', category=ImportWarning)
for module_name in PRIVATE_BUT_PRESENT_MODULES:
if not check_importable(module_name):
module_names.append(module_name)
if module_names:
raise AssertionError("Modules that are not really public but looked "
"public and can not be imported: "
"{}".format(module_names))
| mdhaber/scipy | scipy/_lib/tests/test_public_api.py | Python | bsd-3-clause | 9,926 | ["NetCDF"] | b69dcc4a44d2df0fa8778445fea3aed0c1f0331d1c0943a3fff22572948d918d |
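The test file above relies on two reusable ideas: walking a package's module tree with pkgutil to enumerate everything importable, and flagging objects whose __module__ differs from the namespace they are reached through. A minimal standalone sketch of both, run against Python's standard json package instead of SciPy (the package choice and the leading-underscore heuristic are assumptions made for the demo):

import importlib
import pkgutil
import json

# List every submodule that pkgutil can see under the json package.
submodules = [name for _, name, _ in pkgutil.walk_packages(path=json.__path__,
                                                           prefix=json.__name__ + '.')]
print(submodules)  # e.g. ['json.decoder', 'json.encoder', 'json.scanner', 'json.tool']

# Flag public-looking names whose __module__ differs from where we found them.
def misplaced(module_name):
    module = importlib.import_module(module_name)
    out = {}
    for name in dir(module):
        obj = getattr(module, name)
        if (not name.startswith('_') and hasattr(obj, '__module__')
                and hasattr(obj, '__name__') and obj.__module__ != module_name):
            out[name] = f'{obj.__module__}.{obj.__name__}'
    return out

print(misplaced('json'))  # e.g. {'JSONDecodeError': 'json.decoder.JSONDecodeError', ...}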
import os
import re
from collections import defaultdict
from operator import itemgetter
import logging
import pandas
from scipy.interpolate import InterpolatedUnivariateSpline as spline
from george import kernels
import matplotlib.pyplot as plt
import numpy as np
import george
import emcee
import StarData
import SpectralTypeRelations
def classify_filename(fname, type='bright'):
"""
    Given a CCF filename, extract the star combination, vsini, temperature, logg, and metallicity.
    :param fname: the CCF filename to parse
    :return: tuple of (star1, star2, vsini, temperature, logg, metallicity)
"""
# First, remove any leading directories
fname = fname.split('/')[-1]
# Star combination
m1 = re.search('\.[0-9]+kps', fname)
stars = fname[:m1.start()]
star1 = stars.split('+')[0].replace('_', ' ')
star2 = stars.split('+')[1].split('_{}'.format(type))[0].replace('_', ' ')
# secondary star vsini
vsini = float(fname[m1.start() + 1:].split('kps')[0])
# Temperature
m2 = re.search('[0-9]+\.0K', fname)
temp = float(m2.group()[:-1])
# logg
m3 = re.search('K\+[0-9]\.[0-9]', fname)
logg = float(m3.group()[1:])
# metallicity
metal = float(fname.split(str(logg))[-1])
return star1, star2, vsini, temp, logg, metal
def get_ccf_data(basedir, primary_name=None, secondary_name=None, vel_arr=np.arange(-900.0, 900.0, 0.1), type='bright'):
"""
Searches the given directory for CCF files, and classifies
by star, temperature, metallicity, and vsini
:param basedir: The directory to search for CCF files
:keyword primary_name: Optional keyword. If given, it will only get the requested primary star data
:keyword secondary_name: Same as primary_name, but only reads ccfs for the given secondary
:keyword vel_arr: The velocities to interpolate each ccf at
:return: pandas DataFrame
"""
if not basedir.endswith('/'):
basedir += '/'
all_files = ['{}{}'.format(basedir, f) for f in os.listdir(basedir) if type in f.lower()]
primary = []
secondary = []
vsini_values = []
temperature = []
gravity = []
metallicity = []
ccf = []
for fname in all_files:
star1, star2, vsini, temp, logg, metal = classify_filename(fname, type=type)
if primary_name is not None and star1.lower() != primary_name.lower():
continue
if secondary_name is not None and star2.lower() != secondary_name.lower():
continue
vel, corr = np.loadtxt(fname, unpack=True)
fcn = spline(vel, corr)
ccf.append(fcn(vel_arr))
primary.append(star1)
secondary.append(star2)
vsini_values.append(vsini)
temperature.append(temp)
gravity.append(logg)
metallicity.append(metal)
# Make a pandas dataframe with all this data
df = pandas.DataFrame(data={'Primary': primary, 'Secondary': secondary, 'Temperature': temperature,
'vsini': vsini_values, 'logg': gravity, '[Fe/H]': metallicity, 'CCF': ccf})
return df
def get_ccf_summary(basedir, vel_arr=np.arange(-900.0, 900.0, 0.1), velocity='highest', type='bright'):
"""
Very similar to get_ccf_data, but does it in a way that is more memory efficient
:param basedir: The directory to search for CCF files
:keyword velocity: The velocity to measure the CCF at. The default is 'highest', and uses the maximum of the ccf
:keyword vel_arr: The velocities to interpolate each ccf at
:return: pandas DataFrame
"""
if not basedir.endswith('/'):
basedir += '/'
all_files = ['{}{}'.format(basedir, f) for f in os.listdir(basedir) if type in f.lower()]
file_dict = defaultdict(lambda: defaultdict(list))
for fname in all_files:
star1, star2, vsini, temp, logg, metal = classify_filename(fname, type=type)
file_dict[star1][star2].append(fname)
# Now, read the ccfs for each primary/secondary combo, and find the best combination
summary_dfs = []
for primary in file_dict.keys():
for secondary in file_dict[primary].keys():
data = get_ccf_data(basedir, primary_name=primary, secondary_name=secondary,
vel_arr=vel_arr, type=type)
summary_dfs.append(find_best_pars(data, velocity=velocity, vel_arr=vel_arr))
return pandas.concat(summary_dfs, ignore_index=True)
def find_best_pars(df, velocity='highest', vel_arr=np.arange(-900.0, 900.0, 0.1)):
"""
Find the 'best-fit' parameters for each combination of primary and secondary star
:param df: the dataframe to search in
:keyword velocity: The velocity to measure the CCF at. The default is 'highest', and uses the maximum of the ccf
:keyword vel_arr: The velocities to interpolate each ccf at
:return: a dataframe with keys of primary, secondary, and the parameters
"""
# Get the names of the primary and secondary stars
primary_names = pandas.unique(df.Primary)
secondary_names = pandas.unique(df.Secondary)
# Find the ccf value at the given velocity
if velocity == 'highest':
fcn = lambda row: (np.max(row), vel_arr[np.argmax(row)])
vals = df['CCF'].map(fcn)
df['ccf_max'] = vals.map(lambda l: l[0])
df['rv'] = vals.map(lambda l: l[1])
# df['ccf_max'] = df['CCF'].map(np.max)
else:
df['ccf_max'] = df['CCF'].map(lambda arr: arr[np.argmin(np.abs(vel_arr - velocity))])
# Find the best parameter for each combination
d = defaultdict(list)
for primary in primary_names:
for secondary in secondary_names:
good = df.loc[(df.Primary == primary) & (df.Secondary == secondary)]
best = good.loc[good.ccf_max == good.ccf_max.max()]
d['Primary'].append(primary)
d['Secondary'].append(secondary)
d['Temperature'].append(best['Temperature'].item())
d['vsini'].append(best['vsini'].item())
d['logg'].append(best['logg'].item())
d['[Fe/H]'].append(best['[Fe/H]'].item())
d['rv'].append(best['rv'].item())
return pandas.DataFrame(data=d)
def get_detected_objects(df, tol=1.0):
"""
Takes a summary dataframe with RV information. Finds the median rv for each star,
and removes objects that are 'tol' km/s from the median value
:param df: A summary dataframe, such as created by find_best_pars
:param tol: The tolerance, in km/s, to accept an observation as detected
:return: a dataframe containing only detected companions
"""
secondary_names = pandas.unique(df.Secondary)
secondary_to_rv = defaultdict(float)
for secondary in secondary_names:
rv = df.loc[df.Secondary == secondary]['rv'].median()
secondary_to_rv[secondary] = rv
print secondary, rv
keys = df.Secondary.values
good = df.loc[abs(df.rv.values - np.array(itemgetter(*keys)(secondary_to_rv))) < tol]
return good
def add_actual_temperature(df, method='spt'):
"""
Add the actual temperature to a given summary dataframe
:param df: The dataframe to which we will add the actual secondary star temperature
:param method: How to get the actual temperature. Options are:
- 'spt': Use main-sequence relationships to go from spectral type --> temperature
- 'excel': Use tabulated data, available in the file 'SecondaryStar_Temperatures.xls'
    :return: None. The dataframe is modified in place, gaining 'Tactual' and 'Tact_err' columns.
"""
# First, get a list of the secondary stars in the data
secondary_names = pandas.unique(df.Secondary)
secondary_to_temperature = defaultdict(float)
secondary_to_error = defaultdict(float)
if method.lower() == 'spt':
MS = SpectralTypeRelations.MainSequence()
for secondary in secondary_names:
star_data = StarData.GetData(secondary)
spt = star_data.spectype[0] + re.search('[0-9]\.*[0-9]*', star_data.spectype).group()
T_sec = MS.Interpolate(MS.Temperature, spt)
secondary_to_temperature[secondary] = T_sec
elif method.lower() == 'excel':
table = pandas.read_excel('SecondaryStar_Temperatures.xls', 0)
for secondary in secondary_names:
T_sec = table.loc[table.Star.str.lower().str.contains(secondary.strip().lower())]['Literature_Temp'].item()
T_error = table.loc[table.Star.str.lower().str.contains(secondary.strip().lower())][
'Literature_error'].item()
secondary_to_temperature[secondary] = T_sec
secondary_to_error[secondary] = T_error
df['Tactual'] = df['Secondary'].map(lambda s: secondary_to_temperature[s])
df['Tact_err'] = df['Secondary'].map(lambda s: secondary_to_error[s])
return
def make_gaussian_process_samples(df):
"""
Make a gaussian process fitting the Tactual-Tmeasured relationship
:param df: pandas DataFrame with columns 'Temperature' (with the measured temperature)
and 'Tactual' (for the actual temperature)
:return: emcee sampler instance
"""
# First, find the uncertainties at each actual temperature
# Tactual = df['Tactual'].values
#Tmeasured = df['Temperature'].values
#error = df['Tact_err'].values
temp = df.groupby('Temperature').mean()['Tactual']
Tmeasured = temp.keys().values
Tactual = temp.values
error = np.nan_to_num(df.groupby('Temperature').std(ddof=1)['Tactual'].values)
default = np.median(error[error > 1])
error = np.maximum(error, np.ones(error.size) * default)
for Tm, Ta, e in zip(Tmeasured, Tactual, error):
print Tm, Ta, e
plt.figure(1)
plt.errorbar(Tmeasured, Tactual, yerr=error, fmt='.k', capsize=0)
plt.plot(Tmeasured, Tmeasured, 'r--')
plt.xlim((min(Tmeasured) - 100, max(Tmeasured) + 100))
plt.xlabel('Measured Temperature')
plt.ylabel('Actual Temperature')
plt.show(block=False)
# Define some functions to use in the GP fit
def model(pars, T):
#polypars = pars[2:]
#return np.poly1d(polypars)(T)
return T
def lnlike(pars, Tact, Tmeas, Terr):
a, tau = np.exp(pars[:2])
gp = george.GP(a * kernels.ExpSquaredKernel(tau))
gp.compute(Tmeas, Terr)
return gp.lnlikelihood(Tact - model(pars, Tmeas))
def lnprior(pars):
lna, lntau = pars[:2]
polypars = pars[2:]
if -20 < lna < 20 and 4 < lntau < 20:
return 0.0
return -np.inf
def lnprob(pars, x, y, yerr):
lp = lnprior(pars)
return lp + lnlike(pars, x, y, yerr) if np.isfinite(lp) else -np.inf
# Set up the emcee fitter
initial = np.array([0, 6])#, 1.0, 0.0])
ndim = len(initial)
nwalkers = 100
p0 = [np.array(initial) + 1e-8 * np.random.randn(ndim) for i in xrange(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(Tactual, Tmeasured, error))
print 'Running first burn-in'
p1, lnp, _ = sampler.run_mcmc(p0, 500)
sampler.reset()
print "Running second burn-in..."
p_best = p1[np.argmax(lnp)]
p2 = [p_best + 1e-8 * np.random.randn(ndim) for i in xrange(nwalkers)]
p3, _, _ = sampler.run_mcmc(p2, 250)
sampler.reset()
print "Running production..."
sampler.run_mcmc(p3, 1000)
# Plot a bunch of the fits
print "Plotting..."
N = 100
Tvalues = np.arange(3300, 7000, 20)
idx = np.argsort(-sampler.lnprobability.flatten())[:N] # Get N 'best' curves
par_vals = sampler.flatchain[idx]
for i, pars in enumerate(par_vals):
a, tau = np.exp(pars[:2])
gp = george.GP(a * kernels.ExpSquaredKernel(tau))
gp.compute(Tmeasured, error)
s = gp.sample_conditional(Tactual - model(pars, Tmeasured), Tvalues) + model(pars, Tvalues)
plt.plot(Tvalues, s, 'b-', alpha=0.1)
plt.draw()
# Finally, get posterior samples at all the possibly measured temperatures
print 'Generating posterior samples at all temperatures...'
N = 10000 # This is 1/10th of the total number of samples!
idx = np.argsort(-sampler.lnprobability.flatten())[:N] # Get N 'best' curves
par_vals = sampler.flatchain[idx]
Tvalues = np.arange(3000, 6900, 100)
gp_posterior = []
for pars in par_vals:
a, tau = np.exp(pars[:2])
gp = george.GP(a * kernels.ExpSquaredKernel(tau))
gp.compute(Tmeasured, error)
s = gp.sample_conditional(Tactual - model(pars, Tmeasured), Tvalues) + model(pars, Tvalues)
gp_posterior.append(s)
# Finally, make confidence intervals for the actual temperatures
gp_posterior = np.array(gp_posterior)
l, m, h = np.percentile(gp_posterior, [16.0, 50.0, 84.0], axis=0)
conf = pandas.DataFrame(data={'Measured Temperature': Tvalues, 'Actual Temperature': m,
'Lower Bound': l, 'Upper bound': h})
conf.to_csv('Confidence_Intervals.csv', index=False)
return sampler, np.array(gp_posterior)
def check_posterior(df, posterior, Tvalues):
"""
Checks the posterior samples: Are 95% of the measurements within 2-sigma of the prediction?
:param df: The summary dataframe
:param posterior: The MCMC predicted values
:param Tvalues: The measured temperatures the posterior was made with
:return: boolean, as well as some warning messages if applicable
"""
# First, make 2-sigma confidence intervals
l, m, h = np.percentile(posterior, [5.0, 50.0, 95.0], axis=0)
# Save the confidence intervals
# conf = pandas.DataFrame(data={'Measured Temperature': Tvalues, 'Actual Temperature': m,
# 'Lower Bound': l, 'Upper bound': h})
#conf.to_csv('Confidence_Intervals.csv', index=False)
Ntot = [] # The total number of observations with the given measured temperature
Nacc = [] # The number that have actual temperatures within the confidence interval
g = df.groupby('Temperature')
for i, T in enumerate(Tvalues):
if T in g.groups.keys():
Ta = g.get_group(T)['Tactual']
low, high = l[i], h[i]
Ntot.append(len(Ta))
Nacc.append(len(Ta.loc[(Ta >= low) & (Ta <= high)]))
p = float(Nacc[-1]) / float(Ntot[-1])
if p < 0.95:
logging.warn(
'Only {}/{} of the samples ({:.2f}%) were accepted for T = {} K'.format(Nacc[-1], Ntot[-1], p * 100,
T))
print low, high
print sorted(Ta)
else:
Ntot.append(0)
Nacc.append(0)
p = float(sum(Nacc)) / float(sum(Ntot))
if p < 0.95:
logging.warn('Only {:.2f}% of the total samples were accepted!'.format(p * 100))
return False
return True
if __name__ == '__main__':
pass
| kgullikson88/TS23-Scripts | CheckSyntheticTemperature.py | Python | gpl-3.0 | 14,868 | ["Gaussian"] | 261f267f781548492b1ac956f5c1e8efeb31343f6be9bb1dfd2fb8a034928d42 |
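The calibration script above wires george and emcee together; the emcee part is easiest to see in isolation. The toy sketch below samples a one-dimensional Gaussian posterior with EnsembleSampler using the emcee 3 API (the script itself uses the older three-value run_mcmc return); the walker count, step counts, and target distribution are made up for the demo and are not the script's actual likelihood.

import numpy as np
import emcee

# Log-probability of a unit Gaussian centred at 2.0, a stand-in for a real likelihood.
def log_prob(theta):
    return -0.5 * np.sum((theta - 2.0) ** 2)

ndim, nwalkers = 1, 32
p0 = 2.0 + 1e-3 * np.random.randn(nwalkers, ndim)   # walkers start near the mode

sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob)
state = sampler.run_mcmc(p0, 500)    # burn-in
sampler.reset()
sampler.run_mcmc(state, 1000)        # production run

samples = sampler.get_chain(flat=True)
print(samples.mean(), samples.std())  # should land close to 2.0 and 1.0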
try:
import wpilib
except ImportError:
from pyfrc import wpilib
class MyRobot(wpilib.SimpleRobot):
state = 1
def __init__ (self):
super().__init__()
print("Matt the fantastic ultimate wonderful humble person")
wpilib.SmartDashboard.init()
#self.digitalInput=wpilib.DigitalInput(4)
self.CANJaguar = wpilib.CANJaguar(1)
self.gyro = wpilib.Gyro(1)
self.joystick=wpilib.Joystick(1)
self.joystick2=wpilib.Joystick(2)
self.jaguar=wpilib.Jaguar(1)
self.accelerometer=wpilib.ADXL345_I2C(1, wpilib.ADXL345_I2C.kRange_2G)
self.solenoid=wpilib.Solenoid(7)
self.solenoid2=wpilib.Solenoid(8)
self.p=1
self.i=0
self.d=0
wpilib.SmartDashboard.PutBoolean('Soleinoid 1', False)
wpilib.SmartDashboard.PutBoolean('Soleinoid 2', False)
#self.pid = wpilib.PIDController(self.p, self.i, self.d, self.gyro, self.jaguar)
self.sensor = wpilib.AnalogChannel(5)
self.ballthere = False
#self.jaguar2=wpilib.Jaguar(2)
#self.jaguar3=wpilib.Jaguar(3)
#self.jaguar4=wpilib.Jaguar(4)
#self.drive = wpilib.RobotDrive(self.jaguar, self.jaguar2, self.jaguar3, self.jaguar4)#self.jaguar4=wpilib.Jaguar(4)
#self.drive.SetSafetyEnabled(False)
def OperatorControl(self):
#yself.pid.Enable()
print("MyRobot::OperatorControl()")
wpilib.GetWatchdog().SetEnabled(False)
#dog = wpilib.GetWatchdog()
#dog.setEnabled(True)
#dog.SetExpiration(10)
while self.IsOperatorControl() and self.IsEnabled():
#dog.Feed()
#self.drive.MecanumDrive_Cartesian(self.Joystick.GetY(), self.Joystick.GetX(), self.Joystick2.GetX(), 0)
self.FromOperatorControl()
wpilib.Wait(0.01)
def FromOperatorControl(self):
self.CANJaguar.Set((self.joystick.GetY()))
def PIDMove(self):
self.pid.SetSetpoint(10)
''' This was the old, huge while loop.
def OldWhileLoop(self):
wpilib.SmartDashboard.PutNumber('GyroAngle', self.gyro.GetAngle())
wpilib.SmartDashboard.PutNumber('the getVoltage', self.sensor.GetVoltage())
wpilib.SmartDashboard.PutNumber('boolean ballthere', self.ballthere)
wpilib.SmartDashboard.PutNumber('soleinoid 1', self.solenoid.Get())
wpilib.SmartDashboard.PutNumber('soleinoid 2', self.solenoid2.Get())
self.solenoid.Set(wpilib.SmartDashboard.GetBoolean('Soleinoid 1'))
self.solenoid2.Set(wpilib.SmartDashboard.GetBoolean('Soleinoid 2'))
self.PIDMove()
self.OpticalThingy()
axis=self.accelerometer.GetAccelerations()
wpilib.SmartDashboard.PutNumber('Acceleration Axis X', axis.XAxis)
wpilib.SmartDashboard.PutNumber('Acceleration Axis Y', axis.YAxis)
wpilib.SmartDashboard.PutNumber('Acceleration Axis Z', axis.ZAxis)
'''
def OpticalThingy(self):
if self.sensor.GetVoltage()>1:
self.ballthere=True
if self.sensor.GetVoltage()<1:
self.ballthere=False
def run():
robot = MyRobot()
robot.StartCompetition()
return robot
if __name__ == '__main__':
wpilib.run()
| frc1418/2014 | subsystem_tests/src/robot.py | Python | bsd-3-clause | 3,538 | ["Jaguar"] | fb067461bc2499adbea8481f4fe750708e93078935b33a70732d59bd0fcef31e |
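The teleop loop in this robot program copies the joystick's Y axis straight into the CAN Jaguar every 10 ms. A common refinement is a deadband plus input squaring, shown here as a plain-Python sketch with no wpilib dependency; the deadband width and the squaring curve are arbitrary choices for the example, not something the original robot does.

def joystick_to_motor(axis_value, deadband=0.05):
    """Map a joystick axis in [-1, 1] to a motor command in [-1, 1].

    Values inside the deadband become 0 so the motor does not creep,
    and the remainder is squared (sign-preserving) for finer low-speed control.
    """
    if abs(axis_value) < deadband:
        return 0.0
    # Rescale so output is continuous at the deadband edge, then square.
    scaled = (abs(axis_value) - deadband) / (1.0 - deadband)
    return scaled * scaled if axis_value > 0 else -(scaled * scaled)


if __name__ == '__main__':
    for v in (-1.0, -0.3, -0.02, 0.0, 0.04, 0.5, 1.0):
        print(v, round(joystick_to_motor(v), 3))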
#!/usr/bin/env python
#
# Copyright (C) 2017,2018
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# -*- coding: utf-8 -*-
import espressopp
import mpi4py.MPI as MPI
import unittest
class TestNPartSubregion(unittest.TestCase):
def setUp(self):
self.system = espressopp.System()
box = (10, 10, 10)
self.system.rng = espressopp.esutil.RNG()
self.system.bc = espressopp.bc.OrthorhombicBC(self.system.rng, box)
nodeGrid = espressopp.tools.decomp.nodeGrid(MPI.COMM_WORLD.size)
cellGrid = espressopp.tools.decomp.cellGrid(box, nodeGrid, rc=1.5, skin=0.5)
self.system.storage = espressopp.storage.DomainDecomposition(self.system, nodeGrid, cellGrid)
particle_list = [
(1, 1, espressopp.Real3D(3.0, 5.0, 5.0)),
(2, 1, espressopp.Real3D(4.0, 5.0, 5.0)),
(3, 0, espressopp.Real3D(5.0, 5.0, 5.0)),
(4, 1, espressopp.Real3D(6.0, 5.0, 5.0)),
(5, 1, espressopp.Real3D(7.0, 5.0, 5.0)),
(6, 0, espressopp.Real3D(8.0, 5.0, 5.0)),
(7, 1, espressopp.Real3D(5.0, 3.0, 5.0)),
(8, 1, espressopp.Real3D(5.0, 4.0, 5.0)),
(9, 1, espressopp.Real3D(5.0, 6.0, 5.0)),
(10, 1, espressopp.Real3D(5.0, 7.0, 5.0)),
(11, 1, espressopp.Real3D(5.0, 8.0, 5.0)),
(12, 0, espressopp.Real3D(5.0, 5.0, 3.0)),
(13, 1, espressopp.Real3D(5.0, 5.0, 4.0)),
(14, 1, espressopp.Real3D(5.0, 5.0, 6.0)),
(15, 0, espressopp.Real3D(5.0, 5.0, 7.0)),
(16, 0, espressopp.Real3D(5.0, 5.0, 8.0))
]
self.system.storage.addParticles(particle_list, 'id', 'type', 'pos')
self.system.storage.decompose()
def test_geometry_spherical(self):
npartsubregion = espressopp.analysis.NPartSubregion(self.system, parttype=1, span=1.5, geometry='spherical', center=[5.0, 5.0, 5.0])
number_of_particles = npartsubregion.compute()
self.assertEqual(number_of_particles, 6)
def test_geometry_xbounded(self):
npartsubregion = espressopp.analysis.NPartSubregion(self.system, parttype=1, span=1.5, geometry='bounded-x', center=[5.0, 5.0, 5.0])
number_of_particles = npartsubregion.compute()
self.assertEqual(number_of_particles, 9)
def test_geometry_ybounded(self):
npartsubregion = espressopp.analysis.NPartSubregion(self.system, parttype=1, span=1.5, geometry='bounded-y', center=[5.0, 5.0, 5.0])
number_of_particles = npartsubregion.compute()
self.assertEqual(number_of_particles, 8)
def test_geometry_zbounded(self):
npartsubregion = espressopp.analysis.NPartSubregion(self.system, parttype=1, span=1.5, geometry='bounded-z', center=[5.0, 5.0, 5.0])
number_of_particles = npartsubregion.compute()
self.assertEqual(number_of_particles, 11)
if __name__ == '__main__':
unittest.main()
| govarguz/espressopp | testsuite/NPartSubregion/test_NPartSubregion.py | Python | gpl-3.0 | 3,603 | ["ESPResSo"] | 7d921363955778f19c79f7d89992e28a9edea94ae139bfdeb81f246b27e64201 |
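Each assertion in the test above compares NPartSubregion's count against a value worked out by hand from the particle table. Those reference values can be reproduced with a few lines of NumPy, which is one way to sanity-check the expected counts; the sketch below uses my reading of the geometries (a sphere of radius span, and slabs of half-width span along one axis), not ESPResSo++'s internal code.

import numpy as np

# (id, type, x, y, z) -- the same particle list the test adds to the system.
particles = np.array([
    (1, 1, 3.0, 5.0, 5.0), (2, 1, 4.0, 5.0, 5.0), (3, 0, 5.0, 5.0, 5.0),
    (4, 1, 6.0, 5.0, 5.0), (5, 1, 7.0, 5.0, 5.0), (6, 0, 8.0, 5.0, 5.0),
    (7, 1, 5.0, 3.0, 5.0), (8, 1, 5.0, 4.0, 5.0), (9, 1, 5.0, 6.0, 5.0),
    (10, 1, 5.0, 7.0, 5.0), (11, 1, 5.0, 8.0, 5.0), (12, 0, 5.0, 5.0, 3.0),
    (13, 1, 5.0, 5.0, 4.0), (14, 1, 5.0, 5.0, 6.0), (15, 0, 5.0, 5.0, 7.0),
    (16, 0, 5.0, 5.0, 8.0),
])
center = np.array([5.0, 5.0, 5.0])
span = 1.5
pos = particles[:, 2:]
is_type1 = particles[:, 1] == 1

# 'spherical': distance from the center no larger than span.
in_sphere = np.linalg.norm(pos - center, axis=1) <= span
print(int(np.sum(is_type1 & in_sphere)))          # 6, as asserted for 'spherical'

# 'bounded-x': only the x coordinate is constrained to |x - cx| <= span.
in_slab_x = np.abs(pos[:, 0] - center[0]) <= span
print(int(np.sum(is_type1 & in_slab_x)))          # 9, as asserted for 'bounded-x'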