repo_name (string, 6-100 chars) | path (string, 4-294 chars) | copies (981 classes) | size (string, 4-6 chars) | content (string, 606-896k chars) | license (15 classes)
---|---|---|---|---|---|
ketjow4/NOV | Lib/encodings/cp437.py | 593 | 34820 | """ Python Character Mapping Codec cp437 generated from 'VENDORS/MICSFT/PC/CP437.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp437',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
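### Usage sketch (editor's addition, illustrative only)
#
# A minimal, hedged example of exercising the charmap pair defined below
# (`decoding_table` / `encoding_map`); kept as comments so the module stays
# importable, and written for the Python 2 era this generated file targets.
#
#   text, n = codecs.charmap_decode('\x9b\xe1', 'strict', decoding_table)
#   assert text == u'\xa2\xdf' and n == 2   # CENT SIGN, SHARP S
#   data, n = codecs.charmap_encode(text, 'strict', encoding_map)
#   assert data == '\x9b\xe1'               # round-trips through encoding_map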
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00a2, # CENT SIGN
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00a5, # YEN SIGN
0x009e: 0x20a7, # PESETA SIGN
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x2310, # REVERSED NOT SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
u'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE
u'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
u'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
u'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
u'\xff' # 0x0098 -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xa2' # 0x009b -> CENT SIGN
u'\xa3' # 0x009c -> POUND SIGN
u'\xa5' # 0x009d -> YEN SIGN
u'\u20a7' # 0x009e -> PESETA SIGN
u'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
u'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
u'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
u'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
u'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
u'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
u'\u2310' # 0x00a9 -> REVERSED NOT SIGN
u'\xac' # 0x00aa -> NOT SIGN
u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
u'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
u'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u258c' # 0x00dd -> LEFT HALF BLOCK
u'\u2590' # 0x00de -> RIGHT HALF BLOCK
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
u'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
u'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
u'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
u'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
u'\xb5' # 0x00e6 -> MICRO SIGN
u'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
u'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
u'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
u'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
u'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
u'\u221e' # 0x00ec -> INFINITY
u'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
u'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
u'\u2229' # 0x00ef -> INTERSECTION
u'\u2261' # 0x00f0 -> IDENTICAL TO
u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
u'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
u'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
u'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
u'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
u'\xf7' # 0x00f6 -> DIVISION SIGN
u'\u2248' # 0x00f7 -> ALMOST EQUAL TO
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\u2219' # 0x00f9 -> BULLET OPERATOR
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\u221a' # 0x00fb -> SQUARE ROOT
u'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
0x00a2: 0x009b, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00a5: 0x009d, # YEN SIGN
0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b5: 0x00e6, # MICRO SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00bf: 0x00a8, # INVERTED QUESTION MARK
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00ff: 0x0098, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
0x03c0: 0x00e3, # GREEK SMALL LETTER PI
0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x20a7: 0x009e, # PESETA SIGN
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x221e: 0x00ec, # INFINITY
0x2229: 0x00ef, # INTERSECTION
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2261: 0x00f0, # IDENTICAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2310: 0x00a9, # REVERSED NOT SIGN
0x2320: 0x00f4, # TOP HALF INTEGRAL
0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| gpl-3.0 |
berkmancenter/mediacloud | apps/common/src/python/mediawords/db/locks.py | 1 | 3477 | """Constants and routines for handling advisory postgres locks."""
import mediawords.db
from mediawords.util.log import create_logger
from mediawords.util.perl import decode_object_from_bytes_if_needed
log = create_logger(__name__)
"""
This package just has constants that can be passed as the first value to the postgres pg_advisory_*lock functions.
If you are using an advisory lock, you should use the two-key version and take a constant from this package as the
first key to avoid conflicts.
"""
# locks to make sure we are not mining or snapshotting a topic in more than one process at a time
LOCK_TYPES = {
'test-a': 10,
'test-b': 11,
'MediaWords::Job::TM::MineTopic': 12,
'MediaWords::Job::TM::SnapshotTopic': 13,
'MediaWords::TM::Media::media_normalized_urls': 14,
'MediaWords::Crawler::Engine::run_fetcher': 15,
# Testing lock types
'TestPerlWorkerLock': 900,
'TestPythonWorkerLock': 901,
}
class McDBLocksException(Exception):
"""Default exception for package."""
pass
def get_session_lock(db: mediawords.db.DatabaseHandler, lock_type: str, lock_id: int, wait: bool = False) -> bool:
"""Get a postgres advisory lock with the lock_type and lock_id as the two keys.
Arguments:
db - db handle
lock_type - must be in LOCK_TYPES dict above
lock_id - id for the particular lock within the type
wait - if true, block while waiting for the lock, else return false if the lock is not available
Returns:
True if the lock was acquired; when wait is True the call blocks and always returns True
"""
lock_type = str(decode_object_from_bytes_if_needed(lock_type))
if isinstance(lock_id, bytes):
lock_id = decode_object_from_bytes_if_needed(lock_id)
lock_id = int(lock_id)
if isinstance(wait, bytes):
wait = decode_object_from_bytes_if_needed(wait)
wait = bool(wait)
log.debug("trying for lock: %s, %d" % (lock_type, lock_id))
if lock_type not in LOCK_TYPES:
raise McDBLocksException("lock type not in LOCK_TYPES: %s" % lock_type)
lock_type_id = LOCK_TYPES[lock_type]
if wait:
db.query("select pg_advisory_lock(%(a)s, %(b)s)", {'a': lock_type_id, 'b': lock_id})
return True
else:
r = db.query("select pg_try_advisory_lock(%(a)s, %(b)s) as locked", {'a': lock_type_id, 'b': lock_id}).hash()
return r['locked']
def release_session_lock(db: mediawords.db.DatabaseHandler, lock_type: str, lock_id: int) -> None:
"""Release the postgres advisory lock if it is held."""
lock_type = str(decode_object_from_bytes_if_needed(lock_type))
if isinstance(lock_id, bytes):
lock_id = decode_object_from_bytes_if_needed(lock_id)
lock_id = int(lock_id)
if lock_type not in LOCK_TYPES:
raise McDBLocksException("lock type not in LOCK_TYPES: %s" % lock_type)
lock_type_id = LOCK_TYPES[lock_type]
db.query("select pg_advisory_unlock(%(a)s, %(b)s)", {'a': lock_type_id, 'b': lock_id})
def list_session_locks(db: mediawords.db.DatabaseHandler, lock_type: str) -> list:
"""Return a list of all locked ids for the given lock_type."""
lock_type = str(decode_object_from_bytes_if_needed(lock_type))
if lock_type not in LOCK_TYPES:
raise McDBLocksException("lock type not in LOCK_TYPES: %s" % lock_type)
lock_type_id = LOCK_TYPES[lock_type]
# noinspection SqlResolve
return db.query(
"select objid from pg_locks where locktype = 'advisory' and classid = %(a)s",
{'a': lock_type_id}).flat()
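# --- Usage sketch (editor's addition, illustrative only) ---
# The intended locking pattern, assuming a connected handle obtained from
# mediawords.db.connect_to_db() (factory name hedged; adjust if it differs).
# 'TestPythonWorkerLock' is one of the testing entries already in LOCK_TYPES.
#
#   db = mediawords.db.connect_to_db()
#   if get_session_lock(db, 'TestPythonWorkerLock', 42, wait=False):
#       try:
#           pass  # work that must not run concurrently for id 42
#       finally:
#           release_session_lock(db, 'TestPythonWorkerLock', 42)
#   else:
#       log.info("lock 42 is held by another session; skipping")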
| agpl-3.0 |
mixturemodel-flow/tensorflow | tensorflow/contrib/graph_editor/select.py | 75 | 28656 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various ways of selecting operations and tensors in a graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from six import iteritems
from six import string_types
from tensorflow.contrib.graph_editor import util
from tensorflow.python.framework import ops as tf_ops
__all__ = [
"can_be_regex",
"make_regex",
"filter_ts",
"filter_ts_from_regex",
"filter_ops",
"filter_ops_from_regex",
"get_name_scope_ops",
"check_cios",
"get_ops_ios",
"compute_boundary_ts",
"get_within_boundary_ops",
"get_forward_walk_ops",
"get_backward_walk_ops",
"get_walks_intersection_ops",
"get_walks_union_ops",
"select_ops",
"select_ts",
"select_ops_and_ts",
]
_RE_TYPE = type(re.compile(""))
def can_be_regex(obj):
"""Return True if obj can be turned into a regular expression."""
return isinstance(obj, string_types + (_RE_TYPE,))
def make_regex(obj):
"""Return a compiled regular expression.
Args:
obj: a string or a regular expression.
Returns:
A compiled regular expression.
Raises:
ValueError: if obj could not be converted to a regular expression.
"""
if not can_be_regex(obj):
raise ValueError("Expected a string or a regex, got: {}".format(type(obj)))
if isinstance(obj, string_types):
return re.compile(obj)
else:
return obj
def _get_input_ts(ops):
"""Compute the list of unique input tensors of all the op in ops.
Args:
ops: an object convertible to a list of `tf.Operation`.
Returns:
The list of unique input tensors of all the op in ops.
Raises:
TypeError: if ops cannot be converted to a list of `tf.Operation`.
"""
ops = util.make_list_of_op(ops)
ts = []
ts_set = set()
for op in ops:
for t in op.inputs:
if t not in ts_set:
ts.append(t)
ts_set.add(t)
return ts
def _get_output_ts(ops):
"""Compute the list of unique output tensors of all the op in ops.
Args:
ops: an object convertible to a list of tf.Operation.
Returns:
The list of unique output tensors of all the op in ops.
Raises:
TypeError: if ops cannot be converted to a list of tf.Operation.
"""
ops = util.make_list_of_op(ops)
ts = []
for op in ops:
ts += op.outputs
return ts
def filter_ts(ops, positive_filter):
"""Get all the tensors which are input or output of an op in ops.
Args:
ops: an object convertible to a list of `tf.Operation`.
positive_filter: a function deciding whether to keep a tensor or not.
If `True`, all the tensors are returned.
Returns:
A list of `tf.Tensor`.
Raises:
TypeError: if ops cannot be converted to a list of `tf.Operation`.
"""
ops = util.make_list_of_op(ops)
ts = _get_input_ts(ops)
util.concatenate_unique(ts, _get_output_ts(ops))
if positive_filter is not True:
ts = [t for t in ts if positive_filter(t)]
return ts
def filter_ts_from_regex(ops, regex):
r"""Get all the tensors linked to ops that match the given regex.
Args:
ops: an object convertible to a list of tf.Operation.
regex: a regular expression matching the tensors' name.
For example, "^foo(/.*)?:\d+$" will match all the tensors in the "foo"
scope.
Returns:
A list of tf.Tensor.
Raises:
TypeError: if ops cannot be converted to a list of tf.Operation.
"""
ops = util.make_list_of_op(ops)
regex_obj = make_regex(regex)
return filter_ts(ops, positive_filter=lambda op: regex_obj.search(op.name))
def filter_ops(ops, positive_filter):
"""Get the ops passing the given filter.
Args:
ops: an object convertible to a list of tf.Operation.
positive_filter: a function deciding whether to keep an operation or not.
If True, all the operations are returned.
Returns:
A list of selected tf.Operation.
Raises:
TypeError: if ops cannot be converted to a list of tf.Operation.
"""
ops = util.make_list_of_op(ops)
if positive_filter is not True: # pylint: disable=g-explicit-bool-comparison
ops = [op for op in ops if positive_filter(op)]
return ops
def filter_ops_from_regex(ops, regex):
"""Get all the operations that match the given regex.
Args:
ops: an object convertible to a list of `tf.Operation`.
regex: a regular expression matching the operation's name.
For example, `"^foo(/.*)?$"` will match all the operations in the "foo"
scope.
Returns:
A list of `tf.Operation`.
Raises:
TypeError: if ops cannot be converted to a list of `tf.Operation`.
"""
ops = util.make_list_of_op(ops)
regex_obj = make_regex(regex)
return filter_ops(ops, lambda op: regex_obj.search(op.name))
def get_name_scope_ops(ops, scope):
"""Get all the operations under the given scope path.
Args:
ops: an object convertible to a list of tf.Operation.
scope: a scope path.
Returns:
A list of tf.Operation.
Raises:
TypeError: if ops cannot be converted to a list of tf.Operation.
"""
if scope and scope[-1] == "/":
scope = scope[:-1]
return filter_ops_from_regex(ops, "^{}(/.*)?$".format(scope))
def check_cios(control_inputs=False, control_outputs=None, control_ios=None):
"""Do various check on control_inputs and control_outputs.
Args:
control_inputs: A boolean indicating whether control inputs are enabled.
control_outputs: An instance of util.ControlOutputs or None. If not None,
control outputs are enabled.
control_ios: An instance of util.ControlOutputs or None. If not None, both
control inputs and control outputs are enabled. This is equivalent to set
control_inputs to True and control_outputs to the util.ControlOutputs
instance.
Returns:
A tuple `(control_inputs, control_outputs)` where:
`control_inputs` is a boolean indicating whether to use control inputs.
`control_outputs` is an instance of util.ControlOutputs or None
Raises:
ValueError: if control_inputs is an instance of util.ControlOutputs but
control_outputs is not None
TypeError: if control_outputs is not None and is not a util.ControlOutputs.
"""
if control_ios is not None:
if not isinstance(control_ios, util.ControlOutputs):
raise TypeError("Expected a util.ControlOutputs, got: {}".format(
type(control_ios)))
if control_outputs is not None:
raise ValueError("control_outputs should be None when using control_ios.")
control_inputs = True
control_outputs = control_ios
elif control_outputs is not None:
if not isinstance(control_outputs, util.ControlOutputs):
raise TypeError("Expected a util.ControlOutputs, got: {}".format(
type(control_outputs)))
if control_outputs is not None:
control_outputs.update()
return control_inputs, control_outputs
def get_ops_ios(ops, control_inputs=False, control_outputs=None,
control_ios=None):
"""Return all the `tf.Operation` which are connected to an op in ops.
Args:
ops: an object convertible to a list of `tf.Operation`.
control_inputs: A boolean indicating whether control inputs are enabled.
control_outputs: An instance of `util.ControlOutputs` or `None`. If not
`None`, control outputs are enabled.
control_ios: An instance of `util.ControlOutputs` or `None`. If not `None`,
both control inputs and control outputs are enabled. This is equivalent to
set `control_inputs` to `True` and `control_outputs` to the
`util.ControlOutputs` instance.
Returns:
All the `tf.Operation` surrounding the given ops.
Raises:
TypeError: if `ops` cannot be converted to a list of `tf.Operation`.
"""
control_inputs, control_outputs = check_cios(control_inputs, control_outputs,
control_ios)
ops = util.make_list_of_op(ops)
res = []
for op in ops:
util.concatenate_unique(res, [t.op for t in op.inputs])
for t in op.outputs:
util.concatenate_unique(res, t.consumers())
if control_outputs is not None:
util.concatenate_unique(res, control_outputs.get(op))
if control_inputs:
util.concatenate_unique(res, op.control_inputs)
return res
def compute_boundary_ts(ops):
"""Compute the tensors at the boundary of a set of ops.
This function looks at all the tensors connected to the given ops (in/out)
and classifies them into three categories:
1) input tensors: tensors whose generating operation is not in ops.
2) output tensors: tensors whose consumer operations are not in ops
3) inside tensors: tensors which are neither input nor output tensors.
Note that a tensor can be both an inside tensor and an output tensor if it is
consumed by operations both outside and inside of `ops`.
Args:
ops: an object convertible to a list of tf.Operation.
Returns:
A tuple `(outside_input_ts, outside_output_ts, inside_ts)` where:
`outside_input_ts` is a Python list of input tensors;
`outside_output_ts` is a python list of output tensors;
`inside_ts` is a python list of inside tensors.
Since a tensor can be both an inside tensor and an output tensor,
`outside_output_ts` and `inside_ts` might intersect.
Raises:
TypeError: if ops cannot be converted to a list of tf.Operation.
"""
ops = util.make_list_of_op(ops)
input_ts = _get_input_ts(ops)
output_ts = _get_output_ts(ops)
output_ts_set = frozenset(output_ts)
ops_set = frozenset(ops)
# Compute inside tensors.
inside_ts = []
only_inside_ts = []
for t in input_ts:
# Skip if the input tensor is not also an output tensor.
if t not in output_ts_set:
continue
# Mark as "inside".
inside_ts.append(t)
# Mark as "only inside" if no consumer of the tensor lies outside of ops.
consumers = frozenset(t.consumers())
if consumers - ops_set:
continue
only_inside_ts.append(t)
inside_ts_set = frozenset(inside_ts)
only_inside_ts_set = frozenset(only_inside_ts)
outside_output_ts = [t for t in output_ts if t not in only_inside_ts_set]
outside_input_ts = [t for t in input_ts if t not in inside_ts_set]
return outside_input_ts, outside_output_ts, inside_ts
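# Editor's sketch (illustrative only, assumes TF 1.x graph mode): for
#
#   a = tf.constant(1.0, name="a")
#   b = tf.square(a, name="b")
#   c = tf.sqrt(b, name="c")
#
# compute_boundary_ts([b.op]) classifies `a:0` as an input tensor (its
# producer is outside the set) and `b:0` as an output tensor (consumed by
# `c`, which is outside), with no inside tensors for a one-op set.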
def get_within_boundary_ops(ops,
seed_ops,
boundary_ops=(),
inclusive=True,
control_inputs=False,
control_outputs=None,
control_ios=None):
"""Return all the `tf.Operation` within the given boundary.
Args:
ops: an object convertible to a list of `tf.Operation`. those ops define the
set in which to perform the operation (if a `tf.Graph` is given, it
will be converted to the list of all its operations).
seed_ops: the operations from which to start expanding.
boundary_ops: the ops forming the boundary.
inclusive: if `True`, the result will also include the boundary ops.
control_inputs: A boolean indicating whether control inputs are enabled.
control_outputs: An instance of `util.ControlOutputs` or `None`. If not
`None`, control outputs are enabled.
control_ios: An instance of `util.ControlOutputs` or `None`. If not
`None`, both control inputs and control outputs are enabled. This is
equivalent to set control_inputs to True and control_outputs to
the `util.ControlOutputs` instance.
Returns:
All the `tf.Operation` surrounding the given ops.
Raises:
TypeError: if `ops` or `seed_ops` cannot be converted to a list of
`tf.Operation`.
ValueError: if the boundary is intersecting with the seeds.
"""
control_inputs, control_outputs = check_cios(control_inputs, control_outputs,
control_ios)
ops = util.make_list_of_op(ops)
seed_ops = util.make_list_of_op(seed_ops, allow_graph=False)
boundary_ops = set(util.make_list_of_op(boundary_ops))
res = set(seed_ops)
if boundary_ops & res:
raise ValueError("Boundary is intersecting with the seeds.")
wave = set(seed_ops)
while wave:
new_wave = set()
ops_io = get_ops_ios(wave, control_inputs, control_outputs)
for op in ops_io:
if op in res:
continue
if op in boundary_ops:
if inclusive:
res.add(op)
else:
new_wave.add(op)
res.update(new_wave)
wave = new_wave
return [op for op in ops if op in res]
def get_forward_walk_ops(seed_ops,
inclusive=True,
within_ops=None,
stop_at_ts=(),
control_outputs=None):
"""Do a forward graph walk and return all the visited ops.
Args:
seed_ops: an iterable of operations from which the forward graph
walk starts. If a list of tensors is given instead, the seed_ops are set
to be the consumers of those tensors.
inclusive: if True the given seed_ops are also part of the resulting set.
within_ops: an iterable of `tf.Operation` within which the search is
restricted. If `within_ops` is `None`, the search is performed within
the whole graph.
stop_at_ts: an iterable of tensors at which the graph walk stops.
control_outputs: a `util.ControlOutputs` instance or None.
If not `None`, it will be used while walking the graph forward.
Returns:
A Python set of all the `tf.Operation` ahead of `seed_ops`.
Raises:
TypeError: if `seed_ops` or `within_ops` cannot be converted to a list of
`tf.Operation`.
"""
_, control_outputs = check_cios(False, control_outputs)
if not util.is_iterable(seed_ops):
seed_ops = [seed_ops]
if not seed_ops:
return []
if isinstance(seed_ops[0], tf_ops.Tensor):
ts = util.make_list_of_t(seed_ops, allow_graph=False)
seed_ops = util.get_consuming_ops(ts)
else:
seed_ops = util.make_list_of_op(seed_ops, allow_graph=False)
seed_ops = frozenset(seed_ops)
stop_at_ts = frozenset(util.make_list_of_t(stop_at_ts))
if within_ops:
within_ops = util.make_list_of_op(within_ops, allow_graph=False)
within_ops = frozenset(within_ops)
seed_ops &= within_ops
def is_within(op):
return within_ops is None or op in within_ops
result = list(seed_ops)
wave = set(seed_ops)
while wave:
new_wave = set()
for op in wave:
for new_t in op.outputs:
if new_t in stop_at_ts:
continue
for new_op in new_t.consumers():
if new_op not in result and is_within(new_op):
new_wave.add(new_op)
if control_outputs is not None:
for new_op in control_outputs.get(op):
if new_op not in result and is_within(new_op):
new_wave.add(new_op)
util.concatenate_unique(result, new_wave)
wave = new_wave
if not inclusive:
result = [op for op in result if op not in seed_ops]
return result
def get_backward_walk_ops(seed_ops,
inclusive=True,
within_ops=None,
stop_at_ts=(),
control_inputs=False):
"""Do a backward graph walk and return all the visited ops.
Args:
seed_ops: an iterable of operations from which the backward graph
walk starts. If a list of tensors is given instead, the seed_ops are set
to be the generators of those tensors.
inclusive: if True the given seed_ops are also part of the resulting set.
within_ops: an iterable of `tf.Operation` within which the search is
restricted. If `within_ops` is `None`, the search is performed within
the whole graph.
stop_at_ts: an iterable of tensors at which the graph walk stops.
control_inputs: if True, control inputs will be used while moving backward.
Returns:
A Python set of all the `tf.Operation` behind `seed_ops`.
Raises:
TypeError: if `seed_ops` or `within_ops` cannot be converted to a list of
`tf.Operation`.
"""
if not util.is_iterable(seed_ops):
seed_ops = [seed_ops]
if not seed_ops:
return []
if isinstance(seed_ops[0], tf_ops.Tensor):
ts = util.make_list_of_t(seed_ops, allow_graph=False)
seed_ops = util.get_generating_ops(ts)
else:
seed_ops = util.make_list_of_op(seed_ops, allow_graph=False)
stop_at_ts = frozenset(util.make_list_of_t(stop_at_ts))
seed_ops = frozenset(util.make_list_of_op(seed_ops))
if within_ops:
within_ops = util.make_list_of_op(within_ops, allow_graph=False)
within_ops = frozenset(within_ops)
seed_ops &= within_ops
def is_within(op):
return within_ops is None or op in within_ops
result = list(seed_ops)
wave = set(seed_ops)
while wave:
new_wave = set()
for op in wave:
for new_t in op.inputs:
if new_t in stop_at_ts:
continue
if new_t.op not in result and is_within(new_t.op):
new_wave.add(new_t.op)
if control_inputs:
for new_op in op.control_inputs:
if new_op not in result and is_within(new_op):
new_wave.add(new_op)
util.concatenate_unique(result, new_wave)
wave = new_wave
if not inclusive:
result = [op for op in result if op not in seed_ops]
return result
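# Editor's sketch (illustrative only): with the same three-op chain
# a -> square -> sqrt used above,
#
#   get_backward_walk_ops([c], inclusive=True)
#
# returns the full dependency cone of `c` (the sqrt, square and constant
# ops), while get_forward_walk_ops([a.op]) yields the same three ops walked
# in the other direction; passing stop_at_ts=[b] cuts either walk at `b`.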
def get_walks_intersection_ops(forward_seed_ops,
backward_seed_ops,
forward_inclusive=True,
backward_inclusive=True,
within_ops=None,
control_inputs=False,
control_outputs=None,
control_ios=None):
"""Return the intersection of a forward and a backward walk.
Args:
forward_seed_ops: an iterable of operations from which the forward graph
walk starts. If a list of tensors is given instead, the seed_ops are set
to be the consumers of those tensors.
backward_seed_ops: an iterable of operations from which the backward graph
walk starts. If a list of tensors is given instead, the seed_ops are set
to be the generators of those tensors.
forward_inclusive: if True the given forward_seed_ops are also part of the
resulting set.
backward_inclusive: if True the given backward_seed_ops are also part of the
resulting set.
within_ops: an iterable of tf.Operation within which the search is
restricted. If within_ops is None, the search is performed within
the whole graph.
control_inputs: A boolean indicating whether control inputs are enabled.
control_outputs: An instance of util.ControlOutputs or None. If not None,
control outputs are enabled.
control_ios: An instance of util.ControlOutputs or None. If not None, both
control inputs and control outputs are enabled. This is equivalent to set
control_inputs to True and control_outputs to the util.ControlOutputs
instance.
Returns:
A Python set of all the tf.Operation in the intersection of a forward and a
backward walk.
Raises:
TypeError: if `forward_seed_ops` or `backward_seed_ops` or `within_ops`
cannot be converted to a list of `tf.Operation`.
"""
control_inputs, control_outputs = check_cios(control_inputs, control_outputs,
control_ios)
forward_ops = get_forward_walk_ops(
forward_seed_ops,
inclusive=forward_inclusive,
within_ops=within_ops,
control_outputs=control_outputs)
backward_ops = get_backward_walk_ops(
backward_seed_ops,
inclusive=backward_inclusive,
within_ops=within_ops,
control_inputs=control_inputs)
return [op for op in forward_ops if op in backward_ops]
def get_walks_union_ops(forward_seed_ops,
backward_seed_ops,
forward_inclusive=True,
backward_inclusive=True,
within_ops=None,
control_inputs=False,
control_outputs=None,
control_ios=None):
"""Return the union of a forward and a backward walk.
Args:
forward_seed_ops: an iterable of operations from which the forward graph
walk starts. If a list of tensors is given instead, the seed_ops are set
to be the consumers of those tensors.
backward_seed_ops: an iterable of operations from which the backward graph
walk starts. If a list of tensors is given instead, the seed_ops are set
to be the generators of those tensors.
forward_inclusive: if True the given forward_seed_ops are also part of the
resulting set.
backward_inclusive: if True the given backward_seed_ops are also part of the
resulting set.
within_ops: restrict the search within those operations. If within_ops is
None, the search is done within the whole graph.
control_inputs: A boolean indicating whether control inputs are enabled.
control_outputs: An instance of util.ControlOutputs or None. If not None,
control outputs are enabled.
control_ios: An instance of util.ControlOutputs or None. If not None, both
control inputs and control outputs are enabled. This is equivalent to set
control_inputs to True and control_outputs to the util.ControlOutputs
instance.
Returns:
A Python set of all the tf.Operation in the union of a forward and a
backward walk.
Raises:
TypeError: if forward_seed_ops or backward_seed_ops or within_ops cannot be
converted to a list of tf.Operation.
"""
control_inputs, control_outputs = check_cios(control_inputs, control_outputs,
control_ios)
forward_ops = get_forward_walk_ops(
forward_seed_ops,
inclusive=forward_inclusive,
within_ops=within_ops,
control_outputs=control_outputs)
backward_ops = get_backward_walk_ops(
backward_seed_ops,
inclusive=backward_inclusive,
within_ops=within_ops,
control_inputs=control_inputs)
return util.concatenate_unique(forward_ops, backward_ops)
def select_ops(*args, **kwargs):
"""Helper to select operations.
Args:
*args: list of 1) regular expressions (compiled or not) or 2) (array of)
`tf.Operation`. `tf.Tensor` instances are silently ignored.
**kwargs: 'graph': `tf.Graph` in which to perform the regex query. This is
required when using regex.
'positive_filter': an elem is selected only if `positive_filter(elem)` is
`True`. This is optional.
'restrict_ops_regex': a regular expression is ignored if it doesn't start
with the substring "(?#ops)".
Returns:
A list of `tf.Operation`.
Raises:
TypeError: if the optional keyword argument graph is not a `tf.Graph`
or if an argument in args is not an (array of) `tf.Operation`
or an (array of) `tf.Tensor` (silently ignored) or a string
or a regular expression.
ValueError: if one of the keyword arguments is unexpected or if a regular
expression is used without passing a graph as a keyword argument.
"""
# get keyword arguments
graph = None
positive_filter = None
restrict_ops_regex = False
for k, v in iteritems(kwargs):
if k == "graph":
graph = v
if graph is not None and not isinstance(graph, tf_ops.Graph):
raise TypeError("Expected a tf.Graph, got: {}".format(type(graph)))
elif k == "positive_filter":
positive_filter = v
elif k == "restrict_ops_regex":
restrict_ops_regex = v
elif k == "restrict_ts_regex":
pass
else:
raise ValueError("Wrong keywords argument: {}.".format(k))
ops = []
for arg in args:
if can_be_regex(arg):
if graph is None:
raise ValueError("Use the keyword argument 'graph' to use regex.")
regex = make_regex(arg)
if regex.pattern.startswith("(?#ts)"):
continue
if restrict_ops_regex and not regex.pattern.startswith("(?#ops)"):
continue
ops_ = filter_ops_from_regex(graph, regex)
for op_ in ops_:
if op_ not in ops:
if positive_filter is None or positive_filter(op_):
ops.append(op_)
else:
ops_aux = util.make_list_of_op(arg, ignore_ts=True)
if positive_filter is not None:
ops_aux = [op for op in ops_aux if positive_filter(op)]
ops_aux = [op for op in ops_aux if op not in ops]
ops += ops_aux
return ops
def select_ts(*args, **kwargs):
"""Helper to select tensors.
Args:
*args: list of 1) regular expressions (compiled or not) or 2) (array of)
`tf.Tensor`. `tf.Operation` instances are silently ignored.
**kwargs: 'graph': `tf.Graph` in which to perform the regex query. This is
required when using regex.
'positive_filter': an elem is selected only if `positive_filter(elem)` is
`True`. This is optional.
'restrict_ts_regex': a regular expression is ignored if it doesn't start
with the substring "(?#ts)".
Returns:
A list of `tf.Tensor`.
Raises:
TypeError: if the optional keyword argument graph is not a `tf.Graph`
or if an argument in args is not an (array of) `tf.Tensor`
or an (array of) `tf.Operation` (silently ignored) or a string
or a regular expression.
ValueError: if one of the keyword arguments is unexpected or if a regular
expression is used without passing a graph as a keyword argument.
"""
# get keyword arguments
graph = None
positive_filter = None
restrict_ts_regex = False
for k, v in iteritems(kwargs):
if k == "graph":
graph = v
if graph is not None and not isinstance(graph, tf_ops.Graph):
raise TypeError("Expected a tf.Graph, got {}".format(type(graph)))
elif k == "positive_filter":
positive_filter = v
elif k == "restrict_ts_regex":
restrict_ts_regex = v
elif k == "restrict_ops_regex":
pass
else:
raise ValueError("Wrong keywords argument: {}.".format(k))
ts = []
for arg in args:
if can_be_regex(arg):
if graph is None:
raise ValueError("Use the keyword argument 'graph' to use regex.")
regex = make_regex(arg)
if regex.pattern.startswith("(?#ops)"):
continue
if restrict_ts_regex and not regex.pattern.startswith("(?#ts)"):
continue
ts_ = filter_ts_from_regex(graph, regex)
for t_ in ts_:
if t_ not in ts:
if positive_filter is None or positive_filter(t_):
ts.append(t_)
else:
ts_aux = util.make_list_of_t(arg, ignore_ops=True)
if positive_filter is not None:
ts_aux = [t for t in ts_aux if positive_filter(t)]
ts_aux = [t for t in ts_aux if t not in ts]
ts += ts_aux
return ts
def select_ops_and_ts(*args, **kwargs):
"""Helper to select operations and tensors.
Args:
*args: list of 1) regular expressions (compiled or not) or 2) (array of)
`tf.Operation` 3) (array of) tf.Tensor. Regular expressions matching
tensors must start with the comment `"(?#ts)"`, for instance:
`"(?#ts)^foo/.*"`.
**kwargs: 'graph': `tf.Graph` in which to perform the regex query. This is
required when using regex.
'positive_filter': an elem is selected only if `positive_filter(elem)` is
`True`. This is optional.
Returns:
A tuple `(ops, ts)` where:
`ops` is a list of `tf.Operation`, and
`ts` is a list of `tf.Tensor`
Raises:
TypeError: if the optional keyword argument graph is not a `tf.Graph`
or if an argument in args is not an (array of) `tf.Tensor`
or an (array of) `tf.Operation` or a string or a regular expression.
ValueError: if one of the keyword arguments is unexpected or if a regular
expression is used without passing a graph as a keyword argument.
"""
ops = select_ops(*args, restrict_ops_regex=False, **kwargs)
ts = select_ts(*args, restrict_ts_regex=True, **kwargs)
return ops, ts
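# A minimal usage sketch of the three selection helpers above. The graph and
# the scope name "foo" are illustrative assumptions, not part of this module;
# "(?#ops)" / "(?#ts)" mark a regex as matching only operations or only
# tensors, as documented in the docstrings above.
def _example_select_usage(graph):
  # Select every operation under the (illustrative) "foo" scope.
  ops = select_ops("(?#ops)^foo/.*", graph=graph)
  # Select every tensor under the same scope.
  ts = select_ts("(?#ts)^foo/.*", graph=graph)
  # One call returning (ops, ts); tensor regexes must carry "(?#ts)".
  both_ops, both_ts = select_ops_and_ts("^foo/.*", "(?#ts)^foo/.*",
                                        graph=graph)
  return ops, ts, both_ops, both_ts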
| apache-2.0 |
suiyuan2009/tensorflow | tensorflow/contrib/saved_model/__init__.py | 109 | 1411 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SavedModel contrib support.
SavedModel provides a language-neutral format to save machine-learned models
that is recoverable and hermetic. It enables higher-level systems and tools to
produce, consume and transform TensorFlow models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import,line-too-long
from tensorflow.contrib.saved_model.python.saved_model.signature_def_utils import *
# pylint: enable=unused-import,wildcard-import,line-too-long
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ["get_signature_def_by_key"]
remove_undocumented(__name__, _allowed_symbols)
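# A hedged usage sketch of the single documented symbol. The export_dir and
# the signature key below are illustrative assumptions:
#
#   import tensorflow as tf
#   from tensorflow.python.saved_model import loader, tag_constants
#
#   with tf.Session(graph=tf.Graph()) as sess:
#     meta_graph_def = loader.load(sess, [tag_constants.SERVING], export_dir)
#     signature = get_signature_def_by_key(meta_graph_def, "serving_default")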
| apache-2.0 |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/newrelic-2.46.0.37/newrelic/hooks/component_cornice.py | 2 | 1974 | """Instrumentation for the Cornice REST library for Pyramid.
"""
import functools
from newrelic.agent import (ObjectProxy, function_wrapper, callable_name,
current_transaction, FunctionTrace, wrap_function_wrapper)
module_cornice_service = None
@function_wrapper
def wrapper_Resource_method(wrapped, instance, args, kwargs):
transaction = current_transaction()
if transaction is None:
return wrapped(*args, **kwargs)
name = callable_name(wrapped)
transaction.set_transaction_name(name)
with FunctionTrace(transaction, name):
return wrapped(*args, **kwargs)
def wrapper_Resource(view):
@function_wrapper
def _wrapper_Resource(wrapped, instance, args, kwargs):
ob = wrapped(*args, **kwargs)
method = getattr(ob, view)
setattr(ob, view, wrapper_Resource_method(method))
return ob
return _wrapper_Resource
def wrapper_decorate_view(wrapped, instance, args, kwargs):
def _bind_params(view, args, method):
return view, args, method
_view, _args, _method = _bind_params(*args, **kwargs)
if 'klass' in _args and not callable(_view):
if module_cornice_service.is_string(_view):
_klass = _args['klass']
_args = dict(_args)
_args['klass'] = wrapper_Resource(_view)(_klass)
return wrapped(_view, _args, _method)
# For Cornice 0.17 or older we need to fixup the fact that they do
# not copy the wrapped view attributes to the wrapper it returns.
# This is only needed where the view is not a string.
wrapper = wrapped(*args, **kwargs)
if not module_cornice_service.is_string(_view):
if wrapper.__name__ != _view.__name__:
return functools.wraps(_view)(wrapper)
return wrapper
def instrument_cornice_service(module):
global module_cornice_service
module_cornice_service = module
wrap_function_wrapper(module, 'decorate_view', wrapper_decorate_view)
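# Hedged sketch of the code path this module instruments. Given a Cornice
# class-based resource (the route and class names are illustrative):
#
#   from cornice.resource import resource
#
#   @resource(collection_path='/users', path='/users/{id}')
#   class Users(object):
#       def collection_get(self):
#           return {'users': []}
#
# Cornice calls decorate_view() with the view name as a string plus a
# 'klass' argument; wrapper_decorate_view substitutes a klass whose view
# method is wrapped by wrapper_Resource_method, so each request names the
# New Relic web transaction after the view method that actually ran.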
| agpl-3.0 |
zachcp/qiime | qiime/quality_scores_plot.py | 9 | 6918 | #!/usr/bin/env python
# File created Sept 29, 2010
from __future__ import division
__author__ = "William Walters"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["William Walters", "Greg Caporaso"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "William Walters"
__email__ = "[email protected]"
from matplotlib import use
use('Agg', warn=False)
from skbio.parse.sequences import parse_fasta
from numpy import arange, std, average
from pylab import plot, savefig, xlabel, ylabel, text, \
hist, figure, legend, title, show, xlim, ylim, xticks, yticks,\
scatter, subplot
from matplotlib.font_manager import fontManager, FontProperties
from qiime.util import gzip_open
from qiime.parse import parse_qual_score
def bin_qual_scores(qual_scores):
""" Bins qual score according to nucleotide position
qual_scores: Dict of label: numpy array of base scores
"""
qual_bins = []
qual_lens = []
for l in qual_scores.values():
qual_lens.append(len(l))
max_seq_size = max(qual_lens)
for base_position in range(max_seq_size):
qual_bins.append([])
for scores in qual_scores.values():
# Add score if exists in base position, otherwise skip
try:
qual_bins[base_position].append(scores[base_position])
except IndexError:
continue
return qual_bins
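# Worked example (values and dict ordering are illustrative): with
#   qual_scores = {'seq1': [30, 28, 25], 'seq2': [31, 27]}
# bin_qual_scores returns [[30, 31], [28, 27], [25]] -- one bin per base
# position, holding the scores of every read long enough to cover it.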
def get_qual_stats(qual_bins, score_min):
""" Generates bins of averages, std devs, total NT from quality bins"""
ave_bins = []
std_dev_bins = []
total_bases_bins = []
found_first_poor_qual_pos = False
suggested_trunc_pos = None
for base_position in qual_bins:
total_bases_bins.append(len(base_position))
std_dev_bins.append(std(base_position))
ave_bins.append(average(base_position))
if not found_first_poor_qual_pos:
if average(base_position) < score_min:
suggested_trunc_pos = qual_bins.index(base_position)
found_first_poor_qual_pos = True
return ave_bins, std_dev_bins, total_bases_bins, suggested_trunc_pos
def plot_qual_report(ave_bins,
std_dev_bins,
total_bases_bins,
score_min,
output_dir):
""" Plots, saves graph showing quality score averages, stddev.
Additionally, the total nucleotide count for each position is shown on
a second subplot
ave_bins: list with average quality score for each base position
std_dev_bins: list with standard deviation for each base position
total_bases_bins: list with total counts of bases for each position
score_min: lowest value that a given base call can be and still be
     acceptable. Used to generate a dotted line on the graph for easy
     assessment of the poor scoring positions.
output_dir: output directory
"""
t = arange(0, len(ave_bins), 1)
std_dev_plus = []
std_dev_minus = []
for n in range(len(ave_bins)):
std_dev_plus.append(ave_bins[n] + std_dev_bins[n])
std_dev_minus.append(ave_bins[n] - std_dev_bins[n])
figure_num = 0
f = figure(figure_num, figsize=(8, 10))
figure_title = "Quality Scores Report"
f.text(.5, .93, figure_title, horizontalalignment='center', size="large")
subplot(2, 1, 1)
plot(t, ave_bins, linewidth=2.0, color="black")
plot(t, std_dev_plus, linewidth=0.5, color="red")
dashed_line = [score_min] * len(ave_bins)
l, = plot(dashed_line, '--', color='gray')
plot(t, std_dev_minus, linewidth=0.5, color="red")
legend(
('Quality Score Average',
'Std Dev',
'Score Threshold'),
loc='lower left')
xlabel("Nucleotide Position")
ylabel("Quality Score")
subplot(2, 1, 2)
plot(t, total_bases_bins, linewidth=2.0, color="blue")
xlabel("Nucleotide Position")
ylabel("Nucleotide Counts")
outfile_name = output_dir + "/quality_scores_plot.pdf"
savefig(outfile_name)
def write_qual_report(ave_bins,
std_dev_bins,
total_bases_bins,
output_dir,
suggested_trunc_pos):
""" Writes data in bins to output text file
ave_bins: list with average quality score for each base position
std_dev_bins: list with standard deviation for each base position
total_bases_bins: list with total counts of bases for each position
output_dir: output directory
suggested_trunc_pos: Position where average quality score dropped below
the score minimum (25 by default)
"""
outfile_name = output_dir + "/quality_bins.txt"
outfile = open(outfile_name, "w")
outfile.write("# Suggested nucleotide truncation position (None if " +
"quality score average did not drop below the score minimum threshold)" +
": %s\n" % suggested_trunc_pos)
outfile.write("# Average quality score bins\n")
outfile.write(",".join(str("%2.3f" % ave) for ave in ave_bins) + "\n")
outfile.write("# Standard deviation bins\n")
outfile.write(",".join(str("%2.3f" % std) for std in std_dev_bins) + "\n")
outfile.write("# Total bases per nucleotide position bins\n")
outfile.write(",".join(str("%d" %
total_bases) for total_bases in total_bases_bins))
def generate_histogram(qual_fp,
output_dir,
score_min=25,
verbose=True,
qual_parser=parse_qual_score):
""" Main program function for generating quality score histogram
qual_fp: quality score filepath
output_dir: output directory
score_min: minimum score to be considered a reliable base call, used
to generate dotted line on histogram for easy visualization of poor
quality scores.
qual_parser : function to apply to extract quality scores
"""
if qual_fp.endswith('.gz'):
qual_lines = gzip_open(qual_fp)
else:
qual_lines = open(qual_fp, "U")
qual_scores = qual_parser(qual_lines)
# Sort bins according to base position
qual_bins = bin_qual_scores(qual_scores)
# Get average, std dev, and total nucleotide counts for each base position
ave_bins, std_dev_bins, total_bases_bins, suggested_trunc_pos =\
get_qual_stats(qual_bins, score_min)
plot_qual_report(ave_bins, std_dev_bins, total_bases_bins, score_min,
output_dir)
# Save values to output text file
write_qual_report(ave_bins, std_dev_bins, total_bases_bins, output_dir,
suggested_trunc_pos)
if verbose:
print "Suggested nucleotide truncation position (None if quality " +\
"score average did not fall below the minimum score parameter): %s\n" %\
suggested_trunc_pos
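# Hedged usage sketch (the file paths are illustrative assumptions):
#
#   generate_histogram('seqs.qual', 'qual_plots', score_min=25)
#
# writes quality_scores_plot.pdf and quality_bins.txt into qual_plots and,
# with verbose=True, prints the suggested truncation position.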
| gpl-2.0 |
CCPorg/DMD-Diamond-Ver-102-Copy | share/qt/make_spinner.py | 4415 | 1035 | #!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen
SRC='img/reload_scaled.png'
DST='../../src/qt/res/movies/update_spinner.mng'
TMPDIR='/tmp'
TMPNAME='tmp-%03i.png'
NUMFRAMES=35
FRAMERATE=10.0
CONVERT='convert'
CLOCKWISE=True
DSIZE=(16,16)
im_src = Image.open(SRC)
if CLOCKWISE:
im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)
def frame_to_filename(frame):
return path.join(TMPDIR, TMPNAME % frame)
frame_files = []
for frame in xrange(NUMFRAMES):
rotation = (frame + 0.5) / NUMFRAMES * 360.0
if CLOCKWISE:
rotation = -rotation
im_new = im_src.rotate(rotation, Image.BICUBIC)
im_new.thumbnail(DSIZE, Image.ANTIALIAS)
outfile = frame_to_filename(frame)
im_new.save(outfile, 'png')
frame_files.append(outfile)
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
| mit |
bssrdf/zulip | zerver/lib/test_helpers.py | 113 | 12407 | from django.test import TestCase
from zerver.lib.initial_password import initial_password
from zerver.lib.db import TimeTrackingCursor
from zerver.lib import cache
from zerver.lib import event_queue
from zerver.worker import queue_processors
from zerver.lib.actions import (
check_send_message, create_stream_if_needed, do_add_subscription,
get_display_recipient, get_user_profile_by_email,
)
from zerver.models import (
resolve_email_to_domain,
Client,
Message,
Realm,
Recipient,
Stream,
Subscription,
UserMessage,
)
import base64
import os
import re
import time
import ujson
import urllib
from contextlib import contextmanager
API_KEYS = {}
@contextmanager
def stub(obj, name, f):
old_f = getattr(obj, name)
setattr(obj, name, f)
yield
setattr(obj, name, old_f)
@contextmanager
def simulated_queue_client(client):
real_SimpleQueueClient = queue_processors.SimpleQueueClient
queue_processors.SimpleQueueClient = client
yield
queue_processors.SimpleQueueClient = real_SimpleQueueClient
@contextmanager
def tornado_redirected_to_list(lst):
real_event_queue_process_notification = event_queue.process_notification
event_queue.process_notification = lst.append
yield
event_queue.process_notification = real_event_queue_process_notification
@contextmanager
def simulated_empty_cache():
cache_queries = []
def my_cache_get(key, cache_name=None):
cache_queries.append(('get', key, cache_name))
return None
def my_cache_get_many(keys, cache_name=None):
cache_queries.append(('getmany', keys, cache_name))
return None
old_get = cache.cache_get
old_get_many = cache.cache_get_many
cache.cache_get = my_cache_get
cache.cache_get_many = my_cache_get_many
yield cache_queries
cache.cache_get = old_get
cache.cache_get_many = old_get_many
@contextmanager
def queries_captured():
'''
Allow a user to capture just the queries executed during
the with statement.
'''
queries = []
def wrapper_execute(self, action, sql, params=()):
start = time.time()
try:
return action(sql, params)
finally:
stop = time.time()
duration = stop - start
queries.append({
'sql': self.mogrify(sql, params),
'time': "%.3f" % duration,
})
old_execute = TimeTrackingCursor.execute
old_executemany = TimeTrackingCursor.executemany
def cursor_execute(self, sql, params=()):
return wrapper_execute(self, super(TimeTrackingCursor, self).execute, sql, params)
TimeTrackingCursor.execute = cursor_execute
def cursor_executemany(self, sql, params=()):
return wrapper_execute(self, super(TimeTrackingCursor, self).executemany, sql, params)
TimeTrackingCursor.executemany = cursor_executemany
yield queries
TimeTrackingCursor.execute = old_execute
TimeTrackingCursor.executemany = old_executemany
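# Hedged usage sketch inside a test case (the email is an illustrative
# zulip test fixture):
#
#   with queries_captured() as queries:
#       get_user_profile_by_email('hamlet@zulip.com')
#   self.assert_length(queries, 1)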
def find_key_by_email(address):
from django.core.mail import outbox
key_regex = re.compile("accounts/do_confirm/([a-f0-9]{40})>")
for message in reversed(outbox):
if address in message.to:
return key_regex.search(message.body).groups()[0]
def message_ids(result):
return set(message['id'] for message in result['messages'])
def message_stream_count(user_profile):
return UserMessage.objects. \
select_related("message"). \
filter(user_profile=user_profile). \
count()
def most_recent_usermessage(user_profile):
query = UserMessage.objects. \
select_related("message"). \
filter(user_profile=user_profile). \
order_by('-message')
return query[0] # Django does LIMIT here
def most_recent_message(user_profile):
usermessage = most_recent_usermessage(user_profile)
return usermessage.message
def get_user_messages(user_profile):
query = UserMessage.objects. \
select_related("message"). \
filter(user_profile=user_profile). \
order_by('message')
return [um.message for um in query]
class DummyObject:
pass
class DummyTornadoRequest:
def __init__(self):
self.connection = DummyObject()
self.connection.stream = DummyStream()
class DummyHandler(object):
def __init__(self, assert_callback):
self.assert_callback = assert_callback
self.request = DummyTornadoRequest()
# Mocks RequestHandler.async_callback, which wraps a callback to
# handle exceptions. We return the callback as-is.
def async_callback(self, cb):
return cb
def write(self, response):
        raise NotImplementedError()
def zulip_finish(self, response, *ignore):
if self.assert_callback:
self.assert_callback(response)
class DummySession(object):
session_key = "0"
class DummyStream:
def closed(self):
return False
class POSTRequestMock(object):
method = "POST"
def __init__(self, post_data, user_profile, assert_callback=None):
self.REQUEST = self.POST = post_data
self.user = user_profile
self._tornado_handler = DummyHandler(assert_callback)
self.session = DummySession()
self._log_data = {}
self.META = {'PATH_INFO': 'test'}
class AuthedTestCase(TestCase):
    # Helper because self.client.patch annoyingly requires you to urlencode
def client_patch(self, url, info={}, **kwargs):
info = urllib.urlencode(info)
return self.client.patch(url, info, **kwargs)
def client_put(self, url, info={}, **kwargs):
info = urllib.urlencode(info)
return self.client.put(url, info, **kwargs)
def client_delete(self, url, info={}, **kwargs):
info = urllib.urlencode(info)
return self.client.delete(url, info, **kwargs)
def login(self, email, password=None):
if password is None:
password = initial_password(email)
return self.client.post('/accounts/login/',
{'username':email, 'password':password})
def register(self, username, password, domain="zulip.com"):
self.client.post('/accounts/home/',
{'email': username + "@" + domain})
return self.submit_reg_form_for_user(username, password, domain=domain)
def submit_reg_form_for_user(self, username, password, domain="zulip.com"):
"""
Stage two of the two-step registration process.
If things are working correctly the account should be fully
registered after this call.
"""
return self.client.post('/accounts/register/',
{'full_name': username, 'password': password,
'key': find_key_by_email(username + '@' + domain),
'terms': True})
def get_api_key(self, email):
if email not in API_KEYS:
API_KEYS[email] = get_user_profile_by_email(email).api_key
return API_KEYS[email]
def api_auth(self, email):
credentials = "%s:%s" % (email, self.get_api_key(email))
return {
'HTTP_AUTHORIZATION': 'Basic ' + base64.b64encode(credentials)
}
def get_streams(self, email):
"""
Helper function to get the stream names for a user
"""
user_profile = get_user_profile_by_email(email)
subs = Subscription.objects.filter(
user_profile = user_profile,
active = True,
recipient__type = Recipient.STREAM)
return [get_display_recipient(sub.recipient) for sub in subs]
def send_message(self, sender_name, recipient_list, message_type,
content="test content", subject="test", **kwargs):
sender = get_user_profile_by_email(sender_name)
if message_type == Recipient.PERSONAL:
message_type_name = "private"
else:
message_type_name = "stream"
if isinstance(recipient_list, basestring):
recipient_list = [recipient_list]
(sending_client, _) = Client.objects.get_or_create(name="test suite")
return check_send_message(
sender, sending_client, message_type_name, recipient_list, subject,
content, forged=False, forged_timestamp=None,
forwarder_user_profile=sender, realm=sender.realm, **kwargs)
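    # Hedged usage sketch (the stream name and email are illustrative zulip
    # test fixtures):
    #
    #   self.send_message("hamlet@zulip.com", "Denmark", Recipient.STREAM,
    #                     content="hello", subject="greetings")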
def get_old_messages(self, anchor=1, num_before=100, num_after=100):
post_params = {"anchor": anchor, "num_before": num_before,
"num_after": num_after}
result = self.client.post("/json/get_old_messages", dict(post_params))
data = ujson.loads(result.content)
return data['messages']
def users_subscribed_to_stream(self, stream_name, realm_domain):
realm = Realm.objects.get(domain=realm_domain)
stream = Stream.objects.get(name=stream_name, realm=realm)
recipient = Recipient.objects.get(type_id=stream.id, type=Recipient.STREAM)
subscriptions = Subscription.objects.filter(recipient=recipient, active=True)
return [subscription.user_profile for subscription in subscriptions]
def assert_json_success(self, result):
"""
Successful POSTs return a 200 and JSON of the form {"result": "success",
"msg": ""}.
"""
self.assertEqual(result.status_code, 200, result)
json = ujson.loads(result.content)
self.assertEqual(json.get("result"), "success")
# We have a msg key for consistency with errors, but it typically has an
# empty value.
self.assertIn("msg", json)
return json
def get_json_error(self, result, status_code=400):
self.assertEqual(result.status_code, status_code)
json = ujson.loads(result.content)
self.assertEqual(json.get("result"), "error")
return json['msg']
def assert_json_error(self, result, msg, status_code=400):
"""
Invalid POSTs return an error status code and JSON of the form
{"result": "error", "msg": "reason"}.
"""
self.assertEqual(self.get_json_error(result, status_code=status_code), msg)
def assert_length(self, queries, count, exact=False):
actual_count = len(queries)
if exact:
return self.assertTrue(actual_count == count,
"len(%s) == %s, != %s" % (queries, actual_count, count))
return self.assertTrue(actual_count <= count,
"len(%s) == %s, > %s" % (queries, actual_count, count))
def assert_json_error_contains(self, result, msg_substring):
self.assertIn(msg_substring, self.get_json_error(result))
def fixture_data(self, type, action, file_type='json'):
return open(os.path.join(os.path.dirname(__file__),
"../fixtures/%s/%s_%s.%s" % (type, type, action,file_type))).read()
# Subscribe to a stream directly
def subscribe_to_stream(self, email, stream_name, realm=None):
realm = Realm.objects.get(domain=resolve_email_to_domain(email))
stream, _ = create_stream_if_needed(realm, stream_name)
user_profile = get_user_profile_by_email(email)
do_add_subscription(user_profile, stream, no_log=True)
# Subscribe to a stream by making an API request
def common_subscribe_to_streams(self, email, streams, extra_post_data = {}, invite_only=False):
post_data = {'subscriptions': ujson.dumps([{"name": stream} for stream in streams]),
'invite_only': ujson.dumps(invite_only)}
post_data.update(extra_post_data)
result = self.client.post("/api/v1/users/me/subscriptions", post_data, **self.api_auth(email))
return result
def send_json_payload(self, email, url, payload, stream_name=None, **post_params):
        if stream_name is not None:
self.subscribe_to_stream(email, stream_name)
result = self.client.post(url, payload, **post_params)
self.assert_json_success(result)
# Check the correct message was sent
msg = Message.objects.filter().order_by('-id')[0]
self.assertEqual(msg.sender.email, email)
self.assertEqual(get_display_recipient(msg.recipient), stream_name)
return msg
| apache-2.0 |
nrb/ansible-modules-extras | cloud/amazon/route53_zone.py | 37 | 5487 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
module: route53_zone
short_description: add or delete Route53 zones
description:
- Creates and deletes Route53 private and public zones
version_added: "2.0"
options:
zone:
description:
- "The DNS zone record (eg: foo.com.)"
required: true
state:
description:
      - whether or not the zone should exist
    required: false
    default: present
    choices: [ "present", "absent" ]
vpc_id:
description:
- The VPC ID the zone should be a part of (if this is going to be a private zone)
required: false
default: null
vpc_region:
description:
- The VPC Region the zone should be a part of (if this is going to be a private zone)
required: false
default: null
comment:
description:
- Comment associated with the zone
required: false
default: ''
extends_documentation_fragment: aws
author: "Christopher Troup (@minichate)"
'''
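# A hedged sketch of an EXAMPLES block for this module (the zone name is
# illustrative):
EXAMPLES = '''
# create a public zone (illustrative)
- route53_zone:
    zone: example.com
    state: present
    comment: this is an example

# delete the zone again (illustrative)
- route53_zone:
    zone: example.com
    state: absent
'''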
import time
try:
import boto
import boto.ec2
from boto import route53
from boto.route53 import Route53Connection
from boto.route53.zone import Zone
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def main():
module = AnsibleModule(
argument_spec=dict(
zone=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
vpc_id=dict(default=None),
vpc_region=dict(default=None),
comment=dict(default=''),
)
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
zone_in = module.params.get('zone').lower()
state = module.params.get('state').lower()
vpc_id = module.params.get('vpc_id')
vpc_region = module.params.get('vpc_region')
comment = module.params.get('comment')
private_zone = vpc_id is not None and vpc_region is not None
_, _, aws_connect_kwargs = get_aws_connection_info(module)
# connect to the route53 endpoint
try:
conn = Route53Connection(**aws_connect_kwargs)
except boto.exception.BotoServerError, e:
module.fail_json(msg=e.error_message)
results = conn.get_all_hosted_zones()
zones = {}
for r53zone in results['ListHostedZonesResponse']['HostedZones']:
zone_id = r53zone['Id'].replace('/hostedzone/', '')
zone_details = conn.get_hosted_zone(zone_id)['GetHostedZoneResponse']
if vpc_id and 'VPCs' in zone_details:
# this is to deal with this boto bug: https://github.com/boto/boto/pull/2882
if isinstance(zone_details['VPCs'], dict):
if zone_details['VPCs']['VPC']['VPCId'] == vpc_id:
zones[r53zone['Name']] = zone_id
else: # Forward compatibility for when boto fixes that bug
if vpc_id in [v['VPCId'] for v in zone_details['VPCs']]:
zones[r53zone['Name']] = zone_id
else:
zones[r53zone['Name']] = zone_id
record = {
'private_zone': private_zone,
'vpc_id': vpc_id,
'vpc_region': vpc_region,
'comment': comment,
}
if state == 'present' and zone_in in zones:
if private_zone:
details = conn.get_hosted_zone(zones[zone_in])
if 'VPCs' not in details['GetHostedZoneResponse']:
module.fail_json(
msg="Can't change VPC from public to private"
)
vpc_details = details['GetHostedZoneResponse']['VPCs']['VPC']
current_vpc_id = vpc_details['VPCId']
current_vpc_region = vpc_details['VPCRegion']
if current_vpc_id != vpc_id:
module.fail_json(
msg="Can't change VPC ID once a zone has been created"
)
if current_vpc_region != vpc_region:
module.fail_json(
msg="Can't change VPC Region once a zone has been created"
)
record['zone_id'] = zones[zone_in]
record['name'] = zone_in
module.exit_json(changed=False, set=record)
elif state == 'present':
result = conn.create_hosted_zone(zone_in, **record)
hosted_zone = result['CreateHostedZoneResponse']['HostedZone']
zone_id = hosted_zone['Id'].replace('/hostedzone/', '')
record['zone_id'] = zone_id
record['name'] = zone_in
module.exit_json(changed=True, set=record)
elif state == 'absent' and zone_in in zones:
conn.delete_hosted_zone(zones[zone_in])
module.exit_json(changed=True)
elif state == 'absent':
module.exit_json(changed=False)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| gpl-3.0 |
KaranToor/MA450 | google-cloud-sdk/.install/.backup/platform/ext-runtime/ruby/test/runtime_test.py | 2 | 18352 | #!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from gae_ext_runtime import testutil
RUNTIME_DEF_ROOT = os.path.dirname(os.path.dirname(__file__))
DOCKERFILE_TEXT = '''\
# This Dockerfile for a Ruby application was generated by gcloud.
# The base Dockerfile installs:
# * A number of packages needed by the Ruby runtime and by gems
# commonly used in Ruby web apps (such as libsqlite3)
# * A recent version of NodeJS
# * A recent version of the standard Ruby runtime to use by default
# * The bundler gem
FROM gcr.io/google_appengine/ruby:{base_image_tag}
# If your application requires a specific ruby version (compatible with rbenv),
# set it here. Leave blank to use the currently recommended default.
ARG REQUESTED_RUBY_VERSION="{ruby_version}"
# Install any requested ruby if not already preinstalled by the base image.
# Tries installing a prebuilt package first, then falls back to a source build.
RUN if test -n "$REQUESTED_RUBY_VERSION" -a \\
! -x /rbenv/versions/$REQUESTED_RUBY_VERSION/bin/ruby; then \\
(apt-get update -y \\
&& apt-get install -y -q gcp-ruby-$REQUESTED_RUBY_VERSION) \\
|| (cd /rbenv/plugins/ruby-build \\
&& git pull \\
&& rbenv install -s $REQUESTED_RUBY_VERSION) \\
&& rbenv global $REQUESTED_RUBY_VERSION \\
&& gem install -q --no-rdoc --no-ri bundler --version $BUNDLER_VERSION \\
&& apt-get clean \\
&& rm -f /var/lib/apt/lists/*_*; \\
fi
ENV RBENV_VERSION=${{REQUESTED_RUBY_VERSION:-$RBENV_VERSION}}
# Copy the application files.
COPY . /app/
# Install required gems if Gemfile.lock is present.
RUN if test -f Gemfile.lock; then \\
bundle install --deployment --without="development test" \\
&& rbenv rehash; \\
fi
# Temporary. Will be moved to base image later.
ENV RACK_ENV=production \\
RAILS_ENV=production \\
RAILS_SERVE_STATIC_FILES=true
# Run asset pipeline if we're in a Rails app.
RUN if test -d app/assets -a -f config/application.rb; then \\
bundle exec rake assets:precompile || true; \\
fi
# BUG: Reset entrypoint to override base image.
ENTRYPOINT []
# Start application on port $PORT.
CMD {entrypoint}
'''
class RuntimeTestCase(testutil.TestBase):
"""Tests for the Ruby external runtime fingerprinter."""
def file_contents(self, filename):
"""Reads the contents of the file from the tempdir.
Args:
filename: (str) filename to be joined with tempdir prefix.
Returns:
File contents.
"""
with open(self.full_path(filename)) as f:
return f.read()
def stub_response(self, response):
"""Stubs the console response from the user.
Args:
response: (str) stubbed response.
Returns:
A function to reset the stubbed functions to their original
implementations.
"""
can_prompt = self.exec_env.CanPrompt
prompt_response = self.exec_env.PromptResponse
def unstub():
self.exec_env.CanPrompt = can_prompt
self.exec_env.PromptResponse = prompt_response
self.exec_env.CanPrompt = lambda: True
self.exec_env.PromptResponse = lambda prompt: response
return unstub
def setUp(self):
self.runtime_def_root = RUNTIME_DEF_ROOT
super(RuntimeTestCase, self).setUp()
def test_generate_without_ruby_files(self):
self.write_file('index.html', 'index')
self.generate_configs()
self.assertFalse(os.path.exists(self.full_path('app.yaml')))
self.assertFalse(os.path.exists(self.full_path('Dockerfile')))
self.assertFalse(os.path.exists(self.full_path('.dockerignore')))
def test_generate_without_ruby_files_no_write(self):
"""Tests generate_config_data does nothing if no ruby files."""
self.write_file('index.html', 'index')
self.assertIsNone(self.generate_config_data())
self.assertFalse(os.path.exists(self.full_path('app.yaml')))
def test_generate_with_ruby_files(self):
self.write_file('index.rb', 'class Index; end')
self.write_file('Gemfile', 'source "https://rubygems.org"')
self.write_file('config.ru', 'run Index.app')
unstub = self.stub_response('bundle exec rackup -p $PORT -E deployment')
self.generate_configs()
unstub()
app_yaml = self.file_contents('app.yaml')
self.assertIn('runtime: ruby\n', app_yaml)
self.assertIn('env: flex\n', app_yaml)
self.assertIn('entrypoint: bundle exec rackup -p $PORT -E deployment\n',
app_yaml)
self.assertFalse(os.path.exists(self.full_path('Dockerfile')))
self.assertFalse(os.path.exists(self.full_path('.dockerignore')))
def test_generate_with_ruby_files_no_write(self):
"""Tests generate_config_data with basic Ruby files.
Tests that app.yaml is written with correct contents given entrypoint
response, and that Dockerfile and .dockerignore not written to disk.
"""
self.write_file('index.rb', 'class Index; end')
self.write_file('Gemfile', 'source "https://rubygems.org"')
self.write_file('config.ru', 'run Index.app')
unstub = self.stub_response('bundle exec rackup -p $PORT -E deployment')
cfg_files = self.generate_config_data()
unstub()
app_yaml = self.file_contents('app.yaml')
self.assertIn('runtime: ruby\n', app_yaml)
self.assertIn('env: flex\n', app_yaml)
self.assertIn('entrypoint: bundle exec rackup -p $PORT -E deployment\n',
app_yaml)
self.assertNotIn('Dockerfile', [f.filename for f in cfg_files])
self.assertNotIn('.dockerignore', [f.filename for f in cfg_files])
def test_generate_with_deploy(self):
self.write_file('index.rb', 'class Index; end')
self.write_file('Gemfile', 'source "https://rubygems.org"')
self.write_file('config.ru', 'run Index.app')
self.write_file('.ruby-version', 'rbx-3.9')
unstub = self.stub_response('bundle exec rackup -p $PORT -E deployment')
self.generate_configs(deploy=True)
unstub()
dockerfile = self.file_contents('Dockerfile')
self.assertEqual(
dockerfile,
DOCKERFILE_TEXT.format(
ruby_version='rbx-3.9',
entrypoint='bundle exec rackup -p $PORT -E deployment'))
dockerignore = self.file_contents('.dockerignore')
self.assertIn('.dockerignore\n', dockerignore)
self.assertIn('Dockerfile\n', dockerignore)
self.assertIn('.git\n', dockerignore)
self.assertIn('.hg\n', dockerignore)
self.assertIn('.svn\n', dockerignore)
def test_generate_with_deploy_no_write(self):
"""Tests generate_config_data with deploy=True.
Tests that .dockerignore and Dockerfile contents are correct
based on contents of app.
"""
self.write_file('index.rb', 'class Index; end')
self.write_file('Gemfile', 'source "https://rubygems.org"')
self.write_file('config.ru', 'run Index.app')
self.write_file('.ruby-version', 'rbx-3.9')
unstub = self.stub_response('bundle exec rackup -p $PORT -E deployment')
cfg_files = self.generate_config_data(deploy=True)
unstub()
self.assert_genfile_exists_with_contents(
cfg_files,
'Dockerfile',
DOCKERFILE_TEXT.format(
ruby_version='rbx-3.9',
entrypoint='bundle exec rackup -p $PORT -E deployment'))
self.assertIn('.dockerignore', [f.filename for f in cfg_files])
dockerignore = [f.contents for f in cfg_files if
f.filename == '.dockerignore'][0]
self.assertIn('.dockerignore\n', dockerignore)
self.assertIn('Dockerfile\n', dockerignore)
self.assertIn('.git\n', dockerignore)
self.assertIn('.hg\n', dockerignore)
self.assertIn('.svn\n', dockerignore)
def test_generate_with_custom(self):
self.write_file('index.rb', 'class Index; end')
self.write_file('Gemfile', 'source "https://rubygems.org"')
self.write_file('config.ru', 'run Index.app')
unstub = self.stub_response('bundle exec rackup -p $PORT -E deployment')
self.generate_configs(custom=True)
unstub()
app_yaml = self.file_contents('app.yaml')
self.assertIn('runtime: custom\n', app_yaml)
self.assertIn('env: flex\n', app_yaml)
self.assertIn('entrypoint: bundle exec rackup -p $PORT -E deployment\n',
app_yaml)
dockerfile = self.file_contents('Dockerfile')
self.assertEqual(
dockerfile,
DOCKERFILE_TEXT.format(
ruby_version='',
entrypoint='bundle exec rackup -p $PORT -E deployment'))
dockerignore = self.file_contents('.dockerignore')
self.assertIn('.dockerignore\n', dockerignore)
self.assertIn('Dockerfile\n', dockerignore)
self.assertIn('.git\n', dockerignore)
self.assertIn('.hg\n', dockerignore)
self.assertIn('.svn\n', dockerignore)
def test_generate_with_custom_no_write(self):
"""Tests generate_config_data with custom=True.
Tests that app.yaml is written with correct parameters and
Dockerfile, .dockerignore contents are correctly returned by method.
"""
self.write_file('index.rb', 'class Index; end')
self.write_file('Gemfile', 'source "https://rubygems.org"')
self.write_file('config.ru', 'run Index.app')
unstub = self.stub_response('bundle exec rackup -p $PORT -E deployment')
cfg_files = self.generate_config_data(custom=True)
unstub()
app_yaml = self.file_contents('app.yaml')
self.assertIn('runtime: custom\n', app_yaml)
self.assertIn('env: flex\n', app_yaml)
self.assertIn('entrypoint: bundle exec rackup -p $PORT -E deployment\n',
app_yaml)
self.assert_genfile_exists_with_contents(
cfg_files,
'Dockerfile',
DOCKERFILE_TEXT.format(
ruby_version='',
entrypoint='bundle exec rackup -p $PORT -E deployment'))
self.assertIn('.dockerignore', [f.filename for f in cfg_files])
dockerignore = [f.contents for f in cfg_files if
f.filename == '.dockerignore'][0]
self.assertIn('.dockerignore\n', dockerignore)
self.assertIn('Dockerfile\n', dockerignore)
self.assertIn('.git\n', dockerignore)
self.assertIn('.hg\n', dockerignore)
self.assertIn('.svn\n', dockerignore)
def test_generate_with_existing_appinfo(self):
self.write_file('index.rb', 'class Index; end')
self.write_file('Gemfile', 'source "https://rubygems.org"')
self.write_file('config.ru', 'run Index.app')
appinfo = testutil.AppInfoFake(
entrypoint='bundle exec ruby index.rb $PORT',
runtime='ruby',
vm=True)
self.generate_configs(appinfo=appinfo, deploy=True)
self.assertFalse(os.path.exists(self.full_path('app.yaml')))
dockerfile = self.file_contents('Dockerfile')
self.assertEqual(
dockerfile,
DOCKERFILE_TEXT.format(
ruby_version='',
entrypoint='bundle exec ruby index.rb $PORT'))
dockerignore = self.file_contents('.dockerignore')
self.assertIn('.dockerignore\n', dockerignore)
self.assertIn('Dockerfile\n', dockerignore)
self.assertIn('.git\n', dockerignore)
self.assertIn('.hg\n', dockerignore)
self.assertIn('.svn\n', dockerignore)
def test_generate_with_existing_appinfo_no_write(self):
"""Tests generate_config_data with passed appinfo."""
self.write_file('index.rb', 'class Index; end')
self.write_file('Gemfile', 'source "https://rubygems.org"')
self.write_file('config.ru', 'run Index.app')
appinfo = testutil.AppInfoFake(
entrypoint='bundle exec ruby index.rb $PORT',
runtime='ruby',
vm=True)
cfg_files = self.generate_config_data(appinfo=appinfo, deploy=True)
self.assertFalse(os.path.exists(self.full_path('app.yaml')))
self.assert_genfile_exists_with_contents(
cfg_files,
'Dockerfile',
DOCKERFILE_TEXT.format(
ruby_version='',
entrypoint='bundle exec ruby index.rb $PORT'))
self.assertIn('.dockerignore', [f.filename for f in cfg_files])
dockerignore = [f.contents for f in cfg_files if
f.filename == '.dockerignore'][0]
self.assertIn('.dockerignore\n', dockerignore)
self.assertIn('Dockerfile\n', dockerignore)
self.assertIn('.git\n', dockerignore)
self.assertIn('.hg\n', dockerignore)
self.assertIn('.svn\n', dockerignore)
def test_generate_with_ruby_version(self):
self.write_file('index.rb', 'class Index; end')
self.write_file('Gemfile', 'source "https://rubygems.org"')
self.write_file('config.ru', 'run Index.app')
self.write_file('.ruby-version', '2.3.1\n')
appinfo = testutil.AppInfoFake(
entrypoint='bundle exec ruby index.rb $PORT',
runtime='ruby',
vm=True)
self.generate_configs(appinfo=appinfo, deploy=True)
self.assertFalse(os.path.exists(self.full_path('app.yaml')))
dockerfile = self.file_contents('Dockerfile')
self.assertEqual(
dockerfile,
DOCKERFILE_TEXT.format(
ruby_version='2.3.1',
entrypoint='bundle exec ruby index.rb $PORT'))
dockerignore = self.file_contents('.dockerignore')
self.assertIn('.dockerignore\n', dockerignore)
self.assertIn('Dockerfile\n', dockerignore)
self.assertIn('.git\n', dockerignore)
self.assertIn('.hg\n', dockerignore)
self.assertIn('.svn\n', dockerignore)
def test_generate_with_ruby_version_no_write(self):
"""Tests generate_config_data with .ruby-version file."""
self.write_file('index.rb', 'class Index; end')
self.write_file('Gemfile', 'source "https://rubygems.org"')
self.write_file('config.ru', 'run Index.app')
self.write_file('.ruby-version', '2.3.1\n')
appinfo = testutil.AppInfoFake(
entrypoint='bundle exec ruby index.rb $PORT',
runtime='ruby',
vm=True)
cfg_files = self.generate_config_data(appinfo=appinfo, deploy=True)
self.assertFalse(os.path.exists(self.full_path('app.yaml')))
self.assert_genfile_exists_with_contents(
cfg_files,
'Dockerfile',
DOCKERFILE_TEXT.format(
ruby_version='2.3.1',
entrypoint='bundle exec ruby index.rb $PORT'))
self.assertIn('.dockerignore', [f.filename for f in cfg_files])
dockerignore = [f.contents for f in cfg_files if
f.filename == '.dockerignore'][0]
self.assertIn('.dockerignore\n', dockerignore)
self.assertIn('Dockerfile\n', dockerignore)
self.assertIn('.git\n', dockerignore)
self.assertIn('.hg\n', dockerignore)
self.assertIn('.svn\n', dockerignore)
def test_generate_with_prompt(self):
self.write_file('index.rb', 'class Index; end')
self.write_file('Gemfile', 'source "https://rubygems.org"')
unstub = self.stub_response('bundle exec ruby index.rb $PORT')
self.generate_configs(deploy=True)
unstub()
dockerfile = self.file_contents('Dockerfile')
self.assertEqual(
dockerfile,
DOCKERFILE_TEXT.format(
ruby_version='',
entrypoint='bundle exec ruby index.rb $PORT'))
dockerignore = self.file_contents('.dockerignore')
self.assertIn('.dockerignore\n', dockerignore)
self.assertIn('Dockerfile\n', dockerignore)
self.assertIn('.git\n', dockerignore)
self.assertIn('.hg\n', dockerignore)
self.assertIn('.svn\n', dockerignore)
def test_generate_with_prompt_no_write(self):
"""Tests generate_config_data with entrypoint given by prompt."""
self.write_file('index.rb', 'class Index; end')
self.write_file('Gemfile', 'source "https://rubygems.org"')
unstub = self.stub_response('bundle exec ruby index.rb $PORT')
cfg_files = self.generate_config_data(deploy=True)
unstub()
self.assert_genfile_exists_with_contents(
cfg_files,
'Dockerfile',
DOCKERFILE_TEXT.format(
ruby_version='',
entrypoint='bundle exec ruby index.rb $PORT'))
self.assertIn('.dockerignore', [f.filename for f in cfg_files])
dockerignore = [f.contents for f in cfg_files if
f.filename == '.dockerignore'][0]
self.assertIn('.dockerignore\n', dockerignore)
self.assertIn('Dockerfile\n', dockerignore)
self.assertIn('.git\n', dockerignore)
self.assertIn('.hg\n', dockerignore)
self.assertIn('.svn\n', dockerignore)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
premanandchandrasekar/boto | boto/ec2/buyreservation.py | 56 | 3813 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import boto.ec2
from boto.sdb.db.property import StringProperty, IntegerProperty
from boto.manage import propget
InstanceTypes = ['m1.small', 'm1.large', 'm1.xlarge',
'c1.medium', 'c1.xlarge', 'm2.xlarge',
'm2.2xlarge', 'm2.4xlarge', 'cc1.4xlarge',
't1.micro']
class BuyReservation(object):
def get_region(self, params):
if not params.get('region', None):
prop = StringProperty(name='region', verbose_name='EC2 Region',
choices=boto.ec2.regions)
params['region'] = propget.get(prop, choices=boto.ec2.regions)
def get_instance_type(self, params):
if not params.get('instance_type', None):
prop = StringProperty(name='instance_type', verbose_name='Instance Type',
choices=InstanceTypes)
params['instance_type'] = propget.get(prop)
def get_quantity(self, params):
if not params.get('quantity', None):
prop = IntegerProperty(name='quantity', verbose_name='Number of Instances')
params['quantity'] = propget.get(prop)
def get_zone(self, params):
if not params.get('zone', None):
prop = StringProperty(name='zone', verbose_name='EC2 Availability Zone',
choices=self.ec2.get_all_zones)
params['zone'] = propget.get(prop)
def get(self, params):
self.get_region(params)
self.ec2 = params['region'].connect()
self.get_instance_type(params)
self.get_zone(params)
self.get_quantity(params)
if __name__ == "__main__":
obj = BuyReservation()
params = {}
obj.get(params)
offerings = obj.ec2.get_all_reserved_instances_offerings(instance_type=params['instance_type'],
availability_zone=params['zone'].name)
print '\nThe following Reserved Instances Offerings are available:\n'
for offering in offerings:
offering.describe()
prop = StringProperty(name='offering', verbose_name='Offering',
choices=offerings)
offering = propget.get(prop)
print '\nYou have chosen this offering:'
offering.describe()
unit_price = float(offering.fixed_price)
total_price = unit_price * params['quantity']
print '!!! You are about to purchase %d of these offerings for a total of $%.2f !!!' % (params['quantity'], total_price)
answer = raw_input('Are you sure you want to do this? If so, enter YES: ')
if answer.strip().lower() == 'yes':
offering.purchase(params['quantity'])
else:
print 'Purchase cancelled'
| mit |
vileopratama/vitech | src/openerp/report/printscreen/ps_list.py | 48 | 11008 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import openerp
from openerp.report.interface import report_int
import openerp.tools as tools
from openerp.tools.safe_eval import safe_eval as eval
from lxml import etree
from openerp.report import render, report_sxw
import locale
import time, os
from operator import itemgetter
from datetime import datetime
class report_printscreen_list(report_int):
def __init__(self, name):
report_int.__init__(self, name)
self.context = {}
self.groupby = []
self.cr=''
def _parse_node(self, root_node):
result = []
for node in root_node:
field_name = node.get('name')
if not eval(str(node.attrib.get('invisible',False)),{'context':self.context}):
if node.tag == 'field':
if field_name in self.groupby:
continue
result.append(field_name)
else:
result.extend(self._parse_node(node))
return result
def _parse_string(self, view):
try:
dom = etree.XML(view.encode('utf-8'))
except Exception:
dom = etree.XML(view)
return self._parse_node(dom)
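    # Hedged sketch of what the parser extracts (the arch XML below is an
    # illustrative assumption):
    #
    #   arch = '<tree><field name="name"/><field name="date" invisible="1"/></tree>'
    #   self._parse_string(arch)  # -> ['name']
    #
    # Invisible fields (evaluated against the current context) and fields
    # already used for grouping are skipped.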
def create(self, cr, uid, ids, datas, context=None):
if not context:
context={}
self.cr=cr
self.context = context
self.groupby = context.get('group_by',[])
self.groupby_no_leaf = context.get('group_by_no_leaf',False)
registry = openerp.registry(cr.dbname)
model = registry[datas['model']]
model_id = registry['ir.model'].search(cr, uid, [('model','=',model._name)])
model_desc = model._description
if model_id:
model_desc = registry['ir.model'].browse(cr, uid, model_id[0], context).name
self.title = model_desc
datas['ids'] = ids
result = model.fields_view_get(cr, uid, view_type='tree', context=context)
fields_order = self.groupby + self._parse_string(result['arch'])
if self.groupby:
rows = []
def get_groupby_data(groupby = [], domain = []):
records = model.read_group(cr, uid, domain, fields_order, groupby , 0, None, context)
for rec in records:
rec['__group'] = True
rec['__no_leaf'] = self.groupby_no_leaf
rec['__grouped_by'] = groupby[0] if (isinstance(groupby, list) and groupby) else groupby
for f in fields_order:
if f not in rec:
rec.update({f:False})
elif isinstance(rec[f], tuple):
rec[f] = rec[f][1]
rows.append(rec)
inner_groupby = (rec.get('__context', {})).get('group_by',[])
inner_domain = rec.get('__domain', [])
if inner_groupby:
get_groupby_data(inner_groupby, inner_domain)
else:
if self.groupby_no_leaf:
continue
child_ids = model.search(cr, uid, inner_domain)
res = model.read(cr, uid, child_ids, result['fields'].keys(), context)
res.sort(lambda x,y: cmp(ids.index(x['id']), ids.index(y['id'])))
rows.extend(res)
dom = [('id','in',ids)]
if self.groupby_no_leaf and len(ids) and not ids[0]:
dom = datas.get('_domain',[])
get_groupby_data(self.groupby, dom)
else:
rows = model.read(cr, uid, datas['ids'], result['fields'].keys(), context)
ids2 = map(itemgetter('id'), rows) # getting the ids from read result
if datas['ids'] != ids2: # sorted ids were not taken into consideration for print screen
rows_new = []
for id in datas['ids']:
rows_new += [elem for elem in rows if elem['id'] == id]
rows = rows_new
res = self._create_table(uid, datas['ids'], result['fields'], fields_order, rows, context, model_desc)
return self.obj.get(), 'pdf'
def _create_table(self, uid, ids, fields, fields_order, results, context, title=''):
pageSize=[297.0, 210.0]
new_doc = etree.Element("report")
config = etree.SubElement(new_doc, 'config')
def _append_node(name, text):
n = etree.SubElement(config, name)
n.text = text
#_append_node('date', time.strftime('%d/%m/%Y'))
_append_node('date', time.strftime(str(locale.nl_langinfo(locale.D_FMT).replace('%y', '%Y'))))
_append_node('PageSize', '%.2fmm,%.2fmm' % tuple(pageSize))
_append_node('PageWidth', '%.2f' % (pageSize[0] * 2.8346,))
_append_node('PageHeight', '%.2f' %(pageSize[1] * 2.8346,))
_append_node('report-header', title)
registry = openerp.registry(self.cr.dbname)
_append_node('company', registry['res.users'].browse(self.cr,uid,uid).company_id.name)
rpt_obj = registry['res.users']
rml_obj=report_sxw.rml_parse(self.cr, uid, rpt_obj._name,context)
_append_node('header-date', str(rml_obj.formatLang(time.strftime("%Y-%m-%d"),date=True))+' ' + str(time.strftime("%H:%M")))
l = []
t = 0
strmax = (pageSize[0]-40) * 2.8346
temp = []
tsum = []
for i in range(0, len(fields_order)):
temp.append(0)
tsum.append(0)
ince = -1
for f in fields_order:
s = 0
ince += 1
if fields[f]['type'] in ('date','time','datetime','float','integer'):
s = 60
strmax -= s
if fields[f]['type'] in ('float','integer'):
temp[ince] = 1
else:
t += fields[f].get('size', 80) / 28 + 1
l.append(s)
for pos in range(len(l)):
if not l[pos]:
s = fields[fields_order[pos]].get('size', 80) / 28 + 1
l[pos] = strmax * s / t
_append_node('tableSize', ','.join(map(str,l)) )
header = etree.SubElement(new_doc, 'header')
for f in fields_order:
field = etree.SubElement(header, 'field')
field.text = tools.ustr(fields[f]['string'] or '')
lines = etree.SubElement(new_doc, 'lines')
for line in results:
node_line = etree.SubElement(lines, 'row')
count = -1
for f in fields_order:
float_flag = 0
count += 1
if fields[f]['type']=='many2one' and line[f]:
if not line.get('__group'):
line[f] = line[f][1]
if fields[f]['type']=='selection' and line[f]:
for key, value in fields[f]['selection']:
if key == line[f]:
line[f] = value
break
if fields[f]['type'] in ('one2many','many2many') and line[f]:
line[f] = '( '+tools.ustr(len(line[f])) + ' )'
if fields[f]['type'] == 'float' and line[f]:
precision=(('digits' in fields[f]) and fields[f]['digits'][1]) or 2
prec ='%.' + str(precision) +'f'
line[f]=prec%(line[f])
float_flag = 1
if fields[f]['type'] == 'date' and line[f]:
new_d1 = line[f]
if not line.get('__group'):
format = str(locale.nl_langinfo(locale.D_FMT).replace('%y', '%Y'))
d1 = datetime.strptime(line[f],'%Y-%m-%d')
new_d1 = d1.strftime(format)
line[f] = new_d1
if fields[f]['type'] == 'time' and line[f]:
new_d1 = line[f]
if not line.get('__group'):
format = str(locale.nl_langinfo(locale.T_FMT))
d1 = datetime.strptime(line[f], '%H:%M:%S')
new_d1 = d1.strftime(format)
line[f] = new_d1
if fields[f]['type'] == 'datetime' and line[f]:
new_d1 = line[f]
if not line.get('__group'):
format = str(locale.nl_langinfo(locale.D_FMT).replace('%y', '%Y'))+' '+str(locale.nl_langinfo(locale.T_FMT))
d1 = datetime.strptime(line[f], '%Y-%m-%d %H:%M:%S')
new_d1 = d1.strftime(format)
line[f] = new_d1
if line.get('__group'):
col = etree.SubElement(node_line, 'col', para='group', tree='no')
else:
col = etree.SubElement(node_line, 'col', para='yes', tree='no')
# Prevent empty labels in groups
if f == line.get('__grouped_by') and line.get('__group') and not line[f] and not float_flag and not temp[count]:
col.text = line[f] = 'Undefined'
col.set('tree', 'undefined')
if line[f] is not None:
col.text = tools.ustr(line[f] or '')
if float_flag:
col.set('tree','float')
if line.get('__no_leaf') and temp[count] == 1 and f != 'id' and not line['__context']['group_by']:
tsum[count] = float(tsum[count]) + float(line[f])
if not line.get('__group') and f != 'id' and temp[count] == 1:
tsum[count] = float(tsum[count]) + float(line[f])
else:
col.text = '/'
node_line = etree.SubElement(lines, 'row')
for f in range(0, len(fields_order)):
col = etree.SubElement(node_line, 'col', para='group', tree='no')
col.set('tree', 'float')
if tsum[f] is not None:
if tsum[f] != 0.0:
digits = fields[fields_order[f]].get('digits', (16, 2))
prec = '%%.%sf' % (digits[1], )
total = prec % (tsum[f], )
txt = str(total or '')
else:
txt = str(tsum[f] or '')
else:
txt = '/'
if f == 0:
txt ='Total'
col.set('tree','no')
col.text = tools.ustr(txt or '')
transform = etree.XSLT(
etree.parse(os.path.join(tools.config['root_path'],
'addons/base/report/custom_new.xsl')))
rml = etree.tostring(transform(new_doc))
self.obj = render.rml(rml, title=self.title)
self.obj.render()
return True
report_printscreen_list('report.printscreen.list')
| mit |
bigmlcom/python | bigml/tests/test_34_time_series.py | 2 | 3565 | # -*- coding: utf-8 -*-
#
# Copyright 2017-2021 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Creating time series forecasts
"""
from .world import world, setup_module, teardown_module
from . import create_source_steps as source_create
from . import create_dataset_steps as dataset_create
from . import create_time_series_steps as time_series_create
from . import create_forecast_steps as forecast_create
class TestTimeSeries(object):
def setup(self):
"""
Debug information
"""
print("\n-------------------\nTests in: %s\n" % __name__)
def teardown(self):
"""
Debug information
"""
print("\nEnd of tests in: %s\n-------------------\n" % __name__)
def test_scenario1(self):
"""
Scenario: Successfully creating forecasts from a dataset:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create time-series from a dataset
And I wait until the time series is ready less than <time_3> secs
And I update the time series name to "<time_series_name>"
When I wait until the time series is ready less than <time_4> secs
Then the time series name is "<time_series_name>"
And I create a forecast for "<input_data>"
Then the forecasts are "<forecast_points>"
Examples:
                | data | time_1 | time_2 | time_3 | time_4 | time_series_name | input_data | forecast_points |
                | ../data/grades.csv | 10 | 10 | 20 | 50 | my new time_series name | {"000005": {"horizon": 5}} | {"000005": [{"point_forecast": [73.96192, 74.04106, 74.12029, 74.1996, 74.27899], "model": "M,M,N"}]} |
"""
print(self.test_scenario1.__doc__)
examples = [
['data/grades.csv', '30', '30', '50', '50', 'my new time series name',
'{"000005": {"horizon": 5}}', '{"000005": [{"point_forecast": [73.96192, 74.04106, 74.12029, 74.1996, 74.27899], "model": "M,M,N"}]}']]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
time_series_create.i_create_a_time_series(self)
time_series_create.the_time_series_is_finished_in_less_than(self, example[3])
time_series_create.i_update_time_series_name(self, example[5])
time_series_create.the_time_series_is_finished_in_less_than(self, example[4])
time_series_create.i_check_time_series_name(self, example[5])
forecast_create.i_create_a_forecast(self, example[6])
forecast_create.the_forecast_is(self, example[7])
| apache-2.0 |
shsingh/ansible | lib/ansible/modules/network/nxos/nxos_vpc_interface.py | 18 | 10331 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_vpc_interface
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages interface VPC configuration
description:
- Manages interface VPC configuration
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- Either vpc or peer_link param is required, but not both.
- C(state=absent) removes whatever VPC config is on a port-channel
if one exists.
  - Re-assigning a vpc or peerlink from one portchannel to another is not
    supported. The module will force the user to unconfigure an existing
    vpc/peer-link before configuring the same value on a new portchannel.
options:
portchannel:
description:
- Group number of the portchannel that will be configured.
required: true
vpc:
description:
- VPC group/id that will be configured on associated portchannel.
peer_link:
description:
- Set to true/false for peer link config on associated portchannel.
type: bool
state:
description:
- Manages desired state of the resource.
required: true
choices: ['present','absent']
default: present
'''
EXAMPLES = '''
- nxos_vpc_interface:
portchannel: 10
vpc: 100
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["interface port-channel100", "vpc 10"]
'''
from ansible.module_utils.network.nxos.nxos import get_config, load_config, run_commands
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec
from ansible.module_utils.basic import AnsibleModule
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def get_portchannel_list(module):
portchannels = []
pc_list = []
try:
body = run_commands(module, ['show port-channel summary | json'])[0]
pc_list = body['TABLE_channel']['ROW_channel']
except (KeyError, AttributeError, TypeError):
return portchannels
if pc_list:
if isinstance(pc_list, dict):
pc_list = [pc_list]
for pc in pc_list:
portchannels.append(pc['group'])
return portchannels
def get_existing_portchannel_to_vpc_mappings(module):
pc_vpc_mapping = {}
try:
body = run_commands(module, ['show vpc brief | json'])[0]
vpc_table = body['TABLE_vpc']['ROW_vpc']
except (KeyError, AttributeError, TypeError):
vpc_table = None
if vpc_table:
if isinstance(vpc_table, dict):
vpc_table = [vpc_table]
for vpc in vpc_table:
pc_vpc_mapping[str(vpc['vpc-id'])] = str(vpc['vpc-ifindex'])
return pc_vpc_mapping
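# Shape of the mapping returned above (values are illustrative, assuming the
# usual 'show vpc brief' JSON where vpc-ifindex looks like 'Po10'):
#   {'100': 'Po10', '200': 'Po20'}   # vpc-id -> port-channel ifindex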
def peer_link_exists(module):
found = False
run = get_config(module, flags=['vpc'])
vpc_list = run.split('\n')
for each in vpc_list:
if 'peer-link' in each:
found = True
return found
def get_active_vpc_peer_link(module):
peer_link = None
try:
body = run_commands(module, ['show vpc brief | json'])[0]
peer_link = body['TABLE_peerlink']['ROW_peerlink']['peerlink-ifindex']
except (KeyError, AttributeError, TypeError):
return peer_link
return peer_link
def get_portchannel_vpc_config(module, portchannel):
peer_link_pc = None
peer_link = False
vpc = ""
pc = ""
config = {}
try:
body = run_commands(module, ['show vpc brief | json'])[0]
table = body['TABLE_peerlink']['ROW_peerlink']
except (KeyError, AttributeError, TypeError):
table = {}
if table:
peer_link_pc = table.get('peerlink-ifindex', None)
if peer_link_pc:
plpc = str(peer_link_pc[2:])
if portchannel == plpc:
config['portchannel'] = portchannel
config['peer-link'] = True
config['vpc'] = vpc
mapping = get_existing_portchannel_to_vpc_mappings(module)
for existing_vpc, port_channel in mapping.items():
port_ch = str(port_channel[2:])
if port_ch == portchannel:
pc = port_ch
vpc = str(existing_vpc)
config['portchannel'] = pc
config['peer-link'] = peer_link
config['vpc'] = vpc
return config
def get_commands_to_config_vpc_interface(portchannel, delta, config_value, existing):
commands = []
if not delta.get('peer-link') and existing.get('peer-link'):
commands.append('no vpc peer-link')
commands.insert(0, 'interface port-channel{0}'.format(portchannel))
elif delta.get('peer-link') and not existing.get('peer-link'):
commands.append('vpc peer-link')
commands.insert(0, 'interface port-channel{0}'.format(portchannel))
elif delta.get('vpc') and not existing.get('vpc'):
command = 'vpc {0}'.format(config_value)
commands.append(command)
commands.insert(0, 'interface port-channel{0}'.format(portchannel))
return commands
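# Hedged sketch of the helper above: the child command is appended first and
# the interface context line is then inserted in front of it. The inputs
# below are illustrative, not taken from a real device.
#
#   get_commands_to_config_vpc_interface('10', {'vpc': '100'}, '100', {})
#   # -> ['interface port-channel10', 'vpc 100']
#   get_commands_to_config_vpc_interface('10', {}, None, {'peer-link': True})
#   # -> ['interface port-channel10', 'no vpc peer-link']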
def state_present(portchannel, delta, config_value, existing):
commands = []
command = get_commands_to_config_vpc_interface(
portchannel,
delta,
config_value,
existing
)
commands.append(command)
return commands
def state_absent(portchannel, existing):
commands = []
if existing.get('vpc'):
command = 'no vpc'
commands.append(command)
elif existing.get('peer-link'):
command = 'no vpc peer-link'
commands.append(command)
if commands:
commands.insert(0, 'interface port-channel{0}'.format(portchannel))
return commands
def main():
argument_spec = dict(
portchannel=dict(required=True, type='str'),
vpc=dict(required=False, type='str'),
peer_link=dict(required=False, type='bool'),
state=dict(choices=['absent', 'present'], default='present')
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=[['vpc', 'peer_link']],
supports_check_mode=True)
warnings = list()
commands = []
results = {'changed': False, 'warnings': warnings}
portchannel = module.params['portchannel']
vpc = module.params['vpc']
peer_link = module.params['peer_link']
state = module.params['state']
args = {'portchannel': portchannel, 'vpc': vpc, 'peer-link': peer_link}
active_peer_link = None
if portchannel not in get_portchannel_list(module):
if not portchannel.isdigit() or int(portchannel) not in get_portchannel_list(module):
module.fail_json(msg="The portchannel you are trying to make a"
" VPC or PL is not created yet. "
"Create it first!")
if vpc:
mapping = get_existing_portchannel_to_vpc_mappings(module)
if vpc in mapping and portchannel != mapping[vpc].strip('Po'):
module.fail_json(msg="This vpc is already configured on "
"another portchannel. Remove it first "
"before trying to assign it here. ",
existing_portchannel=mapping[vpc])
for vpcid, existing_pc in mapping.items():
if portchannel == existing_pc.strip('Po') and vpcid != vpc:
module.fail_json(msg="This portchannel already has another"
" VPC configured. Remove it first "
"before assigning this one",
existing_vpc=vpcid)
if peer_link_exists(module):
active_peer_link = get_active_vpc_peer_link(module)
if active_peer_link[-2:] == portchannel:
module.fail_json(msg="That port channel is the current "
"PEER LINK. Remove it if you want it"
" to be a VPC")
config_value = vpc
elif peer_link is not None:
if peer_link_exists(module):
active_peer_link = get_active_vpc_peer_link(module)[2::]
if active_peer_link != portchannel:
if peer_link:
module.fail_json(msg="A peer link already exists on"
" the device. Remove it first",
current_peer_link='Po{0}'.format(active_peer_link))
config_value = 'peer-link'
proposed = dict((k, v) for k, v in args.items() if v is not None)
existing = get_portchannel_vpc_config(module, portchannel)
if state == 'present':
delta = dict(set(proposed.items()).difference(existing.items()))
if delta:
commands = state_present(portchannel, delta, config_value, existing)
elif state == 'absent' and existing:
commands = state_absent(portchannel, existing)
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
load_config(module, cmds)
results['changed'] = True
if 'configure' in cmds:
cmds.pop(0)
results['commands'] = cmds
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 |
QTek/QRadio | tramatego/src/tramatego/transforms/ipv4_to_score.py | 1 | 1161 | #!/usr/bin/env python
from canari.maltego.utils import debug, progress
from canari.framework import configure #, superuser
from canari.maltego.entities import IPv4Address, Phrase
from common.launchers import get_qradio_data
__author__ = 'Zappus'
__copyright__ = 'Copyright 2016, TramaTego Project'
__credits__ = []
__license__ = 'GPL'
__version__ = '0.1'
__maintainer__ = 'Zappus'
__email__ = '[email protected]'
__status__ = 'Development'
__all__ = [
'dotransform',
#'onterminate' # comment out this line if you don't need this function.
]
#@superuser
@configure(
label='IPv4 to Score',
description='Converts IPv4 into Score using QRadio.',
uuids=[ 'TramaTego.v1.IPv4ToScore' ],
inputs=[ ( 'TramaTego', IPv4Address ) ],
debug=True
)
def dotransform(request, response, config):
command = "--ipv4_to_score " + request.value
qradio_output = get_qradio_data(command, 3)
for entry in qradio_output:
response += Phrase(entry)
return response
def onterminate():
"""
TODO: Write your cleanup logic below or delete the onterminate function and remove it from the __all__ variable
"""
pass | apache-2.0 |
qifeigit/scikit-learn | sklearn/neighbors/tests/test_dist_metrics.py | 230 | 5234 | import itertools
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from nose import SkipTest
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def cmp_version(version1, version2):
version1 = tuple(map(int, version1.split('.')[:2]))
version2 = tuple(map(int, version2.split('.')[:2]))
if version1 < version2:
return -1
elif version1 > version2:
return 1
else:
return 0
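# Quick illustration: cmp_version compares only the first two dot-separated
# components, so patch releases compare equal (values are illustrative).
#   cmp_version('0.9.1', '0.9')  # -> 0
#   cmp_version('0.8', '0.9')    # -> -1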
class TestMetrics:
def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
rseed=0, dtype=np.float64):
np.random.seed(rseed)
self.X1 = np.random.random((n1, d)).astype(dtype)
self.X2 = np.random.random((n2, d)).astype(dtype)
# make boolean arrays: ones and zeros
self.X1_bool = self.X1.round(0)
self.X2_bool = self.X2.round(0)
V = np.random.random((d, d))
VI = np.dot(V, V.T)
self.metrics = {'euclidean': {},
'cityblock': {},
'minkowski': dict(p=(1, 1.5, 2, 3)),
'chebyshev': {},
'seuclidean': dict(V=(np.random.random(d),)),
'wminkowski': dict(p=(1, 1.5, 3),
w=(np.random.random(d),)),
'mahalanobis': dict(VI=(VI,)),
'hamming': {},
'canberra': {},
'braycurtis': {}}
self.bool_metrics = ['matching', 'jaccard', 'dice',
'kulsinski', 'rogerstanimoto', 'russellrao',
'sokalmichener', 'sokalsneath']
def test_cdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X2, metric, **kwargs)
yield self.check_cdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X2_bool, metric)
yield self.check_cdist_bool, metric, D_true
def check_cdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1, self.X2)
assert_array_almost_equal(D12, D_true)
def check_cdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool, self.X2_bool)
assert_array_almost_equal(D12, D_true)
def test_pdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X1, metric, **kwargs)
yield self.check_pdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X1_bool, metric)
yield self.check_pdist_bool, metric, D_true
def check_pdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1)
assert_array_almost_equal(D12, D_true)
def check_pdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool)
assert_array_almost_equal(D12, D_true)
def test_haversine_metric():
def haversine_slow(x1, x2):
return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
+ np.cos(x1[0]) * np.cos(x2[0]) *
np.sin(0.5 * (x1[1] - x2[1])) ** 2))
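    # haversine_slow spells out the textbook great-circle formula the metric
    # should reproduce, with x = (latitude, longitude) in radians:
    #   d = 2 * arcsin(sqrt(sin^2(dlat/2) + cos(lat1)*cos(lat2)*sin^2(dlon/2)))
    # The random points below stay in [0, 1), well inside the valid range.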
X = np.random.random((10, 2))
haversine = DistanceMetric.get_metric("haversine")
D1 = haversine.pairwise(X)
D2 = np.zeros_like(D1)
for i, x1 in enumerate(X):
for j, x2 in enumerate(X):
D2[i, j] = haversine_slow(x1, x2)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(haversine.dist_to_rdist(D1),
np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
X = np.random.random((10, 3))
euclidean = DistanceMetric.get_metric("euclidean")
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
# Check if both callable metric and predefined metric initialized
# DistanceMetric object is picklable
euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))
D1 = euclidean.pairwise(X)
D2 = pyfunc.pairwise(X)
D1_pkl = euclidean_pkl.pairwise(X)
D2_pkl = pyfunc_pkl.pairwise(X)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(D1_pkl, D2_pkl)
| bsd-3-clause |
coinkite/connectrum | connectrum/findall.py | 1 | 4527 | #!/usr/bin/env python3
#
#
import bottom, random, time, asyncio
from .svr_info import ServerInfo
import logging
logger = logging.getLogger('connectrum')
class IrcListener(bottom.Client):
def __init__(self, irc_nickname=None, irc_password=None, ssl=True):
self.my_nick = irc_nickname or 'XC%d' % random.randint(1E11, 1E12)
self.password = irc_password or None
self.results = {} # by hostname
self.servers = set()
self.all_done = asyncio.Event()
super(IrcListener, self).__init__(host='irc.freenode.net', port=6697 if ssl else 6667, ssl=ssl)
# setup event handling
self.on('CLIENT_CONNECT', self.connected)
self.on('PING', self.keepalive)
self.on('JOIN', self.joined)
self.on('RPL_NAMREPLY', self.got_users)
self.on('RPL_WHOREPLY', self.got_who_reply)
self.on("client_disconnect", self.reconnect)
self.on('RPL_ENDOFNAMES', self.got_end_of_names)
async def collect_data(self):
        # start the connect task
self.loop.create_task(self.connect())
# wait until done
await self.all_done.wait()
# return the results
return self.results
def connected(self, **kwargs):
logger.debug("Connected")
self.send('NICK', nick=self.my_nick)
self.send('USER', user=self.my_nick, realname='Connectrum Client')
        # long delay here as it does a failing Ident probe (10 seconds min)
self.send('JOIN', channel='#electrum')
#self.send('WHO', mask='E_*')
def keepalive(self, message, **kwargs):
self.send('PONG', message=message)
async def joined(self, nick=None, **kwargs):
# happens when we or someone else joins the channel
        # seems to take 10 seconds or longer for me to join
logger.debug('Joined: %r' % kwargs)
if nick != self.my_nick:
await self.add_server(nick)
async def got_who_reply(self, nick=None, real_name=None, **kws):
'''
Server replied to one of our WHO requests, with details.
'''
#logger.debug('who reply: %r' % kws)
nick = nick[2:] if nick[0:2] == 'E_' else nick
host, ports = real_name.split(' ', 1)
self.servers.remove(nick)
logger.debug("Found: '%s' at %s with port list: %s",nick, host, ports)
self.results[host.lower()] = ServerInfo(nick, host, ports)
if not self.servers:
self.all_done.set()
async def got_users(self, users=[], **kws):
# After successful join to channel, we are given a list of
# users on the channel. Happens a few times for busy channels.
logger.debug('Got %d (more) users in channel', len(users))
for nick in users:
await self.add_server(nick)
async def add_server(self, nick):
# ignore everyone but electrum servers
if nick.startswith('E_'):
self.servers.add(nick[2:])
async def who_worker(self):
# Fetch details on each Electrum server nick we see
logger.debug('who task starts')
copy = self.servers.copy()
for nn in copy:
logger.debug('do WHO for: ' + nn)
self.send('WHO', mask='E_'+nn)
logger.debug('who task done')
def got_end_of_names(self, *a, **k):
logger.debug('Got all the user names')
assert self.servers, "No one on channel!"
# ask for details on all of those users
self.loop.create_task(self.who_worker())
async def reconnect(self, **kwargs):
# Trigger an event that may cascade to a client_connect.
# Don't continue until a client_connect occurs, which may be never.
logger.warn("Disconnected (will reconnect)")
# Note that we're not in a coroutine, so we don't have access
# to await and asyncio.sleep
time.sleep(3)
# After this line we won't necessarily be connected.
# We've simply scheduled the connect to happen in the future
self.loop.create_task(self.connect())
logger.debug("Reconnect scheduled.")
if __name__ == '__main__':
import logging
logging.getLogger('bottom').setLevel(logging.DEBUG)
logging.getLogger('connectrum').setLevel(logging.DEBUG)
logging.getLogger('asyncio').setLevel(logging.DEBUG)
bot = IrcListener(ssl=False)
bot.loop.set_debug(True)
fut = bot.collect_data()
#bot.loop.create_task(bot.connect())
rv = bot.loop.run_until_complete(fut)
print(rv)
| mit |
popazerty/e2_sh4 | tools/host_tools/FormatConverter/datasource.py | 112 | 2916 | from input import inputChoices
class datasource:
def __init__(self):
self.clear()
def setDatasources(self, datasources):
self.datasources = datasources
def getCapabilities(self):
return []
def getName(self):
return "N/A"
def getStatus(self):
text = str(len(self.transponderlist.keys())) + " Satellites" + "\n"
return text
def printAll(self):
for sat in self.transponderlist.keys():
print "***********"
print "sat:", sat, self.satnames[sat]
for transponder in self.transponderlist[sat]:
print transponder
def clear(self):
self.transponderlist = {}
self.satnames = {}
def read(self):
pass
def write(self):
pass
def addSat(self, satname, satpos):
if not self.transponderlist.has_key(satpos):
self.transponderlist[satpos] = []
self.satnames[satpos] = satname
def addTransponder(self, satpos, transponder):
if len(transponder.keys()) >= 6:
self.transponderlist[satpos].append(transponder)
class genericdatasource(datasource):
def __init__(self):
datasource.__init__(self)
self.source = self.destination = None
def getName(self):
return "Generic Datasource"
def getCapabilities(self):
return [("copy data from one source to another", self.copy), ("merge data from one source into another", self.merge)]
def copy(self):
self.copymerge(action = "copy")
def merge(self):
self.copymerge(action = "merge")
def copymerge(self, action = "copy"):
choice = -1
while choice is not None:
choice = inputChoices(["select source", "select destination", "copy now!"])
if choice == 0:
print "\nselect source:"
self.source = self.selectDatasource()
elif choice == 1:
print "\nselect destination"
self.destination = self.selectDatasource()
elif choice == 2:
self.docopymerge(action)
def docopymerge(self, action = "copy"):
if self.source is None:
print "select a source first!"
elif self.destination is None:
print "select a destination first!"
else:
if action == "copy":
print "copying ",
elif action == "merge":
print "merging ",
print "from %s to %s" % (self.source.getName(), self.destination.getName())
countsat = 0
counttransponder = 0
if action == "copy":
self.destination.clear()
for satpos in self.source.transponderlist.keys():
countsat += 1
self.destination.addSat(self.source.satnames[satpos], satpos)
for transponder in self.source.transponderlist[satpos]:
counttransponder += 1
self.destination.addTransponder(satpos, transponder)
print "copied %d sats with %d transponders" % (countsat, counttransponder)
def selectDatasource(self):
list = []
sources = []
for source in self.datasources:
if source != self:
list.append(source.getName() + (" (%d sats)" % len(source.transponderlist.keys())))
sources.append(source)
choice = inputChoices(list)
if choice is None:
return None
return sources[choice] | gpl-2.0 |
wilima/cryptography | tests/test.py | 1 | 3828 | import unittest
from cryptography import (eratosthenes, euler, extended_gcd, factorization,
gcd, modular_multiplicative_inverse)
from cryptography.ciphers import affine, shift, substitution, vigener
from .context import cryptography
class GcdTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_gcd(self):
self.assertEqual(
gcd.gcd(1071, 462),
21)
def test_gcd2(self):
self.assertEqual(
gcd.gcd(270, 192),
6)
class ExtendedGcdTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_extended_gcd(self):
self.assertEqual(
extended_gcd.extended_gcd(1914, 899),
(29, 8, -17))
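    # Sanity check on the expected triple (g, x, y): Bezout's identity gives
    # 8*1914 + (-17)*899 = 15312 - 15283 = 29, which is indeed gcd(1914, 899).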
class ModularInverseTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_modular_inverse(self):
self.assertEqual(
modular_multiplicative_inverse.inverse(5, 26),
21)
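    # Check: 5 * 21 = 105 = 4*26 + 1, so 105 % 26 == 1 and 21 is 5^-1 mod 26.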
class FactorizationTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_factorization(self):
self.assertEqual(
factorization.integer_factorization(315),
[3, 3, 5, 7])
class EratosthenesTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_eratosthenes_sieve(self):
self.assertEqual(
eratosthenes.eratosthenes_sieve(20),
[2, 3, 5, 7, 11, 13, 17, 19])
class EulerFunctionTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_euler_function(self):
self.assertEqual(
euler.euler_function(1),
1)
def test_euler_function2(self):
self.assertEqual(
euler.euler_function(5),
4)
class ShiftCipherFunctionTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_shift_encrypt_function(self):
self.assertEqual(
shift.encrypt('BARBARIUTOCI', 3),
'eduedulxwrfl'.upper())
def test_shift_decrypt_function(self):
self.assertEqual(
shift.decrypt('eduedulxwrfl', 3),
'BARBARIUTOCI')
def test_shift_crack_function(self):
self.assertEqual(
'BARBARIUTOCI' in shift.crack('eduedulxwrfl', 26),
True)
class AffineCipherFunctionTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_affine_encrypt_function(self):
self.assertEqual(
affine.encrypt('THEINITIAL', (5, 9)),
'ASDXWXAXJM')
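    # Assuming the usual affine convention E(x) = (a*x + b) % 26 with
    # (a, b) = (5, 9): 'T' = 19 -> (5*19 + 9) % 26 = 0 -> 'A', matching above.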
def test_affine_decrypt_function(self):
self.assertEqual(
affine.decrypt('ASDXWXAXJM', (5, 9)),
'THEINITIAL')
def test_affine_crack_function(self):
self.assertEqual(
'THEINITIAL' in affine.crack('ASDXWXAXJM', 26),
True)
class SubstitutionCipherFunctionTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_substitution_encrypt_function(self):
self.assertEqual(
substitution.encrypt('FLEEATONCEWEAREDISCOVERED', ('ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'ZEBRASCDFGHIJKLMNOPQTUVWXY')),
'SIAAZQLKBAVAZOARFPBLUAOAR')
def test_substitution_decrypt_function(self):
self.assertEqual(
substitution.decrypt('SIAAZQLKBAVAZOARFPBLUAOAR', ('ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'ZEBRASCDFGHIJKLMNOPQTUVWXY')),
'FLEEATONCEWEAREDISCOVERED')
class VigenerCipherFunctionTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_vigener_encrypt_function(self):
self.assertEqual(
vigener.encrypt('KULTURNIATASEJESPION', 'PES'),
'ZYDIYJCMSIEKTNWHTADR')
def test_vigener_decrypt_function(self):
self.assertEqual(
vigener.decrypt('ZYDIYJCMSIEKTNWHTADR', 'PES'),
'KULTURNIATASEJESPION')
if __name__ == '__main__':
unittest.main()
| mit |
scottdangelo/RemoveVolumeMangerLocks | cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_7mode.py | 5 | 5546 | # Copyright (c) 2015 Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for the NetApp 7mode NFS storage driver
"""
import ddt
import mock
from os_brick.remotefs import remotefs as remotefs_brick
from oslo_utils import units
from cinder import test
from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake
from cinder.tests.unit.volume.drivers.netapp import fakes as na_fakes
from cinder import utils
from cinder.volume.drivers.netapp.dataontap import nfs_7mode
from cinder.volume.drivers.netapp import utils as na_utils
@ddt.ddt
class NetApp7modeNfsDriverTestCase(test.TestCase):
def setUp(self):
super(NetApp7modeNfsDriverTestCase, self).setUp()
kwargs = {'configuration': self.get_config_7mode()}
with mock.patch.object(utils, 'get_root_helper',
return_value=mock.Mock()):
with mock.patch.object(remotefs_brick, 'RemoteFsClient',
return_value=mock.Mock()):
self.driver = nfs_7mode.NetApp7modeNfsDriver(**kwargs)
self.driver._mounted_shares = [fake.NFS_SHARE]
self.driver.ssc_vols = True
self.driver.zapi_client = mock.Mock()
def get_config_7mode(self):
config = na_fakes.create_configuration_cmode()
config.netapp_storage_protocol = 'nfs'
config.netapp_login = 'root'
config.netapp_password = 'pass'
config.netapp_server_hostname = '127.0.0.1'
config.netapp_transport_type = 'http'
config.netapp_server_port = '80'
return config
@ddt.data({'nfs_sparsed_volumes': True},
{'nfs_sparsed_volumes': False})
@ddt.unpack
def test_get_pool_stats(self, nfs_sparsed_volumes):
self.driver.configuration.nfs_sparsed_volumes = nfs_sparsed_volumes
thick = not nfs_sparsed_volumes
total_capacity_gb = na_utils.round_down(
fake.TOTAL_BYTES / units.Gi, '0.01')
free_capacity_gb = na_utils.round_down(
fake.AVAILABLE_BYTES / units.Gi, '0.01')
provisioned_capacity_gb = total_capacity_gb - free_capacity_gb
capacity = {
'reserved_percentage': fake.RESERVED_PERCENTAGE,
'max_over_subscription_ratio': fake.MAX_OVER_SUBSCRIPTION_RATIO,
'total_capacity_gb': total_capacity_gb,
'free_capacity_gb': free_capacity_gb,
'provisioned_capacity_gb': provisioned_capacity_gb,
}
self.mock_object(self.driver,
'_get_share_capacity_info',
mock.Mock(return_value=capacity))
result = self.driver._get_pool_stats()
expected = [{'pool_name': '192.168.99.24:/fake/export/path',
'QoS_support': False,
'thick_provisioning_support': thick,
'thin_provisioning_support': not thick,
'free_capacity_gb': 12.0,
'total_capacity_gb': 4468.0,
'reserved_percentage': 7,
'max_over_subscription_ratio': 19.0,
'provisioned_capacity_gb': 4456.0}]
self.assertEqual(expected, result)
def test_shortlist_del_eligible_files(self):
mock_get_path_for_export = self.mock_object(
self.driver.zapi_client, 'get_actual_path_for_export')
mock_get_path_for_export.return_value = fake.FLEXVOL
mock_get_file_usage = self.mock_object(
self.driver.zapi_client, 'get_file_usage')
mock_get_file_usage.return_value = fake.CAPACITY_VALUES[0]
expected = [(old_file, fake.CAPACITY_VALUES[0]) for old_file
in fake.FILE_LIST]
result = self.driver._shortlist_del_eligible_files(
fake.NFS_SHARE, fake.FILE_LIST)
self.assertEqual(expected, result)
def test_shortlist_del_eligible_files_empty_list(self):
mock_get_export_ip_path = self.mock_object(
self.driver, '_get_export_ip_path')
mock_get_export_ip_path.return_value = ('', '/export_path')
mock_get_path_for_export = self.mock_object(
self.driver.zapi_client, 'get_actual_path_for_export')
mock_get_path_for_export.return_value = fake.FLEXVOL
result = self.driver._shortlist_del_eligible_files(
fake.NFS_SHARE, [])
self.assertEqual([], result)
@ddt.data({'has_space': True, 'expected': True},
{'has_space': False, 'expected': False})
@ddt.unpack
def test_is_share_clone_compatible(self, has_space, expected):
mock_share_has_space_for_clone = self.mock_object(
self.driver, '_share_has_space_for_clone')
mock_share_has_space_for_clone.return_value = has_space
result = self.driver._is_share_clone_compatible(fake.VOLUME,
fake.NFS_SHARE)
self.assertEqual(expected, result)
| apache-2.0 |
mvesper/invenio | modules/websubmit/lib/functions/Test_Status.py | 3 | 3087 | # This file is part of Invenio.
# Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
## Description: function Test_Status
## This function checks whether the document is still waiting
## for approval or not.
## Author: T.Baron
##
## PARAMETERS: -
from invenio.dbquery import run_sql
from invenio.websubmit_config import InvenioWebSubmitFunctionStop
def Test_Status(parameters, curdir, form, user_info=None):
"""
This function checks whether the considered document has been
requested for approval and is still waiting for approval. It also
checks whether the password stored in file 'password' of the
submission directory corresponds to the password associated with
the document.
"""
global rn
res = run_sql("SELECT status, access FROM sbmAPPROVAL WHERE rn=%s", (rn,))
if len(res) == 0:
raise InvenioWebSubmitFunctionStop(printNotRequested(rn))
else:
if res[0][0] == "approved":
raise InvenioWebSubmitFunctionStop(printApproved(rn))
elif res[0][0] == "rejected":
raise InvenioWebSubmitFunctionStop(printRejected(rn))
return ""
def printNotRequested(rn):
t="""
<SCRIPT>
document.forms[0].action="/submit";
document.forms[0].curpage.value = 1;
document.forms[0].step.value = 0;
user_must_confirm_before_leaving_page = false;
alert('The document %s has never been asked for approval.\\nAnyway, you can still choose another document if you wish.');
document.forms[0].submit();
</SCRIPT>""" % rn
return t
def printApproved(rn):
t="""
<SCRIPT>
document.forms[0].action="/submit";
document.forms[0].curpage.value = 1;
document.forms[0].step.value = 0;
user_must_confirm_before_leaving_page = false;
alert('The document %s has already been approved.\\nAnyway, you can still choose another document if you wish.');
document.forms[0].submit();
</SCRIPT>""" % rn
return t
def printRejected(rn):
t="""
<SCRIPT>
document.forms[0].action="/submit";
document.forms[0].curpage.value = 1;
document.forms[0].step.value = 0;
user_must_confirm_before_leaving_page = false;
alert('The document %s has already been rejected.\\nAnyway, you can still choose another document if you wish.');
document.forms[0].submit();
</SCRIPT>""" % rn
return t
| gpl-2.0 |
librasungirl/openthread | tools/harness-automation/cases_R140/leader_9_2_4.py | 18 | 1877 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class Leader_9_2_4(HarnessCase):
role = HarnessCase.ROLE_LEADER
case = '9 2 4'
golden_devices_required = 1
def on_dialog(self, dialog, title):
pass
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
barbagroup/PetIBM | examples/ibpm/cylinder2dRe40/scripts/plotVorticity.py | 4 | 1401 | """
Computes, plots, and saves the 2D vorticity field from a PetIBM simulation
after 2000 time steps (20 non-dimensional time-units).
"""
import pathlib
import h5py
import numpy
from matplotlib import pyplot
simu_dir = pathlib.Path(__file__).absolute().parents[1]
data_dir = simu_dir / 'output'
# Read vorticity field and its grid from files.
name = 'wz'
filepath = data_dir / 'grid.h5'
f = h5py.File(filepath, 'r')
x, y = f[name]['x'][:], f[name]['y'][:]
X, Y = numpy.meshgrid(x, y)
timestep = 2000
filepath = data_dir / '{:0>7}.h5'.format(timestep)
f = h5py.File(filepath, 'r')
wz = f[name][:]
# Read body coordinates from file.
filepath = simu_dir / 'circle.body'
with open(filepath, 'r') as infile:
xb, yb = numpy.loadtxt(infile, dtype=numpy.float64,
unpack=True, skiprows=1)
pyplot.rc('font', family='serif', size=16)
# Plot the filled contour of the vorticity.
fig, ax = pyplot.subplots(figsize=(6.0, 6.0))
ax.grid()
ax.set_xlabel('x')
ax.set_ylabel('y')
levels = numpy.linspace(-3.0, 3.0, 16)
ax.contour(X, Y, wz, levels=levels, colors='black')
ax.plot(xb, yb, color='red')
ax.set_xlim(-1.0, 4.0)
ax.set_ylim(-2.0, 2.0)
ax.set_aspect('equal')
fig.tight_layout()
pyplot.show()
# Save figure.
fig_dir = simu_dir / 'figures'
fig_dir.mkdir(parents=True, exist_ok=True)
filepath = fig_dir / 'wz{:0>7}.png'.format(timestep)
fig.savefig(str(filepath), dpi=300)
| bsd-3-clause |
bitcity/django | tests/defer_regress/models.py | 282 | 2692 | """
Regression tests for defer() / only() behavior.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Item(models.Model):
name = models.CharField(max_length=15)
text = models.TextField(default="xyzzy")
value = models.IntegerField()
other_value = models.IntegerField(default=0)
def __str__(self):
return self.name
class RelatedItem(models.Model):
item = models.ForeignKey(Item, models.CASCADE)
class ProxyRelated(RelatedItem):
class Meta:
proxy = True
class Child(models.Model):
name = models.CharField(max_length=10)
value = models.IntegerField()
@python_2_unicode_compatible
class Leaf(models.Model):
name = models.CharField(max_length=10)
child = models.ForeignKey(Child, models.CASCADE)
second_child = models.ForeignKey(Child, models.SET_NULL, related_name="other", null=True)
value = models.IntegerField(default=42)
def __str__(self):
return self.name
class ResolveThis(models.Model):
num = models.FloatField()
name = models.CharField(max_length=16)
class Proxy(Item):
class Meta:
proxy = True
@python_2_unicode_compatible
class SimpleItem(models.Model):
name = models.CharField(max_length=15)
value = models.IntegerField()
def __str__(self):
return self.name
class Feature(models.Model):
item = models.ForeignKey(SimpleItem, models.CASCADE)
class SpecialFeature(models.Model):
feature = models.ForeignKey(Feature, models.CASCADE)
class OneToOneItem(models.Model):
item = models.OneToOneField(Item, models.CASCADE, related_name="one_to_one_item")
name = models.CharField(max_length=15)
class ItemAndSimpleItem(models.Model):
item = models.ForeignKey(Item, models.CASCADE)
simple = models.ForeignKey(SimpleItem, models.CASCADE)
class Profile(models.Model):
profile1 = models.CharField(max_length=1000, default='profile1')
class Location(models.Model):
location1 = models.CharField(max_length=1000, default='location1')
class Request(models.Model):
profile = models.ForeignKey(Profile, models.SET_NULL, null=True, blank=True)
location = models.ForeignKey(Location, models.CASCADE)
items = models.ManyToManyField(Item)
request1 = models.CharField(default='request1', max_length=1000)
request2 = models.CharField(default='request2', max_length=1000)
request3 = models.CharField(default='request3', max_length=1000)
request4 = models.CharField(default='request4', max_length=1000)
class Base(models.Model):
text = models.TextField()
class Derived(Base):
other_text = models.TextField()
| bsd-3-clause |
zstyblik/infernal-twin | sql_insert.py | 1 | 3025 | import MySQLdb
import db_connect_creds
from datetime import datetime
username, password = db_connect_creds.read_creds()
cxn = MySQLdb.connect('localhost', user=username, passwd=password)
date = datetime.now()
cxn.query('CREATE DATABASE IF NOT EXISTS InfernalWireless')
cxn.commit()
cxn.close()
cxn = MySQLdb.connect(db='InfernalWireless')
cur = cxn.cursor()
current_project_id = 0
#~ cxn = MySQLdb.connect('localhost','root',"")
#~
#~ date = datetime.now()
#~
#~
#~ cxn.query('CREATE DATABASE IF NOT EXISTS InfernalWireless')
#~
#~ cxn.commit()
#~ cxn.close()
#~
#~ cxn = MySQLdb.connect(db='InfernalWireless')
#~
#~ cur = cxn.cursor()
#~
#~ current_project_id = 0
def create_project_table():
    ############## THIS IS GOING TO CREATE A TABLE FOR PROJECTS
#~ cur.execute("CREATE TABLE mytable (id AUTO_INCREMENT")
PROJECT_TITLE = '''CREATE TABLE IF NOT EXISTS Projects (
ProjectId MEDIUMINT NOT NULL AUTO_INCREMENT, ProjectName TEXT, PRIMARY KEY (ProjectId), AuditorName TEXT, TargetName TEXT, date TEXT)'''
cur.execute(PROJECT_TITLE)
create_project_table()
def project_details(projectname, Authors_name, TargetName, date):
    # 'global' is needed here; otherwise the assignment below creates a
    # local variable and the module-level current_project_id stays at 0.
    global current_project_id
    PROJECT_DETAILS = 'INSERT INTO Projects (ProjectName, AuditorName, TargetName, date) VALUES ("%s","%s","%s","%s")' % (projectname, Authors_name, TargetName, date)
    cur.execute(PROJECT_DETAILS)
    current_project_id = cur.lastrowid
    print "report is generated"
    return current_project_id
def create_report_table():
    ############## THIS IS GOING TO CREATE A TABLE FOR REPORTS
report_table = '''CREATE TABLE IF NOT EXISTS Reports (findingID MEDIUMINT NOT NULL AUTO_INCREMENT, finding_name TEXT, phase TEXT, PRIMARY KEY (findingID), risk_level TEXT, risk_category TEXT, Findings_detail TEXT, Notes TEXT, Project_fk_Id MEDIUMINT, FOREIGN KEY (Project_fk_Id) REFERENCES Projects (ProjectId))'''
cur.execute(report_table)
create_report_table()
def create_report(finding_name, phase, risk_level, risk_category, Findings_detail, Notes, Project_fk_Id):
    ########## THIS IS GOING TO INSERT DATA INTO THE REPORTS TABLE
    pID = current_project_id
    REPORT_DETAILS = 'INSERT INTO Reports (finding_name, phase, risk_level, risk_category, Findings_detail, Notes, Project_fk_Id) VALUES ("%s","%s","%s","%s","%s","%s","%s")' % (finding_name, phase, risk_level, risk_category, Findings_detail, Notes, Project_fk_Id)
    cur.execute(REPORT_DETAILS)
    print pID
def print_hello(test_data):
print test_data
################ DB POPULATE DATABASE ###########
#~ prID = project_details('test','est','23s','12/12/12')
#~
#~ create_report('Title of the finding','Choose a phase','Choose a category','Choose risk level','Enter the findings details','Notes on the findings',int(prID))
################################################################### DUMMY DATABASE QUERIES ##############
#~ print type(prID)
cur.close()
cxn.commit()
cxn.close()
print "DB has been updated"
| gpl-3.0 |
javierag/samba | python/samba/tests/__init__.py | 3 | 8238 | # Unix SMB/CIFS implementation.
# Copyright (C) Jelmer Vernooij <[email protected]> 2007-2010
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Samba Python tests."""
import os
import ldb
import samba
import samba.auth
from samba import param
from samba.samdb import SamDB
import subprocess
import tempfile
samba.ensure_external_module("mimeparse", "mimeparse")
samba.ensure_external_module("extras", "extras")
samba.ensure_external_module("testtools", "testtools")
# Other modules import these two classes from here, for convenience:
from testtools.testcase import (
TestCase as TesttoolsTestCase,
TestSkipped,
)
class TestCase(TesttoolsTestCase):
"""A Samba test case."""
def setUp(self):
super(TestCase, self).setUp()
test_debug_level = os.getenv("TEST_DEBUG_LEVEL")
if test_debug_level is not None:
test_debug_level = int(test_debug_level)
self._old_debug_level = samba.get_debug_level()
samba.set_debug_level(test_debug_level)
self.addCleanup(samba.set_debug_level, test_debug_level)
def get_loadparm(self):
return env_loadparm()
def get_credentials(self):
return cmdline_credentials
class LdbTestCase(TesttoolsTestCase):
"""Trivial test case for running tests against a LDB."""
def setUp(self):
super(LdbTestCase, self).setUp()
self.filename = os.tempnam()
self.ldb = samba.Ldb(self.filename)
def set_modules(self, modules=[]):
"""Change the modules for this Ldb."""
m = ldb.Message()
m.dn = ldb.Dn(self.ldb, "@MODULES")
m["@LIST"] = ",".join(modules)
self.ldb.add(m)
self.ldb = samba.Ldb(self.filename)
class TestCaseInTempDir(TestCase):
def setUp(self):
super(TestCaseInTempDir, self).setUp()
self.tempdir = tempfile.mkdtemp()
self.addCleanup(self._remove_tempdir)
def _remove_tempdir(self):
self.assertEquals([], os.listdir(self.tempdir))
os.rmdir(self.tempdir)
self.tempdir = None
def env_loadparm():
lp = param.LoadParm()
try:
lp.load(os.environ["SMB_CONF_PATH"])
except KeyError:
raise KeyError("SMB_CONF_PATH not set")
return lp
def env_get_var_value(var_name):
"""Returns value for variable in os.environ
    Function throws AssertionError if variable is not defined.
Unit-test based python tests require certain input params
to be set in environment, otherwise they can't be run
"""
assert var_name in os.environ.keys(), "Please supply %s in environment" % var_name
return os.environ[var_name]
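# Minimal usage sketch (the variable name is illustrative, not one the test
# framework necessarily defines):
#   server = env_get_var_value("SERVER")   # AssertionError if SERVER is unset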
cmdline_credentials = None
class RpcInterfaceTestCase(TestCase):
"""DCE/RPC Test case."""
class ValidNetbiosNameTests(TestCase):
def test_valid(self):
self.assertTrue(samba.valid_netbios_name("FOO"))
def test_too_long(self):
self.assertFalse(samba.valid_netbios_name("FOO"*10))
def test_invalid_characters(self):
self.assertFalse(samba.valid_netbios_name("*BLA"))
class BlackboxProcessError(Exception):
"""This is raised when check_output() process returns a non-zero exit status
Exception instance should contain the exact exit code (S.returncode),
command line (S.cmd), process output (S.stdout) and process error stream
(S.stderr)
"""
def __init__(self, returncode, cmd, stdout, stderr):
self.returncode = returncode
self.cmd = cmd
self.stdout = stdout
self.stderr = stderr
def __str__(self):
return "Command '%s'; exit status %d; stdout: '%s'; stderr: '%s'" % (self.cmd, self.returncode,
self.stdout, self.stderr)
class BlackboxTestCase(TestCase):
"""Base test case for blackbox tests."""
def _make_cmdline(self, line):
bindir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../../bin"))
parts = line.split(" ")
if os.path.exists(os.path.join(bindir, parts[0])):
parts[0] = os.path.join(bindir, parts[0])
line = " ".join(parts)
return line
def check_run(self, line):
line = self._make_cmdline(line)
p = subprocess.Popen(line, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
retcode = p.wait()
if retcode:
raise BlackboxProcessError(retcode, line, p.stdout.read(), p.stderr.read())
def check_output(self, line):
line = self._make_cmdline(line)
p = subprocess.Popen(line, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, close_fds=True)
retcode = p.wait()
if retcode:
raise BlackboxProcessError(retcode, line, p.stdout.read(), p.stderr.read())
return p.stdout.read()
def connect_samdb(samdb_url, lp=None, session_info=None, credentials=None,
flags=0, ldb_options=None, ldap_only=False, global_schema=True):
"""Create SamDB instance and connects to samdb_url database.
:param samdb_url: Url for database to connect to.
:param lp: Optional loadparm object
:param session_info: Optional session information
:param credentials: Optional credentials, defaults to anonymous.
:param flags: Optional LDB flags
:param ldap_only: If set, only remote LDAP connection will be created.
:param global_schema: Whether to use global schema.
Added value for tests is that we have a shorthand function
to make proper URL for ldb.connect() while using default
parameters for connection based on test environment
"""
samdb_url = samdb_url.lower()
if not "://" in samdb_url:
if not ldap_only and os.path.isfile(samdb_url):
samdb_url = "tdb://%s" % samdb_url
else:
samdb_url = "ldap://%s" % samdb_url
# use 'paged_search' module when connecting remotely
if samdb_url.startswith("ldap://"):
ldb_options = ["modules:paged_searches"]
elif ldap_only:
raise AssertionError("Trying to connect to %s while remote "
"connection is required" % samdb_url)
# set defaults for test environment
if lp is None:
lp = env_loadparm()
if session_info is None:
session_info = samba.auth.system_session(lp)
if credentials is None:
credentials = cmdline_credentials
return SamDB(url=samdb_url,
lp=lp,
session_info=session_info,
credentials=credentials,
flags=flags,
options=ldb_options,
global_schema=global_schema)
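# Hedged usage sketch: both calls below resolve to the same LDAP URL and pick
# up loadparm/session defaults from the test environment (host name assumed):
#   samdb = connect_samdb("ldap://ldap_server.example.com")
#   samdb = connect_samdb("ldap_server.example.com")   # "ldap://" is assumed
#                                                      # for non-file names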
def connect_samdb_ex(samdb_url, lp=None, session_info=None, credentials=None,
flags=0, ldb_options=None, ldap_only=False):
"""Connects to samdb_url database
:param samdb_url: Url for database to connect to.
:param lp: Optional loadparm object
:param session_info: Optional session information
:param credentials: Optional credentials, defaults to anonymous.
:param flags: Optional LDB flags
:param ldap_only: If set, only remote LDAP connection will be created.
:return: (sam_db_connection, rootDse_record) tuple
"""
sam_db = connect_samdb(samdb_url, lp, session_info, credentials,
flags, ldb_options, ldap_only)
# fetch RootDse
res = sam_db.search(base="", expression="", scope=ldb.SCOPE_BASE,
attrs=["*"])
return (sam_db, res[0])
def delete_force(samdb, dn):
try:
samdb.delete(dn)
except ldb.LdbError, (num, _):
assert(num == ldb.ERR_NO_SUCH_OBJECT)
| gpl-3.0 |
martinbuc/missionplanner | packages/IronPython.StdLib.2.7.4/content/Lib/rlcompleter.py | 61 | 6036 | """Word completion for GNU readline 2.0.
This requires the latest extension to the readline module. The completer
completes keywords, built-ins and globals in a selectable namespace (which
defaults to __main__); when completing NAME.NAME..., it evaluates (!) the
expression up to the last dot and completes its attributes.
It's very cool to do "import sys" type "sys.", hit the
completion key (twice), and see the list of names defined by the
sys module!
Tip: to use the tab key as the completion key, call
readline.parse_and_bind("tab: complete")
Notes:
- Exceptions raised by the completer function are *ignored* (and
generally cause the completion to fail). This is a feature -- since
readline sets the tty device in raw (or cbreak) mode, printing a
traceback wouldn't work well without some complicated hoopla to save,
reset and restore the tty state.
- The evaluation of the NAME.NAME... form may cause arbitrary
application defined code to be executed if an object with a
__getattr__ hook is found. Since it is the responsibility of the
application (or the user) to enable this feature, I consider this an
acceptable risk. More complicated expressions (e.g. function calls or
indexing operations) are *not* evaluated.
- GNU readline is also used by the built-in functions input() and
raw_input(), and thus these also benefit/suffer from the completer
features. Clearly an interactive application can benefit by
specifying its own completer function and using raw_input() for all
its input.
- When the original stdin is not a tty device, GNU readline is never
used, and this module (and the readline module) are silently inactive.
"""
import __builtin__
import __main__
__all__ = ["Completer"]
class Completer:
def __init__(self, namespace = None):
"""Create a new completer for the command line.
Completer([namespace]) -> completer instance.
If unspecified, the default namespace where completions are performed
is __main__ (technically, __main__.__dict__). Namespaces should be
given as dictionaries.
Completer instances should be used as the completion mechanism of
readline via the set_completer() call:
readline.set_completer(Completer(my_namespace).complete)
"""
if namespace and not isinstance(namespace, dict):
raise TypeError,'namespace must be a dictionary'
# Don't bind to namespace quite yet, but flag whether the user wants a
# specific namespace or to use __main__.__dict__. This will allow us
# to bind to __main__.__dict__ at completion time, not now.
if namespace is None:
self.use_main_ns = 1
else:
self.use_main_ns = 0
self.namespace = namespace
def complete(self, text, state):
"""Return the next possible completion for 'text'.
This is called successively with state == 0, 1, 2, ... until it
returns None. The completion should begin with 'text'.
"""
if self.use_main_ns:
self.namespace = __main__.__dict__
if state == 0:
if "." in text:
self.matches = self.attr_matches(text)
else:
self.matches = self.global_matches(text)
try:
return self.matches[state]
except IndexError:
return None
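    # Sketch of the readline protocol implemented above (namespace assumed):
    #   c = Completer({'spam': 1})
    #   c.complete('spa', 0)   # -> 'spam'
    #   c.complete('spa', 1)   # -> None (no more matches)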
def _callable_postfix(self, val, word):
if hasattr(val, '__call__'):
word = word + "("
return word
def global_matches(self, text):
"""Compute matches when text is a simple name.
Return a list of all keywords, built-in functions and names currently
defined in self.namespace that match.
"""
import keyword
matches = []
n = len(text)
for word in keyword.kwlist:
if word[:n] == text:
matches.append(word)
for nspace in [__builtin__.__dict__, self.namespace]:
for word, val in nspace.items():
if word[:n] == text and word != "__builtins__":
matches.append(self._callable_postfix(val, word))
return matches
def attr_matches(self, text):
"""Compute matches when text contains a dot.
Assuming the text is of the form NAME.NAME....[NAME], and is
evaluatable in self.namespace, it will be evaluated and its attributes
(as revealed by dir()) are used as possible completions. (For class
instances, class members are also considered.)
WARNING: this can still invoke arbitrary C code, if an object
with a __getattr__ hook is evaluated.
"""
import re
m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text)
if not m:
return []
expr, attr = m.group(1, 3)
try:
thisobject = eval(expr, self.namespace)
except Exception:
return []
# get the content of the object, except __builtins__
words = dir(thisobject)
if "__builtins__" in words:
words.remove("__builtins__")
if hasattr(thisobject, '__class__'):
words.append('__class__')
words.extend(get_class_members(thisobject.__class__))
matches = []
n = len(attr)
for word in words:
if word[:n] == attr and hasattr(thisobject, word):
val = getattr(thisobject, word)
word = self._callable_postfix(val, "%s.%s" % (expr, word))
matches.append(word)
return matches
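    # Illustrative attribute completion, assuming 'sys' was imported into the
    # namespace (the exact result list depends on the interpreter):
    #   Completer({'sys': sys}).attr_matches('sys.pa')
    #   # -> ['sys.path', 'sys.path_hooks', 'sys.path_importer_cache']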
def get_class_members(klass):
ret = dir(klass)
if hasattr(klass,'__bases__'):
for base in klass.__bases__:
ret = ret + get_class_members(base)
return ret
try:
import readline
except ImportError:
pass
else:
readline.set_completer(Completer().complete)
| gpl-3.0 |
jonyroda97/redbot-amigosprovaveis | lib/matplotlib/units.py | 2 | 6084 | """
The classes here provide support for using custom classes with
matplotlib, e.g., those that do not expose the array interface but know
how to convert themselves to arrays. It also supports classes with
units and units conversion. Use cases include converters for custom
objects, e.g., a list of datetime objects, as well as for objects that
are unit aware. We don't assume any particular units implementation;
rather a units implementation must provide the register with the Registry
converter dictionary and a ConversionInterface. For example,
here is a complete implementation which supports plotting with native
datetime objects::
import matplotlib.units as units
import matplotlib.dates as dates
import matplotlib.ticker as ticker
import datetime
class DateConverter(units.ConversionInterface):
@staticmethod
def convert(value, unit, axis):
'convert value to a scalar or array'
return dates.date2num(value)
@staticmethod
def axisinfo(unit, axis):
'return major and minor tick locators and formatters'
if unit!='date': return None
majloc = dates.AutoDateLocator()
majfmt = dates.AutoDateFormatter(majloc)
return AxisInfo(majloc=majloc,
majfmt=majfmt,
label='date')
@staticmethod
def default_units(x, axis):
'return the default unit for x or None'
return 'date'
# finally we register our object type with a converter
units.registry[datetime.date] = DateConverter()
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from matplotlib.cbook import iterable, is_numlike, safe_first_element
import numpy as np
class AxisInfo(object):
"""information to support default axis labeling and tick labeling, and
default limits"""
def __init__(self, majloc=None, minloc=None,
majfmt=None, minfmt=None, label=None,
default_limits=None):
"""
majloc and minloc: TickLocators for the major and minor ticks
majfmt and minfmt: TickFormatters for the major and minor ticks
label: the default axis label
default_limits: the default min, max of the axis if no data is present
If any of the above are None, the axis will simply use the default
"""
self.majloc = majloc
self.minloc = minloc
self.majfmt = majfmt
self.minfmt = minfmt
self.label = label
self.default_limits = default_limits
class ConversionInterface(object):
"""
The minimal interface for a converter to take custom instances (or
sequences) and convert them to values mpl can use
"""
@staticmethod
def axisinfo(unit, axis):
'return an units.AxisInfo instance for axis with the specified units'
return None
@staticmethod
def default_units(x, axis):
'return the default unit for x or None for the given axis'
return None
@staticmethod
def convert(obj, unit, axis):
"""
convert obj using unit for the specified axis. If obj is a sequence,
return the converted sequence. The output must be a sequence of
scalars that can be used by the numpy array layer
"""
return obj
@staticmethod
def is_numlike(x):
"""
The matplotlib datalim, autoscaling, locators etc work with
scalars which are the units converted to floats given the
current unit. The converter may be passed these floats, or
arrays of them, even when units are set. Derived conversion
interfaces may opt to pass plain-ol unitless numbers through
the conversion interface and this is a helper function for
them.
"""
if iterable(x):
for thisx in x:
return is_numlike(thisx)
else:
return is_numlike(x)
class Registry(dict):
"""
register types with conversion interface
"""
def __init__(self):
dict.__init__(self)
self._cached = {}
def get_converter(self, x):
'get the converter interface instance for x, or None'
if not len(self):
return None # nothing registered
# DISABLED idx = id(x)
# DISABLED cached = self._cached.get(idx)
# DISABLED if cached is not None: return cached
converter = None
classx = getattr(x, '__class__', None)
if classx is not None:
converter = self.get(classx)
if isinstance(x, np.ndarray) and x.size:
xravel = x.ravel()
try:
# pass the first value of x that is not masked back to
# get_converter
if not np.all(xravel.mask):
# some elements are not masked
converter = self.get_converter(
xravel[np.argmin(xravel.mask)])
return converter
except AttributeError:
# not a masked_array
# Make sure we don't recurse forever -- it's possible for
# ndarray subclasses to continue to return subclasses and
# not ever return a non-subclass for a single element.
next_item = xravel[0]
if (not isinstance(next_item, np.ndarray) or
next_item.shape != x.shape):
converter = self.get_converter(next_item)
return converter
if converter is None:
try:
thisx = safe_first_element(x)
except (TypeError, StopIteration):
pass
else:
if classx and classx != getattr(thisx, '__class__', None):
converter = self.get_converter(thisx)
return converter
# DISABLED self._cached[idx] = converter
return converter
registry = Registry()
| gpl-3.0 |
dkerwin/ansible-modules-core | network/cumulus/cl_bond.py | 5 | 15552 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Cumulus Networks <[email protected]>
#
# This file is part of Ansible
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: cl_bond
version_added: "2.1"
author: "Cumulus Networks (@CumulusNetworks)"
short_description: Configures a bond port on Cumulus Linux
description:
- Configures a bond interface on Cumulus Linux To configure a bridge port
use the cl_bridge module. To configure any other type of interface use the
cl_interface module. Follow the guidelines for bonding found in the
Cumulus User Guide at http://docs.cumulusnetworks.com
options:
name:
description:
- name of the interface
required: true
alias_name:
description:
- add a port description
ipv4:
description:
- list of IPv4 addresses to configure on the interface.
use X.X.X.X/YY syntax.
ipv6:
description:
- list of IPv6 addresses to configure on the interface.
use X:X:X::X/YYY syntax
addr_method:
description:
- configures the port to use DHCP.
To enable this feature use the option 'dhcp'
choices: ['dhcp']
mtu:
description:
- set MTU. Configure Jumbo Frame by setting MTU to 9000.
virtual_ip:
description:
- define IPv4 virtual IP used by the Cumulus Linux VRR feature
virtual_mac:
description:
- define Ethernet mac associated with Cumulus Linux VRR feature
vids:
description:
- in vlan aware mode, lists vlans defined under the interface
mstpctl_bpduguard:
description:
- Enables BPDU Guard on a port in vlan-aware mode
mstpctl_portnetwork:
description:
- Enables bridge assurance in vlan-aware mode
mstpctl_portadminedge:
description:
- Enables admin edge port
clag_id:
description:
- specify a unique clag_id for every dual connected bond on each
peer switch. The value must be between 1 and 65535 and must be the
same on both peer switches in order for the bond to be considered
dual-connected
pvid:
description:
- in vlan aware mode, defines vlan that is the untagged vlan
miimon:
description:
- mii link monitoring interval
default: 100
mode:
description:
      - bond mode. As of Cumulus Linux 2.5 only the LACP bond mode is
        supported.
default: '802.3ad'
min_links:
description:
- minimum number of links
default: 1
lacp_bypass_allow:
description:
- Enable LACP bypass.
lacp_bypass_period:
description:
- Period for enabling LACP bypass. Max value is 900.
lacp_bypass_priority:
description:
- List of ports and priorities. Example "swp1=10, swp2=20"
lacp_bypass_all_active:
description:
- Activate all interfaces for bypass.
It is recommended to configure all_active instead
of using bypass_priority.
lacp_rate:
description:
- lacp rate
default: 1
slaves:
description:
- bond members
required: True
xmit_hash_policy:
description:
- transmit load balancing algorithm. As of Cumulus Linux 2.5 only
layer3+4 policy is supported
default: layer3+4
location:
description:
- interface directory location
default:
- /etc/network/interfaces.d
requirements: [ Alternate Debian network interface manager - \
ifupdown2 @ github.com/CumulusNetworks/ifupdown2 ]
notes:
    - As the module writes into the interface directory location, ensure that
      ``/etc/network/interfaces`` has a 'source /etc/network/interfaces.d/*'
      line, or whatever path is mentioned in the ``location`` attribute.
    - For the config to be activated, i.e. installed in the kernel,
      "service networking reload" needs to be executed. See the EXAMPLES
      section.
'''
EXAMPLES = '''
# Options ['virtual_mac', 'virtual_ip'] are required together
# configure a bond interface with IP address
cl_bond: name=bond0 slaves="swp4-5" ipv4=10.1.1.1/24
notify: reload networking
# configure bond as a dual-connected clag bond
cl_bond: name=bond1 slaves="swp1s0 swp2s0" clag_id=1
notify: reload networking
# define cl_bond once in tasks file
# then write interface config in variables file
# with just the options you want.
cl_bond:
name: "{{ item.key }}"
slaves: "{{ item.value.slaves }}"
clag_id: "{{ item.value.clag_id|default(omit) }}"
ipv4: "{{ item.value.ipv4|default(omit) }}"
ipv6: "{{ item.value.ipv6|default(omit) }}"
alias_name: "{{ item.value.alias_name|default(omit) }}"
addr_method: "{{ item.value.addr_method|default(omit) }}"
mtu: "{{ item.value.mtu|default(omit) }}"
vids: "{{ item.value.vids|default(omit) }}"
virtual_ip: "{{ item.value.virtual_ip|default(omit) }}"
virtual_mac: "{{ item.value.virtual_mac|default(omit) }}"
mstpctl_portnetwork: "{{ item.value.mstpctl_portnetwork|default('no') }}"
mstpctl_portadminedge: "{{ item.value.mstpctl_portadminedge|default('no') }}"
mstpctl_bpduguard: "{{ item.value.mstpctl_bpduguard|default('no') }}"
with_dict: cl_bonds
notify: reload networking
# In vars file
# ============
cl_bonds:
bond0:
alias_name: 'uplink to isp'
slaves: ['swp1', 'swp3']
ipv4: '10.1.1.1/24'
bond2:
vids: [1, 50]
clag_id: 1
'''
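# The EXAMPLES above notify a "reload networking" handler. A minimal handler
# definition is sketched here (assumed form, not shipped with this module;
# adjust to your playbook):
#
#   handlers:
#     - name: reload networking
#       command: service networking reload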
RETURN = '''
changed:
description: whether the interface was changed
returned: changed
type: bool
sample: True
msg:
description: human-readable report of success or failure
returned: always
type: string
sample: "interface bond0 config updated"
'''
# Handy helper for running external commands.
# Calls AnsibleModule.run_command and, on failure, fails the module with a
# more descriptive message.
# exec_path - command to execute, with all its arguments,
# e.g. "/sbin/ip -o link show"
def run_cmd(module, exec_path):
(_rc, out, _err) = module.run_command(exec_path)
if _rc > 0:
if re.search('cannot find interface', _err):
return '[{}]'
failure_msg = "Failed; %s Error: %s" % (exec_path, _err)
module.fail_json(msg=failure_msg)
else:
return out
def current_iface_config(module):
    # Due to a bug in ifquery, we have to check for the presence of the
    # interface file and not rely solely on ifquery. When the bug is fixed,
    # this check can be removed.
_ifacename = module.params.get('name')
_int_dir = module.params.get('location')
module.custom_current_config = {}
if os.path.exists(_int_dir + '/' + _ifacename):
_cmd = "/sbin/ifquery -o json %s" % (module.params.get('name'))
module.custom_current_config = module.from_json(
run_cmd(module, _cmd))[0]
def build_address(module):
    # if addr_method == 'dhcp', don't add an IP address
if module.params.get('addr_method') == 'dhcp':
return
_ipv4 = module.params.get('ipv4')
_ipv6 = module.params.get('ipv6')
_addresslist = []
if _ipv4 and len(_ipv4) > 0:
_addresslist += _ipv4
if _ipv6 and len(_ipv6) > 0:
_addresslist += _ipv6
if len(_addresslist) > 0:
module.custom_desired_config['config']['address'] = ' '.join(
_addresslist)
def build_vids(module):
_vids = module.params.get('vids')
if _vids and len(_vids) > 0:
module.custom_desired_config['config']['bridge-vids'] = ' '.join(_vids)
def build_pvid(module):
_pvid = module.params.get('pvid')
if _pvid:
module.custom_desired_config['config']['bridge-pvid'] = str(_pvid)
def conv_bool_to_str(_value):
if isinstance(_value, bool):
if _value is True:
return 'yes'
else:
return 'no'
return _value
def conv_array_to_str(_value):
if isinstance(_value, list):
return ' '.join(_value)
return _value
def build_generic_attr(module, _attr):
_value = module.params.get(_attr)
_value = conv_bool_to_str(_value)
_value = conv_array_to_str(_value)
if _value:
module.custom_desired_config['config'][
re.sub('_', '-', _attr)] = str(_value)
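# For example (hedged, illustrative values only): a module parameter
# mstpctl_portnetwork=True becomes the ifupdown2 config entry
# 'mstpctl-portnetwork': 'yes' via the two conversions above.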
def build_alias_name(module):
alias_name = module.params.get('alias_name')
if alias_name:
module.custom_desired_config['config']['alias'] = alias_name
def build_addr_method(module):
_addr_method = module.params.get('addr_method')
if _addr_method:
module.custom_desired_config['addr_family'] = 'inet'
module.custom_desired_config['addr_method'] = _addr_method
def build_vrr(module):
_virtual_ip = module.params.get('virtual_ip')
_virtual_mac = module.params.get('virtual_mac')
vrr_config = []
if _virtual_ip:
vrr_config.append(_virtual_mac)
vrr_config.append(_virtual_ip)
module.custom_desired_config.get('config')['address-virtual'] = \
' '.join(vrr_config)
def add_glob_to_array(_bondmems):
"""
goes through each bond member if it sees a dash add glob
before it
"""
result = []
if isinstance(_bondmems, list):
for _entry in _bondmems:
if re.search('-', _entry):
_entry = 'glob ' + _entry
result.append(_entry)
return ' '.join(result)
return _bondmems
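# Hedged illustration (assumed inputs, not from the module's tests):
#   add_glob_to_array(['swp1', 'swp4-5'])  ->  'swp1 glob swp4-5'
#   add_glob_to_array('swp1')              ->  'swp1'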
def build_bond_attr(module, _attr):
_value = module.params.get(_attr)
_value = conv_bool_to_str(_value)
_value = add_glob_to_array(_value)
if _value:
module.custom_desired_config['config'][
'bond-' + re.sub('_', '-', _attr)] = str(_value)
def build_desired_iface_config(module):
"""
take parameters defined and build ifupdown2 compatible hash
"""
module.custom_desired_config = {
'addr_family': None,
'auto': True,
'config': {},
'name': module.params.get('name')
}
for _attr in ['slaves', 'mode', 'xmit_hash_policy',
'miimon', 'lacp_rate', 'lacp_bypass_allow',
'lacp_bypass_period', 'lacp_bypass_all_active',
'min_links']:
build_bond_attr(module, _attr)
build_addr_method(module)
build_address(module)
build_vids(module)
build_pvid(module)
build_alias_name(module)
build_vrr(module)
    for _attr in ['mtu', 'mstpctl_portnetwork', 'mstpctl_portadminedge',
'mstpctl_bpduguard', 'clag_id',
'lacp_bypass_priority']:
build_generic_attr(module, _attr)
def config_dict_changed(module):
"""
return true if 'config' dict in hash is different
between desired and current config
"""
current_config = module.custom_current_config.get('config')
desired_config = module.custom_desired_config.get('config')
return current_config != desired_config
def config_changed(module):
"""
returns true if config has changed
"""
if config_dict_changed(module):
return True
# check if addr_method is changed
return module.custom_desired_config.get('addr_method') != \
module.custom_current_config.get('addr_method')
def replace_config(module):
temp = tempfile.NamedTemporaryFile()
desired_config = module.custom_desired_config
# by default it will be something like /etc/network/interfaces.d/swp1
final_location = module.params.get('location') + '/' + \
module.params.get('name')
final_text = ''
_fh = open(final_location, 'w')
# make sure to put hash in array or else ifquery will fail
# write to temp file
try:
temp.write(module.jsonify([desired_config]))
        # Seek to 0 so the buffered data is flushed to the tempfile
        # before ifquery reads it.
temp.seek(0)
_cmd = "/sbin/ifquery -a -i %s -t json" % (temp.name)
final_text = run_cmd(module, _cmd)
finally:
temp.close()
try:
_fh.write(final_text)
finally:
_fh.close()
def main():
module = AnsibleModule(
argument_spec=dict(
slaves=dict(required=True, type='list'),
name=dict(required=True, type='str'),
ipv4=dict(type='list'),
ipv6=dict(type='list'),
alias_name=dict(type='str'),
addr_method=dict(type='str',
choices=['', 'dhcp']),
mtu=dict(type='str'),
virtual_ip=dict(type='str'),
virtual_mac=dict(type='str'),
vids=dict(type='list'),
pvid=dict(type='str'),
mstpctl_portnetwork=dict(type='bool', choices=BOOLEANS),
mstpctl_portadminedge=dict(type='bool', choices=BOOLEANS),
mstpctl_bpduguard=dict(type='bool', choices=BOOLEANS),
clag_id=dict(type='str'),
min_links=dict(type='int', default=1),
mode=dict(type='str', default='802.3ad'),
miimon=dict(type='int', default=100),
xmit_hash_policy=dict(type='str', default='layer3+4'),
lacp_rate=dict(type='int', default=1),
lacp_bypass_allow=dict(type='int', choices=[0, 1]),
lacp_bypass_all_active=dict(type='int', choices=[0, 1]),
lacp_bypass_priority=dict(type='list'),
lacp_bypass_period=dict(type='int'),
location=dict(type='str',
default='/etc/network/interfaces.d')
),
mutually_exclusive=[['lacp_bypass_priority', 'lacp_bypass_all_active']],
required_together=[['virtual_ip', 'virtual_mac']]
)
    # If using the jinja default filter, this resolves to a list with an
    # empty string ['']. The following checks all lists and removes empty
    # entries, so that functions expecting an empty list get one. This fix
    # may be upstreamed into the AnsibleModule code.
for k, _param in module.params.iteritems():
if isinstance(_param, list):
module.params[k] = [x for x in _param if x]
_location = module.params.get('location')
if not os.path.exists(_location):
_msg = "%s does not exist." % (_location)
module.fail_json(msg=_msg)
return # for testing purposes only
ifacename = module.params.get('name')
_changed = False
_msg = "interface %s config not changed" % (ifacename)
current_iface_config(module)
build_desired_iface_config(module)
if config_changed(module):
replace_config(module)
_msg = "interface %s config updated" % (ifacename)
_changed = True
module.exit_json(changed=_changed, msg=_msg)
# import module snippets
from ansible.module_utils.basic import *
import tempfile
import os
import re
if __name__ == '__main__':
main()
| gpl-3.0 |
JianyuWang/nova | nova/tests/unit/network/security_group/test_neutron_driver.py | 9 | 18614 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
from mox3 import mox
from neutronclient.common import exceptions as n_exc
from neutronclient.v2_0 import client
from six.moves import range
from nova import context
from nova import exception
from nova.network.neutronv2 import api as neutronapi
from nova.network.security_group import neutron_driver
from nova import test
class TestNeutronDriver(test.NoDBTestCase):
def setUp(self):
super(TestNeutronDriver, self).setUp()
self.mox.StubOutWithMock(neutronapi, 'get_client')
self.moxed_client = self.mox.CreateMock(client.Client)
neutronapi.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
self.moxed_client)
self.context = context.RequestContext('userid', 'my_tenantid')
setattr(self.context,
'auth_token',
'bff4a5a6b9eb4ea2a6efec6eefb77936')
def test_list_with_project(self):
project_id = '0af70a4d22cf4652824ddc1f2435dd85'
security_groups_list = {'security_groups': []}
self.moxed_client.list_security_groups(tenant_id=project_id).AndReturn(
security_groups_list)
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
sg_api.list(self.context, project=project_id)
def test_list_with_all_tenants_and_admin_context(self):
project_id = '0af70a4d22cf4652824ddc1f2435dd85'
search_opts = {'all_tenants': 1}
security_groups_list = {'security_groups': []}
admin_context = context.RequestContext('user1', project_id, True)
self.mox.ReplayAll()
with mock.patch.object(
self.moxed_client,
'list_security_groups',
return_value=security_groups_list) as mock_list_secgroup:
sg_api = neutron_driver.SecurityGroupAPI()
sg_api.list(admin_context,
project=project_id,
search_opts=search_opts)
mock_list_secgroup.assert_called_once_with()
def test_list_without_all_tenants_and_admin_context(self):
project_id = '0af70a4d22cf4652824ddc1f2435dd85'
security_groups_list = {'security_groups': []}
admin_context = context.RequestContext('user1', project_id, True)
self.mox.ReplayAll()
with mock.patch.object(
self.moxed_client,
'list_security_groups',
return_value=security_groups_list) as mock_list_secgroup:
sg_api = neutron_driver.SecurityGroupAPI()
sg_api.list(admin_context, project=project_id)
mock_list_secgroup.assert_called_once_with(tenant_id=project_id)
def test_list_with_all_tenants_sec_name_and_admin_context(self):
project_id = '0af70a4d22cf4652824ddc1f2435dd85'
search_opts = {'all_tenants': 1}
security_group_names = ['secgroup_ssh']
security_groups_list = {'security_groups': []}
admin_context = context.RequestContext('user1', project_id, True)
self.mox.ReplayAll()
with mock.patch.object(
self.moxed_client,
'list_security_groups',
return_value=security_groups_list) as mock_list_secgroup:
sg_api = neutron_driver.SecurityGroupAPI()
sg_api.list(admin_context, project=project_id,
names=security_group_names,
search_opts=search_opts)
mock_list_secgroup.assert_called_once_with(
name=security_group_names,
tenant_id=project_id)
def test_list_with_all_tenants_sec_name_ids_and_admin_context(self):
project_id = '0af70a4d22cf4652824ddc1f2435dd85'
search_opts = {'all_tenants': 1}
security_group_names = ['secgroup_ssh']
security_group_ids = ['id1']
security_groups_list = {'security_groups': []}
admin_context = context.RequestContext('user1', project_id, True)
self.mox.ReplayAll()
with mock.patch.object(
self.moxed_client,
'list_security_groups',
return_value=security_groups_list) as mock_list_secgroup:
sg_api = neutron_driver.SecurityGroupAPI()
sg_api.list(admin_context, project=project_id,
names=security_group_names,
ids=security_group_ids,
search_opts=search_opts)
mock_list_secgroup.assert_called_once_with(
name=security_group_names,
id=security_group_ids,
tenant_id=project_id)
def test_list_with_all_tenants_not_admin(self):
search_opts = {'all_tenants': 1}
security_groups_list = {'security_groups': []}
self.mox.ReplayAll()
with mock.patch.object(
self.moxed_client,
'list_security_groups',
return_value=security_groups_list) as mock_list_secgroup:
sg_api = neutron_driver.SecurityGroupAPI()
sg_api.list(self.context, project=self.context.tenant,
search_opts=search_opts)
mock_list_secgroup.assert_called_once_with(
tenant_id=self.context.tenant)
def test_get_with_name_duplicated(self):
sg_name = 'web_server'
expected_sg_id = '85cc3048-abc3-43cc-89b3-377341426ac5'
list_security_groups = {'security_groups':
[{'name': sg_name,
'id': expected_sg_id,
'tenant_id': self.context.tenant,
'description': 'server',
'rules': []}
]}
self.moxed_client.list_security_groups(name=sg_name, fields='id',
tenant_id=self.context.tenant).AndReturn(list_security_groups)
expected_sg = {'security_group': {'name': sg_name,
'id': expected_sg_id,
'tenant_id': self.context.tenant,
'description': 'server', 'rules': []}}
self.moxed_client.show_security_group(expected_sg_id).AndReturn(
expected_sg)
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
observed_sg = sg_api.get(self.context, name=sg_name)
expected_sg['security_group']['project_id'] = self.context.tenant
del expected_sg['security_group']['tenant_id']
self.assertEqual(expected_sg['security_group'], observed_sg)
def test_get_with_invalid_name(self):
sg_name = 'invalid_name'
expected_sg_id = '85cc3048-abc3-43cc-89b3-377341426ac5'
list_security_groups = {'security_groups':
[{'name': sg_name,
'id': expected_sg_id,
'tenant_id': self.context.tenant,
'description': 'server',
'rules': []}
]}
self.moxed_client.list_security_groups(name=sg_name, fields='id',
tenant_id=self.context.tenant).AndReturn(list_security_groups)
self.moxed_client.show_security_group(expected_sg_id).AndRaise(
TypeError)
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
self.assertRaises(exception.SecurityGroupNotFound,
sg_api.get, self.context, name=sg_name)
def test_create_security_group_with_bad_request(self):
name = 'test-security-group'
description = None
body = {'security_group': {'name': name,
'description': description}}
message = "Invalid input. Reason: 'None' is not a valid string."
self.moxed_client.create_security_group(
body).AndRaise(n_exc.BadRequest(message=message))
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
self.assertRaises(exception.Invalid,
sg_api.create_security_group, self.context, name,
description)
def test_create_security_group_exceed_quota(self):
name = 'test-security-group'
description = 'test-security-group'
body = {'security_group': {'name': name,
'description': description}}
message = "Quota exceeded for resources: ['security_group']"
self.moxed_client.create_security_group(
body).AndRaise(n_exc.NeutronClientException(status_code=409,
message=message))
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
self.assertRaises(exception.SecurityGroupLimitExceeded,
sg_api.create_security_group, self.context, name,
description)
def test_create_security_group_rules_exceed_quota(self):
vals = {'protocol': 'tcp', 'cidr': '0.0.0.0/0',
'parent_group_id': '7ae75663-277e-4a0e-8f87-56ea4e70cb47',
'group_id': None, 'from_port': 1025, 'to_port': 1025}
body = {'security_group_rules': [{'remote_group_id': None,
'direction': 'ingress', 'protocol': 'tcp', 'ethertype': 'IPv4',
'port_range_max': 1025, 'port_range_min': 1025,
'security_group_id': '7ae75663-277e-4a0e-8f87-56ea4e70cb47',
'remote_ip_prefix': '0.0.0.0/0'}]}
name = 'test-security-group'
message = "Quota exceeded for resources: ['security_group_rule']"
self.moxed_client.create_security_group_rule(
body).AndRaise(n_exc.NeutronClientException(status_code=409,
message=message))
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
self.assertRaises(exception.SecurityGroupLimitExceeded,
sg_api.add_rules, self.context, None, name, [vals])
def test_create_security_group_rules_bad_request(self):
vals = {'protocol': 'icmp', 'cidr': '0.0.0.0/0',
'parent_group_id': '7ae75663-277e-4a0e-8f87-56ea4e70cb47',
'group_id': None, 'to_port': 255}
body = {'security_group_rules': [{'remote_group_id': None,
'direction': 'ingress', 'protocol': 'icmp',
'ethertype': 'IPv4', 'port_range_max': 255,
'security_group_id': '7ae75663-277e-4a0e-8f87-56ea4e70cb47',
'remote_ip_prefix': '0.0.0.0/0'}]}
name = 'test-security-group'
message = "ICMP code (port-range-max) 255 is provided but ICMP type" \
" (port-range-min) is missing"
self.moxed_client.create_security_group_rule(
body).AndRaise(n_exc.NeutronClientException(status_code=400,
message=message))
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
self.assertRaises(exception.Invalid, sg_api.add_rules,
self.context, None, name, [vals])
def test_list_security_group_with_no_port_range_and_not_tcp_udp_icmp(self):
sg1 = {'description': 'default',
'id': '07f1362f-34f6-4136-819a-2dcde112269e',
'name': 'default',
'tenant_id': 'c166d9316f814891bcb66b96c4c891d6',
'security_group_rules':
[{'direction': 'ingress',
'ethertype': 'IPv4',
'id': '0a4647f1-e1aa-488d-90e1-97a7d0293beb',
'port_range_max': None,
'port_range_min': None,
'protocol': '51',
'remote_group_id': None,
'remote_ip_prefix': None,
'security_group_id':
'07f1362f-34f6-4136-819a-2dcde112269e',
'tenant_id': 'c166d9316f814891bcb66b96c4c891d6'}]}
self.moxed_client.list_security_groups().AndReturn(
{'security_groups': [sg1]})
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
result = sg_api.list(self.context)
expected = [{'rules':
[{'from_port': -1, 'protocol': '51', 'to_port': -1,
'parent_group_id': '07f1362f-34f6-4136-819a-2dcde112269e',
'cidr': '0.0.0.0/0', 'group_id': None,
'id': '0a4647f1-e1aa-488d-90e1-97a7d0293beb'}],
'project_id': 'c166d9316f814891bcb66b96c4c891d6',
'id': '07f1362f-34f6-4136-819a-2dcde112269e',
'name': 'default', 'description': 'default'}]
self.assertEqual(expected, result)
def test_instances_security_group_bindings(self):
server_id = 'c5a20e8d-c4b0-47cf-9dca-ebe4f758acb1'
port1_id = '4c505aec-09aa-47bc-bcc0-940477e84dc0'
port2_id = 'b3b31a53-6e29-479f-ae5c-00b7b71a6d44'
sg1_id = '2f7ce969-1a73-4ef9-bbd6-c9a91780ecd4'
sg2_id = '20c89ce5-9388-4046-896e-64ffbd3eb584'
servers = [{'id': server_id}]
ports = [{'id': port1_id, 'device_id': server_id,
'security_groups': [sg1_id]},
{'id': port2_id, 'device_id': server_id,
'security_groups': [sg2_id]}]
port_list = {'ports': ports}
sg1 = {'id': sg1_id, 'name': 'wol'}
sg2 = {'id': sg2_id, 'name': 'eor'}
security_groups_list = {'security_groups': [sg1, sg2]}
sg_bindings = {server_id: [{'name': 'wol'}, {'name': 'eor'}]}
self.moxed_client.list_ports(device_id=[server_id]).AndReturn(
port_list)
self.moxed_client.list_security_groups(
id=mox.SameElementsAs([sg2_id, sg1_id])).AndReturn(
security_groups_list)
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
result = sg_api.get_instances_security_groups_bindings(
self.context, servers)
self.assertEqual(result, sg_bindings)
def _test_instances_security_group_bindings_scale(self, num_servers):
max_query = 150
sg1_id = '2f7ce969-1a73-4ef9-bbd6-c9a91780ecd4'
sg2_id = '20c89ce5-9388-4046-896e-64ffbd3eb584'
sg1 = {'id': sg1_id, 'name': 'wol'}
sg2 = {'id': sg2_id, 'name': 'eor'}
security_groups_list = {'security_groups': [sg1, sg2]}
servers = []
device_ids = []
ports = []
sg_bindings = {}
for i in range(0, num_servers):
server_id = "server-%d" % i
port_id = "port-%d" % i
servers.append({'id': server_id})
device_ids.append(server_id)
ports.append({'id': port_id,
'device_id': server_id,
'security_groups': [sg1_id, sg2_id]})
sg_bindings[server_id] = [{'name': 'wol'}, {'name': 'eor'}]
for x in range(0, num_servers, max_query):
self.moxed_client.list_ports(
device_id=device_ids[x:x + max_query]).\
AndReturn({'ports': ports[x:x + max_query]})
self.moxed_client.list_security_groups(
id=mox.SameElementsAs([sg2_id, sg1_id])).AndReturn(
security_groups_list)
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
result = sg_api.get_instances_security_groups_bindings(
self.context, servers)
self.assertEqual(result, sg_bindings)
def test_instances_security_group_bindings_less_than_max(self):
self._test_instances_security_group_bindings_scale(100)
def test_instances_security_group_bindings_max(self):
self._test_instances_security_group_bindings_scale(150)
    def test_instances_security_group_bindings_more_than_max(self):
self._test_instances_security_group_bindings_scale(300)
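    # The three scale tests above exercise the port-query batching (a hedged
    # reading of the helper): with max_query == 150, 300 servers produce two
    # list_ports calls of 150 device ids each, followed by a single
    # list_security_groups call.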
def test_instances_security_group_bindings_with_hidden_sg(self):
servers = [{'id': 'server_1'}]
ports = [{'id': '1', 'device_id': 'dev_1', 'security_groups': ['1']},
{'id': '2', 'device_id': 'dev_1', 'security_groups': ['2']}]
port_list = {'ports': ports}
sg1 = {'id': '1', 'name': 'wol'}
# User doesn't have access to sg2
security_groups_list = {'security_groups': [sg1]}
sg_bindings = {'dev_1': [{'name': 'wol'}]}
self.moxed_client.list_ports(device_id=['server_1']).AndReturn(
port_list)
self.moxed_client.\
list_security_groups(id=mox.SameElementsAs(['1', '2'])).AndReturn(
security_groups_list)
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
result = sg_api.get_instances_security_groups_bindings(
self.context, servers)
self.assertEqual(result, sg_bindings)
def test_instance_empty_security_groups(self):
port_list = {'ports': [{'id': 1, 'device_id': '1',
'security_groups': []}]}
self.moxed_client.list_ports(device_id=['1']).AndReturn(port_list)
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
result = sg_api.get_instance_security_groups(self.context, '1')
self.assertEqual([], result)
class TestNeutronDriverWithoutMock(test.NoDBTestCase):
def test_validate_property(self):
sg_api = neutron_driver.SecurityGroupAPI()
sg_api.validate_property('foo', 'name', None)
sg_api.validate_property('', 'name', None)
self.assertRaises(exception.Invalid, sg_api.validate_property,
'a' * 256, 'name', None)
self.assertRaises(exception.Invalid, sg_api.validate_property,
None, 'name', None)
| apache-2.0 |
kevinlee12/oppia | core/domain/draft_upgrade_services_test.py | 1 | 56055 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for draft upgrade services."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.domain import draft_upgrade_services
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.tests import test_utils
import feconf
import python_utils
import utils
class DraftUpgradeUnitTests(test_utils.GenericTestBase):
"""Test the draft upgrade services module."""
EXP_ID = 'exp_id'
USER_ID = 'user_id'
OTHER_CHANGE_LIST = [exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
'property_name': 'title',
'new_value': 'New title'
})]
EXP_MIGRATION_CHANGE_LIST = [exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION,
'from_version': '0',
'to_version': python_utils.UNICODE(
feconf.CURRENT_STATE_SCHEMA_VERSION)
})]
DRAFT_CHANGELIST = [exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'title',
'old_value': None,
'new_value': 'Updated title'})]
def setUp(self):
super(DraftUpgradeUnitTests, self).setUp()
self.save_new_valid_exploration(self.EXP_ID, self.USER_ID)
def test_try_upgrade_with_no_version_difference(self):
self.assertIsNone(
draft_upgrade_services.try_upgrading_draft_to_exp_version(
self.DRAFT_CHANGELIST, 1, 1, self.EXP_ID))
def test_try_upgrade_raises_exception_if_versions_are_invalid(self):
with self.assertRaisesRegexp(
utils.InvalidInputException,
'Current draft version is greater than the exploration version.'):
draft_upgrade_services.try_upgrading_draft_to_exp_version(
self.DRAFT_CHANGELIST, 2, 1, self.EXP_ID)
exp_services.update_exploration(
self.USER_ID, self.EXP_ID, self.OTHER_CHANGE_LIST,
'Changed exploration title.')
exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID)
self.assertEqual(exploration.version, 2)
self.assertIsNone(
draft_upgrade_services.try_upgrading_draft_to_exp_version(
self.DRAFT_CHANGELIST, 1, exploration.version, self.EXP_ID))
def test_try_upgrade_failure_due_to_unsupported_commit_type(self):
exp_services.update_exploration(
self.USER_ID, self.EXP_ID, self.OTHER_CHANGE_LIST,
'Changed exploration title.')
exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID)
self.assertEqual(exploration.version, 2)
self.assertIsNone(
draft_upgrade_services.try_upgrading_draft_to_exp_version(
self.DRAFT_CHANGELIST, 1, exploration.version, self.EXP_ID))
def test_try_upgrade_failure_due_to_unimplemented_upgrade_methods(self):
exp_services.update_exploration(
self.USER_ID, self.EXP_ID, self.EXP_MIGRATION_CHANGE_LIST,
'Ran Exploration Migration job.')
exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID)
self.assertEqual(exploration.version, 2)
self.assertIsNone(
draft_upgrade_services.try_upgrading_draft_to_exp_version(
self.DRAFT_CHANGELIST, 1, exploration.version, self.EXP_ID))
def test_extract_html_from_draft_change_list(self):
html_content = (
'<p>Value</p><oppia-noninteractive-math math_content-with-value='
'"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &'
'amp;quot;svg_filename&quot;: &quot;&quot;}"></oppia'
'-noninteractive-math>')
draft_change_list = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'state2',
'property_name': 'widget_customization_args',
'new_value': {
'choices': {
'value': [
'<p>1</p>',
'<p>2</p>',
html_content,
'<p>4</p>'
]
},
'maxAllowableSelectionCount': {
'value': 1
},
'minAllowableSelectionCount': {
'value': 1
}
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': 'answer_groups',
'state_name': 'State 1',
'new_value': [{
'rule_specs': [{
'rule_type': 'Equals',
'inputs': {
'x': [html_content]
}
}, {
'rule_type': 'ContainsAtLeastOneOf',
'inputs': {
'x': [html_content]
}
}, {
'rule_type': 'IsProperSubsetOf',
'inputs': {
'x': [html_content]
}
}, {
'rule_type': 'DoesNotContainAtLeastOneOf',
'inputs': {
'x': [html_content]
}
}, {
'rule_type': 'Equals',
'inputs': {
'x': 1
}
}, {
'rule_type': 'HasElementXAtPositionY',
'inputs': {
'x': html_content,
'y': 2
}
}, {
'rule_type': 'IsEqualToOrdering',
'inputs': {
'x': [[html_content]]
}
}, {
'rule_type': 'HasElementXBeforeElementY',
'inputs': {
'x': html_content,
'y': html_content
}
}, {
'rule_type': (
'IsEqualToOrderingWithOneItemAtIncorrectPosition'),
'inputs': {
'x': [[html_content]]
}
}],
'outcome': {
'dest': 'Introduction',
'feedback': {
'content_id': 'feedback',
'html': html_content
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_skill_misconception_id': None
}]
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'content',
'new_value': {
'content_id': 'content',
'html': html_content
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'written_translations',
'new_value': {
'translations_mapping': {
'content1': {
'en': {
'data_format': 'html',
'translation': html_content,
'needs_update': True
},
'hi': {
'data_format': 'html',
'translation': 'Hey!',
'needs_update': False
}
},
'feedback_1': {
'hi': {
'data_format': 'html',
'translation': html_content,
'needs_update': False
},
'en': {
'data_format': 'html',
'translation': 'hello!',
'needs_update': False
}
}
}
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'solution',
'new_value': {
'answer_is_exclusive': False,
'correct_answer': 'helloworld!',
'explanation': {
'content_id': 'solution',
'html': html_content
},
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'solution',
'new_value': {
'answer_is_exclusive': True,
'correct_answer': [
[html_content],
['<p>2</p>'],
['<p>3</p>'],
['<p>4</p>']
],
'explanation': {
'content_id': 'solution',
'html': '<p>This is solution for state1</p>'
}
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'default_outcome',
'new_value': {
'param_changes': [],
'feedback': {
'content_id': 'default_outcome',
'html': html_content
},
'dest': 'Introduction',
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None,
'labelled_as_correct': False
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'hints',
'new_value': [{
'hint_content': {
'content_id': 'hint1',
'html': html_content
}
}]
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_RENAME_STATE,
'old_state_name': 'Intro',
'new_state_name': 'Introduction',
})
]
list_of_html = (
draft_upgrade_services.extract_html_from_draft_change_list(
draft_change_list))
self.assertEqual(len(list_of_html), 27)
expected_html_strings = [
html_content, '<p>1</p>', '<p>2</p>', '<p>3</p>', '<p>4</p>',
'<p>This is solution for state1</p>', 'Hey!', 'hello!']
for html in list_of_html:
self.assertTrue(html in expected_html_strings)
class DraftUpgradeUtilUnitTests(test_utils.GenericTestBase):
"""Test the DraftUpgradeUtil module."""
EXP_MIGRATION_CHANGE_LIST = [exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION,
'from_version': '34',
'to_version': '35'
})]
# EXP_ID and USER_ID used to create default explorations.
EXP_ID = 'exp_id'
USER_ID = 'user_id'
def create_and_migrate_new_exploration(
self, current_schema_version, target_schema_version):
"""Creates an exploration and applies a state schema migration to it.
Creates an exploration and migrates its state schema from version
current_schema_version to target_schema_version. Asserts that the
exploration was successfully migrated.
Args:
            current_schema_version: string. The current schema version of the
                exploration (e.g. '29').
            target_schema_version: string. The schema version to upgrade
                the exploration to (e.g. '30').
"""
# Create an exploration change list with the command that will migrate
# the schema from current_schema_version to target_schema_version.
exp_migration_change_list = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION,
'from_version': current_schema_version,
'to_version': target_schema_version
})
]
# The migration will automatically migrate the exploration to the latest
# state schema version, so we set the latest schema version to be the
# target_schema_version.
with self.swap(
feconf, 'CURRENT_STATE_SCHEMA_VERSION',
int(target_schema_version)):
# Create and migrate the exploration.
self.save_new_valid_exploration(self.EXP_ID, self.USER_ID)
exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID)
exp_services.update_exploration(
self.USER_ID, self.EXP_ID, exp_migration_change_list,
'Ran Exploration Migration job.')
# Assert that the update was applied and that the exploration state
# schema was successfully updated.
exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID)
self.assertEqual(exploration.version, 2)
self.assertEqual(
python_utils.UNICODE(
exploration.states_schema_version),
target_schema_version)
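    # Hedged usage sketch: the tests below call, for example,
    #   self.create_and_migrate_new_exploration('34', '35')
    # and then feed a v34-style draft change list to
    # draft_upgrade_services.try_upgrading_draft_to_exp_version(...) to check
    # how (or whether) the draft is upgraded.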
def test_convert_to_latest_schema_version_implemented(self):
state_schema_version = feconf.CURRENT_STATE_SCHEMA_VERSION
conversion_fn_name = '_convert_states_v%s_dict_to_v%s_dict' % (
state_schema_version - 1, state_schema_version)
self.assertTrue(
hasattr(
draft_upgrade_services.DraftUpgradeUtil, conversion_fn_name),
msg='Current schema version is %d but DraftUpgradeUtil.%s is '
'unimplemented.' % (state_schema_version, conversion_fn_name))
def test_convert_states_v36_dict_to_v37_dict(self):
draft_change_list_v36 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'content',
'new_value': 'new value'
}),
exp_domain.ExplorationChange({
'cmd': 'edit_state_property',
'state_name': 'Intro',
'property_name': 'answer_groups',
'new_value': [{
'rule_specs': [{
'rule_type': 'CaseSensitiveEquals',
'inputs': {
'x': 'test'
}
}],
'outcome': {
'dest': 'Introduction',
'feedback': {
'content_id': 'feedback',
'html': '<p>Content</p>'
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_skill_misconception_id': None
}]
})
]
draft_change_list_v37 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'content',
'new_value': 'new value'
}),
exp_domain.ExplorationChange({
'cmd': 'edit_state_property',
'state_name': 'Intro',
'property_name': 'answer_groups',
'new_value': [{
'rule_specs': [{
'rule_type': 'Equals',
'inputs': {
'x': 'test'
}
}],
'outcome': {
'dest': 'Introduction',
'feedback': {
'content_id': 'feedback',
'html': '<p>Content</p>'
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_skill_misconception_id': None
}]
})
]
# Migrate exploration to state schema version 37.
self.create_and_migrate_new_exploration('36', '37')
migrated_draft_change_list_v37 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_v36, 1, 2, self.EXP_ID))
# Change draft change lists into a list of dicts so that it is
# easy to compare the whole draft change list.
draft_change_list_v37_dict_list = [
change.to_dict() for change in draft_change_list_v37
]
migrated_draft_change_list_v37_dict_list = [
change.to_dict() for change in migrated_draft_change_list_v37
]
self.assertEqual(
draft_change_list_v37_dict_list,
migrated_draft_change_list_v37_dict_list)
def test_convert_states_v35_dict_to_v36_dict(self):
draft_change_list_1_v35 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'content',
'new_value': 'new value'
}),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'widget_id',
'new_value': 'MathExpressionInput'
}),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'widget_customization_args',
'new_value': {}
})
]
draft_change_list_2_v35 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'content',
'new_value': 'new value'
}),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'widget_id',
'new_value': 'MathExpressionInput'
})
]
# Migrate exploration to state schema version 36.
self.create_and_migrate_new_exploration('35', '36')
migrated_draft_change_list_1_v36 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_1_v35, 1, 2, self.EXP_ID))
self.assertIsNone(migrated_draft_change_list_1_v36)
migrated_draft_change_list_2_v36 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_2_v35, 1, 2, self.EXP_ID))
# Change draft change lists into a list of dicts so that it is
# easy to compare the whole draft change list.
draft_change_list_2_v35_dict_list = [
change.to_dict() for change in draft_change_list_2_v35
]
migrated_draft_change_list_2_v36_dict_list = [
change.to_dict() for change in migrated_draft_change_list_2_v36
]
self.assertEqual(
draft_change_list_2_v35_dict_list,
migrated_draft_change_list_2_v36_dict_list)
def test_convert_states_v34_dict_to_v35_dict(self):
draft_change_list_1_v34 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'content',
'new_value': 'new value'
}),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'widget_id',
'new_value': 'MathExpressionInput'
}),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'answer_groups',
'new_value': [{
'rule_specs': [{
'rule_type': 'IsMathematicallyEquivalentTo',
'inputs': {
'x': 'x+y/2'
}
}],
'outcome': {
'dest': 'Introduction',
'feedback': {
'content_id': 'feedback',
'html': '<p>Content</p>'
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_skill_misconception_id': None
}]
})
]
draft_change_list_2_v34 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'content',
'new_value': 'new value'
})
]
# Migrate exploration to state schema version 35.
self.create_and_migrate_new_exploration('34', '35')
migrated_draft_change_list_1_v35 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_1_v34, 1, 2, self.EXP_ID))
self.assertIsNone(migrated_draft_change_list_1_v35)
migrated_draft_change_list_2_v35 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_2_v34, 1, 2, self.EXP_ID))
# Change draft change lists into a list of dicts so that it is
# easy to compare the whole draft change list.
draft_change_list_2_v34_dict_list = [
change.to_dict() for change in draft_change_list_2_v34
]
migrated_draft_change_list_2_v35_dict_list = [
change.to_dict() for change in migrated_draft_change_list_2_v35
]
self.assertEqual(
draft_change_list_2_v34_dict_list,
migrated_draft_change_list_2_v35_dict_list)
def test_convert_states_v33_dict_to_v34_dict(self):
html_content = (
'<p>Value</p><oppia-noninteractive-math raw_latex-with-value="&a'
'mp;quot;+,-,-,+&quot;"></oppia-noninteractive-math>')
expected_html_content = (
'<p>Value</p><oppia-noninteractive-math math_content-with-value='
'"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &'
'amp;quot;svg_filename&quot;: &quot;&quot;}"></oppia'
'-noninteractive-math>')
draft_change_list = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'state2',
'property_name': 'widget_customization_args',
'new_value': {
'choices': {
'value': [
'<p>1</p>',
'<p>2</p>',
html_content,
'<p>4</p>'
]
},
'maxAllowableSelectionCount': {
'value': 1
},
'minAllowableSelectionCount': {
'value': 1
}
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': 'answer_groups',
'state_name': 'State 1',
'new_value': [{
'rule_specs': [{
'rule_type': 'Equals',
'inputs': {
'x': [html_content]
}
}, {
'rule_type': 'ContainsAtLeastOneOf',
'inputs': {
'x': [html_content]
}
}, {
'rule_type': 'IsProperSubsetOf',
'inputs': {
'x': [html_content]
}
}, {
'rule_type': 'DoesNotContainAtLeastOneOf',
'inputs': {
'x': [html_content]
}
}, {
'rule_type': 'Equals',
'inputs': {
'x': 1
}
}, {
'rule_type': 'HasElementXAtPositionY',
'inputs': {
'x': html_content,
'y': 2
}
}, {
'rule_type': 'IsEqualToOrdering',
'inputs': {
'x': [[html_content]]
}
}, {
'rule_type': 'HasElementXBeforeElementY',
'inputs': {
'x': html_content,
'y': html_content
}
}, {
'rule_type': (
'IsEqualToOrderingWithOneItemAtIncorrectPosition'),
'inputs': {
'x': [[html_content]]
}
}],
'outcome': {
'dest': 'Introduction',
'feedback': {
'content_id': 'feedback',
'html': html_content
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_skill_misconception_id': None
}]
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'content',
'new_value': {
'content_id': 'content',
'html': html_content
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'written_translations',
'new_value': {
'translations_mapping': {
'content1': {
'en': {
'html': html_content,
'needs_update': True
},
'hi': {
'html': 'Hey!',
'needs_update': False
}
},
'feedback_1': {
'hi': {
'html': html_content,
'needs_update': False
},
'en': {
'html': 'hello!',
'needs_update': False
}
}
}
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'solution',
'new_value': {
'answer_is_exclusive': False,
'correct_answer': 'helloworld!',
'explanation': {
'content_id': 'solution',
'html': html_content
},
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'solution',
'new_value': {
'answer_is_exclusive': True,
'correct_answer': [
[html_content],
['<p>2</p>'],
['<p>3</p>'],
['<p>4</p>']
],
'explanation': {
'content_id': 'solution',
'html': '<p>This is solution for state1</p>'
}
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'default_outcome',
'new_value': {
'param_changes': [],
'feedback': {
'content_id': 'default_outcome',
'html': html_content
},
'dest': 'Introduction',
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None,
'labelled_as_correct': False
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'hints',
'new_value': [{
'hint_content': {
'content_id': 'hint1',
'html': html_content
}
}]
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_RENAME_STATE,
'old_state_name': 'Intro',
'new_state_name': 'Introduction',
})
]
self.create_and_migrate_new_exploration('33', '34')
migrated_draft_change_list = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list, 1, 2, self.EXP_ID))
self.assertEqual(
migrated_draft_change_list[0].to_dict(),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'state2',
'property_name': 'widget_customization_args',
'new_value': {
'choices': {
'value': [
'<p>1</p>',
'<p>2</p>',
expected_html_content,
'<p>4</p>'
]
},
'maxAllowableSelectionCount': {
'value': 1
},
'minAllowableSelectionCount': {
'value': 1
}
}
}).to_dict())
self.assertEqual(
migrated_draft_change_list[1].to_dict(),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': 'answer_groups',
'state_name': 'State 1',
'new_value': [{
'rule_specs': [{
'rule_type': 'Equals',
'inputs': {
'x': [expected_html_content]
}
}, {
'rule_type': 'ContainsAtLeastOneOf',
'inputs': {
'x': [expected_html_content]
}
}, {
'rule_type': 'IsProperSubsetOf',
'inputs': {
'x': [expected_html_content]
}
}, {
'rule_type': 'DoesNotContainAtLeastOneOf',
'inputs': {
'x': [expected_html_content]
}
}, {
'rule_type': 'Equals',
'inputs': {
'x': 1
}
}, {
'rule_type': 'HasElementXAtPositionY',
'inputs': {
'x': expected_html_content,
'y': 2
}
}, {
'rule_type': 'IsEqualToOrdering',
'inputs': {
'x': [[expected_html_content]]
}
}, {
'rule_type': 'HasElementXBeforeElementY',
'inputs': {
'x': expected_html_content,
'y': expected_html_content
}
}, {
'rule_type': (
'IsEqualToOrderingWithOneItemAtIncorrectPosition'),
'inputs': {
'x': [[expected_html_content]]
}
}],
'outcome': {
'dest': 'Introduction',
'feedback': {
'content_id': 'feedback',
'html': expected_html_content
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_skill_misconception_id': None
}]
}).to_dict())
self.assertEqual(
migrated_draft_change_list[2].to_dict(),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'content',
'new_value': {
'content_id': 'content',
'html': expected_html_content
}
}).to_dict())
self.assertEqual(
migrated_draft_change_list[3].to_dict(),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'written_translations',
'new_value': {
'translations_mapping': {
'content1': {
'en': {
'html': expected_html_content,
'needs_update': True
},
'hi': {
'html': 'Hey!',
'needs_update': False
}
},
'feedback_1': {
'hi': {
'html': expected_html_content,
'needs_update': False
},
'en': {
'html': 'hello!',
'needs_update': False
}
}
}
}
}).to_dict())
self.assertEqual(
migrated_draft_change_list[4].to_dict(),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'solution',
'new_value': {
'answer_is_exclusive': False,
'correct_answer': 'helloworld!',
'explanation': {
'content_id': 'solution',
'html': expected_html_content
},
}
}).to_dict())
self.assertEqual(
migrated_draft_change_list[5].to_dict(),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'solution',
'new_value': {
'answer_is_exclusive': True,
'correct_answer': [
[expected_html_content],
['<p>2</p>'],
['<p>3</p>'],
['<p>4</p>']
],
'explanation': {
'content_id': 'solution',
'html': '<p>This is solution for state1</p>'
}
}
}).to_dict())
self.assertEqual(
migrated_draft_change_list[6].to_dict(),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'default_outcome',
'new_value': {
'param_changes': [],
'feedback': {
'content_id': 'default_outcome',
'html': expected_html_content
},
'dest': 'Introduction',
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None,
'labelled_as_correct': False
}
}).to_dict())
self.assertEqual(
migrated_draft_change_list[7].to_dict(),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'hints',
'new_value': [{
'hint_content': {
'content_id': 'hint1',
'html': expected_html_content
}
}]
}).to_dict())
def test_convert_states_v32_dict_to_v33_dict(self):
draft_change_list_v32 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'state1',
'property_name': 'widget_customization_args',
'new_value': {
'choices': {
'value': [
'<p>1</p>',
'<p>2</p>',
'<p>3</p>',
'<p>4</p>'
]
}
}
}),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'state2',
'property_name': 'widget_customization_args',
'new_value': {
'choices': {
'value': [
'<p>1</p>',
'<p>2</p>',
'<p>3</p>',
'<p>4</p>'
]
},
'maxAllowableSelectionCount': {
'value': 1
},
'minAllowableSelectionCount': {
'value': 1
}
}
})
]
# Version 33 adds a showChoicesInShuffledOrder bool, which doesn't
# impact the second ExplorationChange because it will only impact
# it if 'choices' is the only key for new_value.
expected_draft_change_list_v33 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'state1',
'property_name': 'widget_customization_args',
'new_value': {
'choices': {
'value': [
'<p>1</p>',
'<p>2</p>',
'<p>3</p>',
'<p>4</p>'
]
},
'showChoicesInShuffledOrder': {
'value': False
}
}
}),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'state2',
'property_name': 'widget_customization_args',
'new_value': {
'choices': {
'value': [
'<p>1</p>',
'<p>2</p>',
'<p>3</p>',
'<p>4</p>'
]
},
'maxAllowableSelectionCount': {
'value': 1
},
'minAllowableSelectionCount': {
'value': 1
}
}
})
]
# Migrate exploration to state schema version 33.
self.create_and_migrate_new_exploration('32', '33')
# Migrate the draft change list's state schema to the migrated
# exploration's schema.
migrated_draft_change_list_v33 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_v32, 1, 2, self.EXP_ID)
)
# Change draft change lists into a list of dicts so that it is
# easy to compare the whole draft change list.
expected_draft_change_list_v33_dict_list = [
change.to_dict() for change in expected_draft_change_list_v33
]
migrated_draft_change_list_v33_dict_list = [
change.to_dict() for change in migrated_draft_change_list_v33
]
self.assertEqual(
expected_draft_change_list_v33_dict_list,
migrated_draft_change_list_v33_dict_list)
def test_convert_states_v31_dict_to_v32_dict(self):
draft_change_list_v31 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'content',
'new_value': 'new value'
})
]
# Migrate exploration to state schema version 32.
self.create_and_migrate_new_exploration('31', '32')
# Migrate the draft change list's state schema to the migrated
# exploration's schema. In this case there are no changes to the
# draft change list since version 32 adds a customization arg
# for the "Add" button text in SetInput interaction for the
# exploration, for which there should be no changes to drafts.
migrated_draft_change_list_v32 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_v31, 1, 2, self.EXP_ID)
)
# Change draft change lists into a list of dicts so that it is
# easy to compare the whole draft change list.
draft_change_list_v31_dict_list = [
change.to_dict() for change in draft_change_list_v31
]
migrated_draft_change_list_v32_dict_list = [
change.to_dict() for change in migrated_draft_change_list_v32
]
self.assertEqual(
draft_change_list_v31_dict_list,
migrated_draft_change_list_v32_dict_list)
def test_convert_states_v30_dict_to_v31_dict(self):
draft_change_list_v30 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'recorded_voiceovers',
'new_value': {
'voiceovers_mapping': {
'content': {
'en': {
'file_size_name': 100,
'filename': 'atest.mp3',
'needs_update': False
}
}
}
}
})
]
# Version 31 adds the duration_secs property.
expected_draft_change_list_v31 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'recorded_voiceovers',
'new_value': {
'voiceovers_mapping': {
'content': {
'en': {
'file_size_name': 100,
'filename': 'atest.mp3',
'needs_update': False,
'duration_secs': 0.0
}
}
}
}
})
]
# Migrate exploration to state schema version 31.
self.create_and_migrate_new_exploration('30', '31')
# Migrate the draft change list's state schema to the migrated
# exploration's schema.
migrated_draft_change_list_v31 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_v30, 1, 2, self.EXP_ID)
)
# Change draft change lists into a list of dicts so that it is
# easy to compare the whole draft change list.
expected_draft_change_list_v31_dict_list = [
change.to_dict() for change in expected_draft_change_list_v31
]
migrated_draft_change_list_v31_dict_list = [
change.to_dict() for change in migrated_draft_change_list_v31
]
self.assertEqual(
expected_draft_change_list_v31_dict_list,
migrated_draft_change_list_v31_dict_list)
def test_convert_states_v29_dict_to_v30_dict(self):
draft_change_list_v29 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': 'answer_groups',
'state_name': 'State 1',
'new_value': {
'rule_specs': [{
'rule_type': 'Equals',
'inputs': {'x': [
'<p>This is value1 for ItemSelection</p>'
]}
}, {
'rule_type': 'Equals',
'inputs': {'x': [
'<p>This is value2 for ItemSelection</p>'
]}
}],
'outcome': {
'dest': 'Introduction',
'feedback': {
'content_id': 'feedback',
'html': '<p>Outcome for state1</p>'
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_misconception_id': None
}
})
]
# Version 30 replaces the tagged_misconception_id in version 29
# with tagged_skill_misconception_id.
expected_draft_change_list_v30 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': 'answer_groups',
'state_name': 'State 1',
'new_value': {
'rule_specs': [{
'rule_type': 'Equals',
'inputs': {'x': [
'<p>This is value1 for ItemSelection</p>'
]}
}, {
'rule_type': 'Equals',
'inputs': {'x': [
'<p>This is value2 for ItemSelection</p>'
]}
}],
'outcome': {
'dest': 'Introduction',
'feedback': {
'content_id': 'feedback',
'html': '<p>Outcome for state1</p>'
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_skill_misconception_id': None
}
})
]
# Migrate exploration to state schema version 30.
self.create_and_migrate_new_exploration('29', '30')
# Migrate the draft change list's state schema to the migrated
# exploration's schema.
migrated_draft_change_list_v30 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_v29, 1, 2, self.EXP_ID)
)
# Change draft change lists into a list of dicts so that it is
# easy to compare the whole draft change list.
expected_draft_change_list_v30_dict_list = [
change.to_dict() for change in expected_draft_change_list_v30
]
migrated_draft_change_list_v30_dict_list = [
change.to_dict() for change in migrated_draft_change_list_v30
]
self.assertEqual(
expected_draft_change_list_v30_dict_list,
migrated_draft_change_list_v30_dict_list)
def test_convert_states_v28_dict_to_v29_dict(self):
draft_change_list_v28 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'content',
'new_value': 'new value'
})
]
# Migrate exploration to state schema version 29.
self.create_and_migrate_new_exploration('28', '29')
# Migrate the draft change list's state schema to the migrated
        # exploration's schema. In this case there are no changes to the
# draft change list since version 29 adds the
# solicit_answer_details boolean variable to the exploration
# state, for which there should be no changes to drafts.
migrated_draft_change_list_v29 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_v28, 1, 2, self.EXP_ID)
)
# Change draft change lists into a list of dicts so that it is
# easy to compare the whole draft change list.
draft_change_list_v28_dict_list = [
change.to_dict() for change in draft_change_list_v28
]
migrated_draft_change_list_v29_dict_list = [
change.to_dict() for change in migrated_draft_change_list_v29
]
self.assertEqual(
draft_change_list_v28_dict_list,
migrated_draft_change_list_v29_dict_list)
def test_convert_states_v27_dict_to_v28_dict(self):
draft_change_list_v27 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': 'content_ids_to_audio_translations',
'state_name': 'State B',
'new_value': 'new value',
})
]
# Version 28 adds voiceovers_mapping.
expected_draft_change_list_v28 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': 'recorded_voiceovers',
'state_name': 'State B',
'new_value': {'voiceovers_mapping': 'new value'}
})
]
# Migrate exploration to state schema version 28.
self.create_and_migrate_new_exploration('27', '28')
# Migrate the draft change list's state schema to the migrated
# exploration's schema.
migrated_draft_change_list_v28 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_v27, 1, 2, self.EXP_ID)
)
# Change draft change lists into a list of dicts so that it is
# easy to compare the whole draft change list.
expected_draft_change_list_v28_dict_list = [
change.to_dict() for change in expected_draft_change_list_v28
]
migrated_draft_change_list_v28_dict_list = [
change.to_dict() for change in migrated_draft_change_list_v28
]
self.assertEqual(
expected_draft_change_list_v28_dict_list,
migrated_draft_change_list_v28_dict_list)
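    # The v27 -> v28 step above is a pure reshaping; a hedged sketch of the
    # idea (hypothetical helper, not the real migration code) is to rename the
    # property and nest the old value under 'voiceovers_mapping'.
    @staticmethod
    def _sketch_wrap_audio_translations(change_dict):
        converted = dict(change_dict)
        converted['property_name'] = 'recorded_voiceovers'
        converted['new_value'] = {
            'voiceovers_mapping': change_dict['new_value']
        }
        return converted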
| apache-2.0 |
axinging/chromium-crosswalk | third_party/protobuf/python/google/protobuf/internal/text_format_test.py | 15 | 41879 | #! /usr/bin/env python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test for google.protobuf.text_format."""
__author__ = '[email protected] (Kenton Varda)'
import re
import six
import string
try:
import unittest2 as unittest
except ImportError:
import unittest
from google.protobuf.internal import _parameterized
from google.protobuf import map_unittest_pb2
from google.protobuf import unittest_mset_pb2
from google.protobuf import unittest_pb2
from google.protobuf import unittest_proto3_arena_pb2
from google.protobuf.internal import api_implementation
from google.protobuf.internal import test_util
from google.protobuf.internal import message_set_extensions_pb2
from google.protobuf import text_format
# Low-level nuts-n-bolts tests.
class SimpleTextFormatTests(unittest.TestCase):
# The members of _QUOTES are formatted into a regexp template that
# expects single characters. Therefore it's an error (in addition to being
  # nonsensical in the first place) to try to specify a "quote mark" that is
# more than one character.
  def testQuoteMarksAreSingleChars(self):
for quote in text_format._QUOTES:
self.assertEqual(1, len(quote))
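  # For instance, a character class built as '[' + ''.join(_QUOTES) + ']'
  # only matches one quote mark per character if every entry is a single
  # character (this construction is illustrative, not the actual pattern
  # used inside text_format).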
# Base class with some common functionality.
class TextFormatBase(unittest.TestCase):
def ReadGolden(self, golden_filename):
with test_util.GoldenFile(golden_filename) as f:
return (f.readlines() if str is bytes else # PY3
[golden_line.decode('utf-8') for golden_line in f])
def CompareToGoldenFile(self, text, golden_filename):
golden_lines = self.ReadGolden(golden_filename)
self.assertMultiLineEqual(text, ''.join(golden_lines))
def CompareToGoldenText(self, text, golden_text):
self.assertEqual(text, golden_text)
def RemoveRedundantZeros(self, text):
# Some platforms print 1e+5 as 1e+005. This is fine, but we need to remove
# these zeros in order to match the golden file.
text = text.replace('e+0','e+').replace('e+0','e+') \
.replace('e-0','e-').replace('e-0','e-')
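    # Each .replace() pass strips at most one zero per exponent because the
    # scan resumes after each substitution ('1e+005' -> '1e+05' -> '1e+5'),
    # hence the call is applied twice.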
    # Floating point fields are printed with a .0 suffix even if they are
    # actually integer numbers.
    text = re.compile(r'\.0$', re.MULTILINE).sub('', text)
return text
@_parameterized.Parameters(
(unittest_pb2),
(unittest_proto3_arena_pb2))
class TextFormatTest(TextFormatBase):
def testPrintExotic(self, message_module):
message = message_module.TestAllTypes()
message.repeated_int64.append(-9223372036854775808)
message.repeated_uint64.append(18446744073709551615)
message.repeated_double.append(123.456)
message.repeated_double.append(1.23e22)
message.repeated_double.append(1.23e-18)
message.repeated_string.append('\000\001\a\b\f\n\r\t\v\\\'"')
message.repeated_string.append(u'\u00fc\ua71f')
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_format.MessageToString(message)),
'repeated_int64: -9223372036854775808\n'
'repeated_uint64: 18446744073709551615\n'
'repeated_double: 123.456\n'
'repeated_double: 1.23e+22\n'
'repeated_double: 1.23e-18\n'
'repeated_string:'
' "\\000\\001\\007\\010\\014\\n\\r\\t\\013\\\\\\\'\\""\n'
'repeated_string: "\\303\\274\\352\\234\\237"\n')
def testPrintExoticUnicodeSubclass(self, message_module):
class UnicodeSub(six.text_type):
pass
message = message_module.TestAllTypes()
message.repeated_string.append(UnicodeSub(u'\u00fc\ua71f'))
self.CompareToGoldenText(
text_format.MessageToString(message),
'repeated_string: "\\303\\274\\352\\234\\237"\n')
def testPrintNestedMessageAsOneLine(self, message_module):
message = message_module.TestAllTypes()
msg = message.repeated_nested_message.add()
msg.bb = 42
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'repeated_nested_message { bb: 42 }')
def testPrintRepeatedFieldsAsOneLine(self, message_module):
message = message_module.TestAllTypes()
message.repeated_int32.append(1)
message.repeated_int32.append(1)
message.repeated_int32.append(3)
message.repeated_string.append('Google')
message.repeated_string.append('Zurich')
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'repeated_int32: 1 repeated_int32: 1 repeated_int32: 3 '
'repeated_string: "Google" repeated_string: "Zurich"')
def testPrintNestedNewLineInStringAsOneLine(self, message_module):
message = message_module.TestAllTypes()
message.optional_string = 'a\nnew\nline'
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'optional_string: "a\\nnew\\nline"')
def testPrintExoticAsOneLine(self, message_module):
message = message_module.TestAllTypes()
message.repeated_int64.append(-9223372036854775808)
message.repeated_uint64.append(18446744073709551615)
message.repeated_double.append(123.456)
message.repeated_double.append(1.23e22)
message.repeated_double.append(1.23e-18)
message.repeated_string.append('\000\001\a\b\f\n\r\t\v\\\'"')
message.repeated_string.append(u'\u00fc\ua71f')
self.CompareToGoldenText(
self.RemoveRedundantZeros(
text_format.MessageToString(message, as_one_line=True)),
'repeated_int64: -9223372036854775808'
' repeated_uint64: 18446744073709551615'
' repeated_double: 123.456'
' repeated_double: 1.23e+22'
' repeated_double: 1.23e-18'
' repeated_string: '
'"\\000\\001\\007\\010\\014\\n\\r\\t\\013\\\\\\\'\\""'
' repeated_string: "\\303\\274\\352\\234\\237"')
def testRoundTripExoticAsOneLine(self, message_module):
message = message_module.TestAllTypes()
message.repeated_int64.append(-9223372036854775808)
message.repeated_uint64.append(18446744073709551615)
message.repeated_double.append(123.456)
message.repeated_double.append(1.23e22)
message.repeated_double.append(1.23e-18)
message.repeated_string.append('\000\001\a\b\f\n\r\t\v\\\'"')
message.repeated_string.append(u'\u00fc\ua71f')
# Test as_utf8 = False.
wire_text = text_format.MessageToString(
message, as_one_line=True, as_utf8=False)
parsed_message = message_module.TestAllTypes()
r = text_format.Parse(wire_text, parsed_message)
self.assertIs(r, parsed_message)
self.assertEqual(message, parsed_message)
# Test as_utf8 = True.
wire_text = text_format.MessageToString(
message, as_one_line=True, as_utf8=True)
parsed_message = message_module.TestAllTypes()
r = text_format.Parse(wire_text, parsed_message)
self.assertIs(r, parsed_message)
self.assertEqual(message, parsed_message,
'\n%s != %s' % (message, parsed_message))
def testPrintRawUtf8String(self, message_module):
message = message_module.TestAllTypes()
message.repeated_string.append(u'\u00fc\ua71f')
text = text_format.MessageToString(message, as_utf8=True)
self.CompareToGoldenText(text, 'repeated_string: "\303\274\352\234\237"\n')
parsed_message = message_module.TestAllTypes()
text_format.Parse(text, parsed_message)
self.assertEqual(message, parsed_message,
'\n%s != %s' % (message, parsed_message))
def testPrintFloatFormat(self, message_module):
# Check that float_format argument is passed to sub-message formatting.
message = message_module.NestedTestAllTypes()
# We use 1.25 as it is a round number in binary. The proto 32-bit float
# will not gain additional imprecise digits as a 64-bit Python float and
# show up in its str. 32-bit 1.2 is noisy when extended to 64-bit:
# >>> struct.unpack('f', struct.pack('f', 1.2))[0]
# 1.2000000476837158
# >>> struct.unpack('f', struct.pack('f', 1.25))[0]
# 1.25
message.payload.optional_float = 1.25
# Check rounding at 15 significant digits
message.payload.optional_double = -.000003456789012345678
# Check no decimal point.
message.payload.repeated_float.append(-5642)
# Check no trailing zeros.
message.payload.repeated_double.append(.000078900)
formatted_fields = ['optional_float: 1.25',
'optional_double: -3.45678901234568e-6',
'repeated_float: -5642',
'repeated_double: 7.89e-5']
text_message = text_format.MessageToString(message, float_format='.15g')
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_message),
'payload {{\n {0}\n {1}\n {2}\n {3}\n}}\n'.format(*formatted_fields))
# as_one_line=True is a separate code branch where float_format is passed.
text_message = text_format.MessageToString(message, as_one_line=True,
float_format='.15g')
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_message),
'payload {{ {0} {1} {2} {3} }}'.format(*formatted_fields))
def testMessageToString(self, message_module):
message = message_module.ForeignMessage()
message.c = 123
self.assertEqual('c: 123\n', str(message))
def testParseAllFields(self, message_module):
message = message_module.TestAllTypes()
test_util.SetAllFields(message)
ascii_text = text_format.MessageToString(message)
parsed_message = message_module.TestAllTypes()
text_format.Parse(ascii_text, parsed_message)
self.assertEqual(message, parsed_message)
if message_module is unittest_pb2:
test_util.ExpectAllFieldsSet(self, message)
def testParseExotic(self, message_module):
message = message_module.TestAllTypes()
text = ('repeated_int64: -9223372036854775808\n'
'repeated_uint64: 18446744073709551615\n'
'repeated_double: 123.456\n'
'repeated_double: 1.23e+22\n'
'repeated_double: 1.23e-18\n'
'repeated_string: \n'
'"\\000\\001\\007\\010\\014\\n\\r\\t\\013\\\\\\\'\\""\n'
'repeated_string: "foo" \'corge\' "grault"\n'
'repeated_string: "\\303\\274\\352\\234\\237"\n'
'repeated_string: "\\xc3\\xbc"\n'
'repeated_string: "\xc3\xbc"\n')
text_format.Parse(text, message)
self.assertEqual(-9223372036854775808, message.repeated_int64[0])
self.assertEqual(18446744073709551615, message.repeated_uint64[0])
self.assertEqual(123.456, message.repeated_double[0])
self.assertEqual(1.23e22, message.repeated_double[1])
self.assertEqual(1.23e-18, message.repeated_double[2])
self.assertEqual(
'\000\001\a\b\f\n\r\t\v\\\'"', message.repeated_string[0])
self.assertEqual('foocorgegrault', message.repeated_string[1])
self.assertEqual(u'\u00fc\ua71f', message.repeated_string[2])
self.assertEqual(u'\u00fc', message.repeated_string[3])
def testParseTrailingCommas(self, message_module):
message = message_module.TestAllTypes()
text = ('repeated_int64: 100;\n'
'repeated_int64: 200;\n'
'repeated_int64: 300,\n'
'repeated_string: "one",\n'
'repeated_string: "two";\n')
text_format.Parse(text, message)
self.assertEqual(100, message.repeated_int64[0])
self.assertEqual(200, message.repeated_int64[1])
self.assertEqual(300, message.repeated_int64[2])
self.assertEqual(u'one', message.repeated_string[0])
self.assertEqual(u'two', message.repeated_string[1])
def testParseRepeatedScalarShortFormat(self, message_module):
message = message_module.TestAllTypes()
text = ('repeated_int64: [100, 200];\n'
'repeated_int64: 300,\n'
'repeated_string: ["one", "two"];\n')
text_format.Parse(text, message)
self.assertEqual(100, message.repeated_int64[0])
self.assertEqual(200, message.repeated_int64[1])
self.assertEqual(300, message.repeated_int64[2])
self.assertEqual(u'one', message.repeated_string[0])
self.assertEqual(u'two', message.repeated_string[1])
def testParseEmptyText(self, message_module):
message = message_module.TestAllTypes()
text = ''
text_format.Parse(text, message)
self.assertEqual(message_module.TestAllTypes(), message)
def testParseInvalidUtf8(self, message_module):
message = message_module.TestAllTypes()
text = 'repeated_string: "\\xc3\\xc3"'
self.assertRaises(text_format.ParseError, text_format.Parse, text, message)
def testParseSingleWord(self, message_module):
message = message_module.TestAllTypes()
text = 'foo'
six.assertRaisesRegex(self,
text_format.ParseError,
(r'1:1 : Message type "\w+.TestAllTypes" has no field named '
r'"foo".'),
text_format.Parse, text, message)
def testParseUnknownField(self, message_module):
message = message_module.TestAllTypes()
text = 'unknown_field: 8\n'
six.assertRaisesRegex(self,
text_format.ParseError,
(r'1:1 : Message type "\w+.TestAllTypes" has no field named '
r'"unknown_field".'),
text_format.Parse, text, message)
def testParseBadEnumValue(self, message_module):
message = message_module.TestAllTypes()
text = 'optional_nested_enum: BARR'
six.assertRaisesRegex(self,
text_format.ParseError,
(r'1:23 : Enum type "\w+.TestAllTypes.NestedEnum" '
r'has no value named BARR.'),
text_format.Parse, text, message)
message = message_module.TestAllTypes()
text = 'optional_nested_enum: 100'
six.assertRaisesRegex(self,
text_format.ParseError,
(r'1:23 : Enum type "\w+.TestAllTypes.NestedEnum" '
r'has no value with number 100.'),
text_format.Parse, text, message)
def testParseBadIntValue(self, message_module):
message = message_module.TestAllTypes()
text = 'optional_int32: bork'
six.assertRaisesRegex(self,
text_format.ParseError,
('1:17 : Couldn\'t parse integer: bork'),
text_format.Parse, text, message)
def testParseStringFieldUnescape(self, message_module):
message = message_module.TestAllTypes()
text = r'''repeated_string: "\xf\x62"
repeated_string: "\\xf\\x62"
repeated_string: "\\\xf\\\x62"
repeated_string: "\\\\xf\\\\x62"
repeated_string: "\\\\\xf\\\\\x62"
repeated_string: "\x5cx20"'''
text_format.Parse(text, message)
SLASH = '\\'
self.assertEqual('\x0fb', message.repeated_string[0])
self.assertEqual(SLASH + 'xf' + SLASH + 'x62', message.repeated_string[1])
self.assertEqual(SLASH + '\x0f' + SLASH + 'b', message.repeated_string[2])
self.assertEqual(SLASH + SLASH + 'xf' + SLASH + SLASH + 'x62',
message.repeated_string[3])
self.assertEqual(SLASH + SLASH + '\x0f' + SLASH + SLASH + 'b',
message.repeated_string[4])
self.assertEqual(SLASH + 'x20', message.repeated_string[5])
def testMergeDuplicateScalars(self, message_module):
message = message_module.TestAllTypes()
text = ('optional_int32: 42 '
'optional_int32: 67')
r = text_format.Merge(text, message)
self.assertIs(r, message)
self.assertEqual(67, message.optional_int32)
def testMergeDuplicateNestedMessageScalars(self, message_module):
message = message_module.TestAllTypes()
text = ('optional_nested_message { bb: 1 } '
'optional_nested_message { bb: 2 }')
r = text_format.Merge(text, message)
self.assertTrue(r is message)
self.assertEqual(2, message.optional_nested_message.bb)
def testParseOneof(self, message_module):
m = message_module.TestAllTypes()
m.oneof_uint32 = 11
m2 = message_module.TestAllTypes()
text_format.Parse(text_format.MessageToString(m), m2)
self.assertEqual('oneof_uint32', m2.WhichOneof('oneof_field'))
# These are tests that aren't fundamentally specific to proto2, but are at
# the moment because of differences between the proto2 and proto3 test schemas.
# Ideally the schemas would be made more similar so these tests could pass.
class OnlyWorksWithProto2RightNowTests(TextFormatBase):
def testPrintAllFieldsPointy(self):
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.CompareToGoldenFile(
self.RemoveRedundantZeros(
text_format.MessageToString(message, pointy_brackets=True)),
'text_format_unittest_data_pointy_oneof.txt')
def testParseGolden(self):
golden_text = '\n'.join(self.ReadGolden('text_format_unittest_data.txt'))
parsed_message = unittest_pb2.TestAllTypes()
r = text_format.Parse(golden_text, parsed_message)
self.assertIs(r, parsed_message)
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.assertEqual(message, parsed_message)
def testPrintAllFields(self):
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.CompareToGoldenFile(
self.RemoveRedundantZeros(text_format.MessageToString(message)),
'text_format_unittest_data_oneof_implemented.txt')
def testPrintAllFieldsPointy(self):
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.CompareToGoldenFile(
self.RemoveRedundantZeros(
text_format.MessageToString(message, pointy_brackets=True)),
'text_format_unittest_data_pointy_oneof.txt')
def testPrintInIndexOrder(self):
message = unittest_pb2.TestFieldOrderings()
message.my_string = '115'
message.my_int = 101
message.my_float = 111
message.optional_nested_message.oo = 0
message.optional_nested_message.bb = 1
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_format.MessageToString(
message, use_index_order=True)),
'my_string: \"115\"\nmy_int: 101\nmy_float: 111\n'
'optional_nested_message {\n oo: 0\n bb: 1\n}\n')
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_format.MessageToString(
message)),
'my_int: 101\nmy_string: \"115\"\nmy_float: 111\n'
'optional_nested_message {\n bb: 1\n oo: 0\n}\n')
def testMergeLinesGolden(self):
opened = self.ReadGolden('text_format_unittest_data.txt')
parsed_message = unittest_pb2.TestAllTypes()
r = text_format.MergeLines(opened, parsed_message)
self.assertIs(r, parsed_message)
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.assertEqual(message, parsed_message)
def testParseLinesGolden(self):
opened = self.ReadGolden('text_format_unittest_data.txt')
parsed_message = unittest_pb2.TestAllTypes()
r = text_format.ParseLines(opened, parsed_message)
self.assertIs(r, parsed_message)
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.assertEqual(message, parsed_message)
def testPrintMap(self):
message = map_unittest_pb2.TestMap()
message.map_int32_int32[-123] = -456
message.map_int64_int64[-2**33] = -2**34
message.map_uint32_uint32[123] = 456
message.map_uint64_uint64[2**33] = 2**34
message.map_string_string["abc"] = "123"
message.map_int32_foreign_message[111].c = 5
# Maps are serialized to text format using their underlying repeated
# representation.
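    # That is, each map entry prints as a submessage with 'key' and 'value'
    # fields, as the golden text below shows.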
self.CompareToGoldenText(
text_format.MessageToString(message),
'map_int32_int32 {\n'
' key: -123\n'
' value: -456\n'
'}\n'
'map_int64_int64 {\n'
' key: -8589934592\n'
' value: -17179869184\n'
'}\n'
'map_uint32_uint32 {\n'
' key: 123\n'
' value: 456\n'
'}\n'
'map_uint64_uint64 {\n'
' key: 8589934592\n'
' value: 17179869184\n'
'}\n'
'map_string_string {\n'
' key: "abc"\n'
' value: "123"\n'
'}\n'
'map_int32_foreign_message {\n'
' key: 111\n'
' value {\n'
' c: 5\n'
' }\n'
'}\n')
def testMapOrderEnforcement(self):
message = map_unittest_pb2.TestMap()
for letter in string.ascii_uppercase[13:26]:
message.map_string_string[letter] = 'dummy'
for letter in reversed(string.ascii_uppercase[0:13]):
message.map_string_string[letter] = 'dummy'
golden = ''.join((
'map_string_string {\n key: "%c"\n value: "dummy"\n}\n' % (letter,)
for letter in string.ascii_uppercase))
self.CompareToGoldenText(text_format.MessageToString(message), golden)
def testMapOrderSemantics(self):
golden_lines = self.ReadGolden('map_test_data.txt')
# The C++ implementation emits defaulted-value fields, while the Python
# implementation does not. Adjusting for this is awkward, but it is
# valuable to test against a common golden file.
line_blacklist = (' key: 0\n',
' value: 0\n',
' key: false\n',
' value: false\n')
golden_lines = [line for line in golden_lines if line not in line_blacklist]
message = map_unittest_pb2.TestMap()
text_format.ParseLines(golden_lines, message)
candidate = text_format.MessageToString(message)
# The Python implementation emits "1.0" for the double value that the C++
# implementation emits as "1".
candidate = candidate.replace('1.0', '1', 2)
self.assertMultiLineEqual(candidate, ''.join(golden_lines))
# Tests of proto2-only features (MessageSet, extensions, etc.).
class Proto2Tests(TextFormatBase):
def testPrintMessageSet(self):
message = unittest_mset_pb2.TestMessageSetContainer()
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
message.message_set.Extensions[ext1].i = 23
message.message_set.Extensions[ext2].str = 'foo'
self.CompareToGoldenText(
text_format.MessageToString(message),
'message_set {\n'
' [protobuf_unittest.TestMessageSetExtension1] {\n'
' i: 23\n'
' }\n'
' [protobuf_unittest.TestMessageSetExtension2] {\n'
' str: \"foo\"\n'
' }\n'
'}\n')
message = message_set_extensions_pb2.TestMessageSet()
ext = message_set_extensions_pb2.message_set_extension3
message.Extensions[ext].text = 'bar'
self.CompareToGoldenText(
text_format.MessageToString(message),
'[google.protobuf.internal.TestMessageSetExtension3] {\n'
' text: \"bar\"\n'
'}\n')
def testPrintMessageSetAsOneLine(self):
message = unittest_mset_pb2.TestMessageSetContainer()
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
message.message_set.Extensions[ext1].i = 23
message.message_set.Extensions[ext2].str = 'foo'
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'message_set {'
' [protobuf_unittest.TestMessageSetExtension1] {'
' i: 23'
' }'
' [protobuf_unittest.TestMessageSetExtension2] {'
' str: \"foo\"'
' }'
' }')
def testParseMessageSet(self):
message = unittest_pb2.TestAllTypes()
text = ('repeated_uint64: 1\n'
'repeated_uint64: 2\n')
text_format.Parse(text, message)
self.assertEqual(1, message.repeated_uint64[0])
self.assertEqual(2, message.repeated_uint64[1])
message = unittest_mset_pb2.TestMessageSetContainer()
text = ('message_set {\n'
' [protobuf_unittest.TestMessageSetExtension1] {\n'
' i: 23\n'
' }\n'
' [protobuf_unittest.TestMessageSetExtension2] {\n'
' str: \"foo\"\n'
' }\n'
'}\n')
text_format.Parse(text, message)
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
self.assertEqual(23, message.message_set.Extensions[ext1].i)
self.assertEqual('foo', message.message_set.Extensions[ext2].str)
def testPrintAllExtensions(self):
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
self.CompareToGoldenFile(
self.RemoveRedundantZeros(text_format.MessageToString(message)),
'text_format_unittest_extensions_data.txt')
def testPrintAllExtensionsPointy(self):
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
self.CompareToGoldenFile(
self.RemoveRedundantZeros(text_format.MessageToString(
message, pointy_brackets=True)),
'text_format_unittest_extensions_data_pointy.txt')
def testParseGoldenExtensions(self):
golden_text = '\n'.join(self.ReadGolden(
'text_format_unittest_extensions_data.txt'))
parsed_message = unittest_pb2.TestAllExtensions()
text_format.Parse(golden_text, parsed_message)
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
self.assertEqual(message, parsed_message)
def testParseAllExtensions(self):
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
ascii_text = text_format.MessageToString(message)
parsed_message = unittest_pb2.TestAllExtensions()
text_format.Parse(ascii_text, parsed_message)
self.assertEqual(message, parsed_message)
def testParseAllowedUnknownExtension(self):
# Skip over unknown extension correctly.
message = unittest_mset_pb2.TestMessageSetContainer()
text = ('message_set {\n'
' [unknown_extension] {\n'
' i: 23\n'
' [nested_unknown_ext]: {\n'
' i: 23\n'
' test: "test_string"\n'
' floaty_float: -0.315\n'
' num: -inf\n'
' multiline_str: "abc"\n'
' "def"\n'
' "xyz."\n'
' [nested_unknown_ext]: <\n'
' i: 23\n'
' i: 24\n'
' pointfloat: .3\n'
' test: "test_string"\n'
' floaty_float: -0.315\n'
' num: -inf\n'
' long_string: "test" "test2" \n'
' >\n'
' }\n'
' }\n'
' [unknown_extension]: 5\n'
'}\n')
text_format.Parse(text, message, allow_unknown_extension=True)
golden = 'message_set {\n}\n'
self.CompareToGoldenText(text_format.MessageToString(message), golden)
# Catch parse errors in unknown extension.
message = unittest_mset_pb2.TestMessageSetContainer()
malformed = ('message_set {\n'
' [unknown_extension] {\n'
' i:\n' # Missing value.
' }\n'
'}\n')
six.assertRaisesRegex(self,
text_format.ParseError,
'Invalid field value: }',
text_format.Parse, malformed, message,
allow_unknown_extension=True)
message = unittest_mset_pb2.TestMessageSetContainer()
malformed = ('message_set {\n'
' [unknown_extension] {\n'
' str: "malformed string\n' # Missing closing quote.
' }\n'
'}\n')
six.assertRaisesRegex(self,
text_format.ParseError,
'Invalid field value: "',
text_format.Parse, malformed, message,
allow_unknown_extension=True)
message = unittest_mset_pb2.TestMessageSetContainer()
malformed = ('message_set {\n'
' [unknown_extension] {\n'
' str: "malformed\n multiline\n string\n'
' }\n'
'}\n')
six.assertRaisesRegex(self,
text_format.ParseError,
'Invalid field value: "',
text_format.Parse, malformed, message,
allow_unknown_extension=True)
message = unittest_mset_pb2.TestMessageSetContainer()
malformed = ('message_set {\n'
' [malformed_extension] <\n'
' i: -5\n'
' \n' # Missing '>' here.
'}\n')
six.assertRaisesRegex(self,
text_format.ParseError,
'5:1 : Expected ">".',
text_format.Parse, malformed, message,
allow_unknown_extension=True)
# Don't allow unknown fields with allow_unknown_extension=True.
message = unittest_mset_pb2.TestMessageSetContainer()
malformed = ('message_set {\n'
' unknown_field: true\n'
' \n' # Missing '>' here.
'}\n')
six.assertRaisesRegex(self,
text_format.ParseError,
('2:3 : Message type '
'"proto2_wireformat_unittest.TestMessageSet" has no'
' field named "unknown_field".'),
text_format.Parse, malformed, message,
allow_unknown_extension=True)
    # Parse known extension correctly.
message = unittest_mset_pb2.TestMessageSetContainer()
text = ('message_set {\n'
' [protobuf_unittest.TestMessageSetExtension1] {\n'
' i: 23\n'
' }\n'
' [protobuf_unittest.TestMessageSetExtension2] {\n'
' str: \"foo\"\n'
' }\n'
'}\n')
text_format.Parse(text, message, allow_unknown_extension=True)
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
self.assertEqual(23, message.message_set.Extensions[ext1].i)
self.assertEqual('foo', message.message_set.Extensions[ext2].str)
def testParseBadExtension(self):
message = unittest_pb2.TestAllExtensions()
text = '[unknown_extension]: 8\n'
six.assertRaisesRegex(self,
text_format.ParseError,
'1:2 : Extension "unknown_extension" not registered.',
text_format.Parse, text, message)
message = unittest_pb2.TestAllTypes()
six.assertRaisesRegex(self,
text_format.ParseError,
('1:2 : Message type "protobuf_unittest.TestAllTypes" does not have '
'extensions.'),
text_format.Parse, text, message)
def testMergeDuplicateExtensionScalars(self):
message = unittest_pb2.TestAllExtensions()
text = ('[protobuf_unittest.optional_int32_extension]: 42 '
'[protobuf_unittest.optional_int32_extension]: 67')
text_format.Merge(text, message)
self.assertEqual(
67,
message.Extensions[unittest_pb2.optional_int32_extension])
def testParseDuplicateExtensionScalars(self):
message = unittest_pb2.TestAllExtensions()
text = ('[protobuf_unittest.optional_int32_extension]: 42 '
'[protobuf_unittest.optional_int32_extension]: 67')
six.assertRaisesRegex(self,
text_format.ParseError,
('1:96 : Message type "protobuf_unittest.TestAllExtensions" '
'should not have multiple '
'"protobuf_unittest.optional_int32_extension" extensions.'),
text_format.Parse, text, message)
def testParseDuplicateNestedMessageScalars(self):
message = unittest_pb2.TestAllTypes()
text = ('optional_nested_message { bb: 1 } '
'optional_nested_message { bb: 2 }')
six.assertRaisesRegex(self,
text_format.ParseError,
('1:65 : Message type "protobuf_unittest.TestAllTypes.NestedMessage" '
'should not have multiple "bb" fields.'),
text_format.Parse, text, message)
def testParseDuplicateScalars(self):
message = unittest_pb2.TestAllTypes()
text = ('optional_int32: 42 '
'optional_int32: 67')
six.assertRaisesRegex(self,
text_format.ParseError,
('1:36 : Message type "protobuf_unittest.TestAllTypes" should not '
'have multiple "optional_int32" fields.'),
text_format.Parse, text, message)
def testParseGroupNotClosed(self):
message = unittest_pb2.TestAllTypes()
text = 'RepeatedGroup: <'
six.assertRaisesRegex(self,
text_format.ParseError, '1:16 : Expected ">".',
text_format.Parse, text, message)
text = 'RepeatedGroup: {'
six.assertRaisesRegex(self,
text_format.ParseError, '1:16 : Expected "}".',
text_format.Parse, text, message)
def testParseEmptyGroup(self):
message = unittest_pb2.TestAllTypes()
text = 'OptionalGroup: {}'
text_format.Parse(text, message)
self.assertTrue(message.HasField('optionalgroup'))
message.Clear()
message = unittest_pb2.TestAllTypes()
text = 'OptionalGroup: <>'
text_format.Parse(text, message)
self.assertTrue(message.HasField('optionalgroup'))
# Maps aren't really proto2-only, but our test schema only has maps for
# proto2.
def testParseMap(self):
text = ('map_int32_int32 {\n'
' key: -123\n'
' value: -456\n'
'}\n'
'map_int64_int64 {\n'
' key: -8589934592\n'
' value: -17179869184\n'
'}\n'
'map_uint32_uint32 {\n'
' key: 123\n'
' value: 456\n'
'}\n'
'map_uint64_uint64 {\n'
' key: 8589934592\n'
' value: 17179869184\n'
'}\n'
'map_string_string {\n'
' key: "abc"\n'
' value: "123"\n'
'}\n'
'map_int32_foreign_message {\n'
' key: 111\n'
' value {\n'
' c: 5\n'
' }\n'
'}\n')
message = map_unittest_pb2.TestMap()
text_format.Parse(text, message)
self.assertEqual(-456, message.map_int32_int32[-123])
self.assertEqual(-2**34, message.map_int64_int64[-2**33])
self.assertEqual(456, message.map_uint32_uint32[123])
self.assertEqual(2**34, message.map_uint64_uint64[2**33])
self.assertEqual("123", message.map_string_string["abc"])
self.assertEqual(5, message.map_int32_foreign_message[111].c)
class TokenizerTest(unittest.TestCase):
def testSimpleTokenCases(self):
text = ('identifier1:"string1"\n \n\n'
'identifier2 : \n \n123 \n identifier3 :\'string\'\n'
'identifiER_4 : 1.1e+2 ID5:-0.23 ID6:\'aaaa\\\'bbbb\'\n'
'ID7 : "aa\\"bb"\n\n\n\n ID8: {A:inf B:-inf C:true D:false}\n'
'ID9: 22 ID10: -111111111111111111 ID11: -22\n'
'ID12: 2222222222222222222 ID13: 1.23456f ID14: 1.2e+2f '
'false_bool: 0 true_BOOL:t \n true_bool1: 1 false_BOOL1:f ')
tokenizer = text_format._Tokenizer(text.splitlines())
methods = [(tokenizer.ConsumeIdentifier, 'identifier1'),
':',
(tokenizer.ConsumeString, 'string1'),
(tokenizer.ConsumeIdentifier, 'identifier2'),
':',
(tokenizer.ConsumeInt32, 123),
(tokenizer.ConsumeIdentifier, 'identifier3'),
':',
(tokenizer.ConsumeString, 'string'),
(tokenizer.ConsumeIdentifier, 'identifiER_4'),
':',
(tokenizer.ConsumeFloat, 1.1e+2),
(tokenizer.ConsumeIdentifier, 'ID5'),
':',
(tokenizer.ConsumeFloat, -0.23),
(tokenizer.ConsumeIdentifier, 'ID6'),
':',
(tokenizer.ConsumeString, 'aaaa\'bbbb'),
(tokenizer.ConsumeIdentifier, 'ID7'),
':',
(tokenizer.ConsumeString, 'aa\"bb'),
(tokenizer.ConsumeIdentifier, 'ID8'),
':',
'{',
(tokenizer.ConsumeIdentifier, 'A'),
':',
(tokenizer.ConsumeFloat, float('inf')),
(tokenizer.ConsumeIdentifier, 'B'),
':',
(tokenizer.ConsumeFloat, -float('inf')),
(tokenizer.ConsumeIdentifier, 'C'),
':',
(tokenizer.ConsumeBool, True),
(tokenizer.ConsumeIdentifier, 'D'),
':',
(tokenizer.ConsumeBool, False),
'}',
(tokenizer.ConsumeIdentifier, 'ID9'),
':',
(tokenizer.ConsumeUint32, 22),
(tokenizer.ConsumeIdentifier, 'ID10'),
':',
(tokenizer.ConsumeInt64, -111111111111111111),
(tokenizer.ConsumeIdentifier, 'ID11'),
':',
(tokenizer.ConsumeInt32, -22),
(tokenizer.ConsumeIdentifier, 'ID12'),
':',
(tokenizer.ConsumeUint64, 2222222222222222222),
(tokenizer.ConsumeIdentifier, 'ID13'),
':',
(tokenizer.ConsumeFloat, 1.23456),
(tokenizer.ConsumeIdentifier, 'ID14'),
':',
(tokenizer.ConsumeFloat, 1.2e+2),
(tokenizer.ConsumeIdentifier, 'false_bool'),
':',
(tokenizer.ConsumeBool, False),
(tokenizer.ConsumeIdentifier, 'true_BOOL'),
':',
(tokenizer.ConsumeBool, True),
(tokenizer.ConsumeIdentifier, 'true_bool1'),
':',
(tokenizer.ConsumeBool, True),
(tokenizer.ConsumeIdentifier, 'false_BOOL1'),
':',
(tokenizer.ConsumeBool, False)]
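    # Entries are either a literal token (e.g. ':') to match and skip, or a
    # (consume_method, expected_value) pair to invoke and check.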
i = 0
while not tokenizer.AtEnd():
m = methods[i]
      if isinstance(m, str):
token = tokenizer.token
self.assertEqual(token, m)
tokenizer.NextToken()
else:
self.assertEqual(m[1], m[0]())
i += 1
def testConsumeIntegers(self):
# This test only tests the failures in the integer parsing methods as well
# as the '0' special cases.
int64_max = (1 << 63) - 1
uint32_max = (1 << 32) - 1
text = '-1 %d %d' % (uint32_max + 1, int64_max + 1)
tokenizer = text_format._Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeUint32)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeUint64)
self.assertEqual(-1, tokenizer.ConsumeInt32())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeUint32)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeInt32)
self.assertEqual(uint32_max + 1, tokenizer.ConsumeInt64())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeInt64)
self.assertEqual(int64_max + 1, tokenizer.ConsumeUint64())
self.assertTrue(tokenizer.AtEnd())
text = '-0 -0 0 0'
tokenizer = text_format._Tokenizer(text.splitlines())
self.assertEqual(0, tokenizer.ConsumeUint32())
self.assertEqual(0, tokenizer.ConsumeUint64())
self.assertEqual(0, tokenizer.ConsumeUint32())
self.assertEqual(0, tokenizer.ConsumeUint64())
self.assertTrue(tokenizer.AtEnd())
def testConsumeByteString(self):
text = '"string1\''
tokenizer = text_format._Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = 'string1"'
tokenizer = text_format._Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = '\n"\\xt"'
tokenizer = text_format._Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = '\n"\\"'
tokenizer = text_format._Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = '\n"\\x"'
tokenizer = text_format._Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
def testConsumeBool(self):
text = 'not-a-bool'
tokenizer = text_format._Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeBool)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
coderanger/pychef | chef/tests/test_search.py | 5 | 2531 | from unittest2 import skip
from chef import Search, Node
from chef.exceptions import ChefError
from chef.tests import ChefTestCase, mockSearch
class SearchTestCase(ChefTestCase):
def test_search_all(self):
s = Search('node')
self.assertGreaterEqual(len(s), 3)
self.assertIn('test_1', s)
self.assertIn('test_2', s)
self.assertIn('test_3', s)
def test_search_query(self):
s = Search('node', 'role:test_1')
self.assertGreaterEqual(len(s), 2)
self.assertIn('test_1', s)
self.assertNotIn('test_2', s)
self.assertIn('test_3', s)
def test_list(self):
searches = Search.list()
self.assertIn('node', searches)
self.assertIn('role', searches)
def test_search_set_query(self):
s = Search('node').query('role:test_1')
self.assertGreaterEqual(len(s), 2)
self.assertIn('test_1', s)
self.assertNotIn('test_2', s)
self.assertIn('test_3', s)
def test_search_call(self):
s = Search('node')('role:test_1')
self.assertGreaterEqual(len(s), 2)
self.assertIn('test_1', s)
self.assertNotIn('test_2', s)
self.assertIn('test_3', s)
def test_rows(self):
s = Search('node', rows=1)
self.assertEqual(len(s), 1)
self.assertGreaterEqual(s.total, 3)
def test_start(self):
s = Search('node', start=1)
self.assertEqual(len(s), s.total-1)
self.assertGreaterEqual(s.total, 3)
def test_slice(self):
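        # Slicing appears to issue a narrowed query (adjusting start/rows)
        # rather than fetching every row, which is why each slice below still
        # reports the full total.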
s = Search('node')[1:2]
self.assertEqual(len(s), 1)
self.assertGreaterEqual(s.total, 3)
s2 = s[1:2]
self.assertEqual(len(s2), 1)
self.assertGreaterEqual(s2.total, 3)
self.assertNotEqual(s[0]['name'], s2[0]['name'])
s3 = Search('node')[2:3]
self.assertEqual(len(s3), 1)
self.assertGreaterEqual(s3.total, 3)
self.assertEqual(s2[0]['name'], s3[0]['name'])
def test_object(self):
s = Search('node', 'name:test_1')
self.assertEqual(len(s), 1)
node = s[0].object
self.assertEqual(node.name, 'test_1')
self.assertEqual(node.run_list, ['role[test_1]'])
class MockSearchTestCase(ChefTestCase):
@mockSearch({
('node', '*:*'): [Node('fake_1', skip_load=True).to_dict()]
})
def test_single_node(self, MockSearch):
import chef.search
s = chef.search.Search('node')
self.assertEqual(len(s), 1)
self.assertIn('fake_1', s)
| apache-2.0 |
thesuperzapper/tensorflow | tensorflow/python/estimator/run_config.py | 7 | 1949 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Environment configuration object for Estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class TaskType(object):
MASTER = 'master'
PS = 'ps'
WORKER = 'worker'
class RunConfig(object):
"""This class specifies the configurations for an `Estimator` run."""
@property
def cluster_spec(self):
return None
@property
def evaluation_master(self):
return ''
@property
def is_chief(self):
return True
@property
def master(self):
return ''
@property
def num_ps_replicas(self):
return 0
@property
def num_worker_replicas(self):
return 1
@property
def task_id(self):
return 0
@property
def task_type(self):
return TaskType.WORKER
@property
def tf_random_seed(self):
return 1
@property
def save_summary_steps(self):
return 100
@property
def save_checkpoints_secs(self):
return 600
@property
def session_config(self):
return None
@property
def save_checkpoints_steps(self):
return None
@property
def keep_checkpoint_max(self):
return 5
@property
def keep_checkpoint_every_n_hours(self):
return 10000
@property
def model_dir(self):
return None
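# A hedged usage sketch (illustrative, not part of TensorFlow's API): since
# consumers read these values as properties, a custom configuration can
# override just the ones it cares about. The subclass and path below are
# assumptions for demonstration only.
class _ExampleRunConfig(RunConfig):
  @property
  def model_dir(self):
    return '/tmp/example_model'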
| apache-2.0 |
ksteinfe/decodes | src/decodes/core/dc_mesh.py | 1 | 6004 | from decodes.core import *
from . import dc_base, dc_vec, dc_point, dc_has_pts  # Here we may only import modules that have been loaded before this one; see core/__init__.py for proper order.
if VERBOSE_FS: print("mesh.py loaded")
import copy, collections
class Mesh(HasPts):
"""
    A very simple mesh class storing vertices and ordered faces.
"""
subclass_attr = [] # this list of props is unset any time this HasPts object changes
def __init__(self, vertices=None, faces=None, basis=None):
""" Mesh Constructor.
:param vertices: The vertices of the mesh.
:type vertices: [Point]
        :param faces: List of faces, each an ordered list of vertex indices.
        :type faces: [[int]]
:param basis: The (optional) basis of the mesh.
:type basis: Basis
:result: Mesh object.
:rtype: Mesh
::
pts=[
Point(0,0,0),
Point(0,1,0),
Point(1,1,0),
Point(1,0,0),
Point(0,0,1),
Point(0,1,1),
Point(1,1,1),
Point(1,0,1),
]
quad_faces=[[0,1,2,3],[4,5,6,7],[0,4,5,1],[3,7,6,2]]
quadmesh=Mesh(pts,quad_faces)
"""
super(Mesh,self).__init__(vertices,basis) #HasPts constructor handles initalization of verts and basis
self._faces = [] if (faces is None) else faces
@property
def faces(self):
""" Returns a list of mesh faces.
:result: List of mesh faces.
:rtype: list
"""
return self._faces
def add_face(self,a,b,c,d=-1):
""" Adds a face to the mesh.
        :param a,b,c,d: Vertex indices of the face to be added (d is optional, for triangles).
        :type a,b,c,d: int
:result: Modifies list of faces.
:rtype: None
::
quadmesh.add_face(4,5,6,7)
"""
        # TODO: support adding a list of faces in the same way.
if max(a,b,c,d) < len(self.pts):
if (d>=0) : self._faces.append([a,b,c,d])
else: self._faces.append([a,b,c])
def face_pts(self,index):
""" Returns the points of a given face.
:param index: Face's index
:type index: int
        :returns: The vertices of the given face.
        :rtype: [Point]
::
quadmesh.face_pts(0)
"""
return [self.pts[i] for i in self.faces[index]]
def face_centroid(self,index):
""" Returns the centroids of individual mesh faces.
:param index: Index of a face.
:type index: int
:returns: The centroid of a face.
:rtype: Point
::
quadmesh.face_centroid(0)
"""
return Point.centroid(self.face_pts(index))
def face_normal(self,index):
""" Returns the normal vector of a face.
:param index: Index of a face.
:type index: int
:returns: Normal vector.
:rtype: Vec
::
quadmesh.face_normal(0)
"""
verts = self.face_pts(index)
if len(verts) == 3 : return Vec(verts[0],verts[1]).cross(Vec(verts[0],verts[2])).normalized()
else :
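            # For a quad, take the normals of the two corner triangles at
            # opposite vertices and bisect them, which tolerates slightly
            # non-planar faces.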
v0 = Vec(verts[0],verts[1]).cross(Vec(verts[0],verts[3])).normalized()
v1 = Vec(verts[2],verts[3]).cross(Vec(verts[2],verts[1])).normalized()
return Vec.bisector(v0,v1).normalized()
def __repr__(self):
return "msh[{0}v,{1}f]".format(len(self._verts),len(self._faces))
@staticmethod
def explode(msh):
""" Explodes a mesh into individual faces.
:param msh: Mesh to explode.
:type msh: Mesh
:returns: List of meshes.
        :rtype: [Mesh]
::
Mesh.explode(quadmesh)
"""
exploded_meshes = []
for face in msh.faces:
pts = [msh.pts[v] for v in face]
nface = [0,1,2] if len(face)==3 else [0,1,2,3]
exploded_meshes.append(Mesh(pts,[nface]))
return exploded_meshes
def to_pt_graph(self):
""" Returns a Graph representation of the mesh points by index.
:returns: A Graph of point indexes.
:rtype: Graph
::
quadmesh.to_pt_graph()
"""
        from decodes.extensions.graph import Graph
        graph = Graph()
for index in range(len(self.pts)):
for face in self.faces:
for px in face:
if index in face and index!=px: graph.add_edge(index, px)
return graph
def to_face_graph(self, val=1):
""" Returns a Graph representation of the mesh faces by index.
        :param val: Number of shared vertex indices required for two faces to count as neighbors.
:type val: int
:returns: A Graph of face indexes.
:rtype: Graph
::
quadmesh.to_face_graph(2)
"""
from decodes.extensions.graph import Graph
graph = Graph()
graph.naked_nodes = []
for f1 in range(len(self.faces)):
for f2 in range(len(self.faces)):
if f1 != f2:
count = 0
for index in self.faces[f2]:
if index in self.faces[f1]:
count+=1
if count >= val:
graph.add_edge(f1,f2)
if len(graph.edges[f1]) < len(self.faces[f1]):
if f1 not in graph.naked_nodes:
graph.naked_nodes.append(f1)
return graph
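    # Example (hypothetical usage): with val=2, faces sharing a full edge
    # (two coincident vertex indices) become neighbors:
    #   face_adjacency = quadmesh.to_face_graph(2)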
| gpl-3.0 |
louyihua/edx-platform | lms/djangoapps/mobile_api/video_outlines/tests.py | 17 | 33728 | # -*- coding: utf-8 -*-
"""
Tests for video outline API
"""
import itertools
from uuid import uuid4
from collections import namedtuple
import ddt
from nose.plugins.attrib import attr
from edxval import api
from xmodule.modulestore.tests.factories import ItemFactory
from xmodule.video_module import transcripts_utils
from xmodule.modulestore.django import modulestore
from xmodule.partitions.partitions import Group, UserPartition
from milestones.tests.utils import MilestonesTestCaseMixin
from mobile_api.models import MobileApiConfig
from openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory
from openedx.core.djangoapps.course_groups.models import CourseUserGroupPartitionGroup
from openedx.core.djangoapps.course_groups.cohorts import add_user_to_cohort, remove_user_from_cohort
from mobile_api.testutils import MobileAPITestCase, MobileAuthTestMixin, MobileCourseAccessTestMixin
class TestVideoAPITestCase(MobileAPITestCase):
"""
Base test class for video related mobile APIs
"""
def setUp(self):
super(TestVideoAPITestCase, self).setUp()
self.section = ItemFactory.create(
parent=self.course,
category="chapter",
display_name=u"test factory section omega \u03a9",
)
self.sub_section = ItemFactory.create(
parent=self.section,
category="sequential",
display_name=u"test subsection omega \u03a9",
)
self.unit = ItemFactory.create(
parent=self.sub_section,
category="vertical",
metadata={'graded': True, 'format': 'Homework'},
display_name=u"test unit omega \u03a9",
)
self.other_unit = ItemFactory.create(
parent=self.sub_section,
category="vertical",
metadata={'graded': True, 'format': 'Homework'},
display_name=u"test unit omega 2 \u03a9",
)
self.nameless_unit = ItemFactory.create(
parent=self.sub_section,
category="vertical",
metadata={'graded': True, 'format': 'Homework'},
display_name=None,
)
self.edx_video_id = 'testing-123'
self.video_url = 'http://val.edx.org/val/video.mp4'
self.video_url_high = 'http://val.edx.org/val/video_high.mp4'
self.youtube_url = 'http://val.edx.org/val/youtube.mp4'
self.html5_video_url = 'http://video.edx.org/html5/video.mp4'
api.create_profile('youtube')
api.create_profile('mobile_high')
api.create_profile('mobile_low')
# create the video in VAL
api.create_video({
'edx_video_id': self.edx_video_id,
'status': 'test',
'client_video_id': u"test video omega \u03a9",
'duration': 12,
'courses': [unicode(self.course.id)],
'encoded_videos': [
{
'profile': 'youtube',
'url': 'xyz123',
'file_size': 0,
'bitrate': 1500
},
{
'profile': 'mobile_low',
'url': self.video_url,
'file_size': 12345,
'bitrate': 250
},
{
'profile': 'mobile_high',
'url': self.video_url_high,
'file_size': 99999,
'bitrate': 250
},
]})
# Set requested profiles
MobileApiConfig(video_profiles="mobile_low,mobile_high,youtube").save()
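        # The profile list is assumed to double as preference order: a
        # summary's video_url should come from the first listed profile that
        # has an encoding available.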
class TestVideoAPIMixin(object):
"""
Mixin class that provides helpers for testing video related mobile APIs
"""
def _create_video_with_subs(self, custom_subid=None):
"""
Creates and returns a video with stored subtitles.
"""
subid = custom_subid or uuid4().hex
transcripts_utils.save_subs_to_store(
{
'start': [100, 200, 240, 390, 1000],
'end': [200, 240, 380, 1000, 1500],
'text': [
'subs #1',
'subs #2',
'subs #3',
'subs #4',
'subs #5'
]
},
subid,
self.course)
return ItemFactory.create(
parent=self.unit,
category="video",
edx_video_id=self.edx_video_id,
display_name=u"test video omega \u03a9",
sub=subid
)
def _verify_paths(self, course_outline, path_list, outline_index=0):
"""
Takes a path_list and compares it against the course_outline
Attributes:
course_outline (list): A list of dictionaries that includes a 'path'
and 'named_path' field which we will be comparing path_list to
path_list (list): A list of the expected strings
outline_index (int): Index into the course_outline list for which the
path is being tested.
"""
path = course_outline[outline_index]['path']
self.assertEqual(len(path), len(path_list))
for i in range(len(path_list)):
self.assertEqual(path_list[i], path[i]['name'])
#named_path will be deprecated eventually
named_path = course_outline[outline_index]['named_path']
self.assertEqual(len(named_path), len(path_list))
for i in range(len(path_list)):
self.assertEqual(path_list[i], named_path[i])
def _setup_course_partitions(self, scheme_id='random', is_cohorted=False):
"""Helper method to configure the user partitions in the course."""
self.partition_id = 0 # pylint: disable=attribute-defined-outside-init
self.course.user_partitions = [
UserPartition(
self.partition_id, 'first_partition', 'First Partition',
[Group(0, 'alpha'), Group(1, 'beta')],
scheme=None, scheme_id=scheme_id
),
]
self.course.cohort_config = {'cohorted': is_cohorted}
self.store.update_item(self.course, self.user.id)
def _setup_group_access(self, xblock, partition_id, group_ids):
"""Helper method to configure the partition and group mapping for the given xblock."""
xblock.group_access = {partition_id: group_ids}
self.store.update_item(xblock, self.user.id)
def _setup_split_module(self, sub_block_category):
"""Helper method to configure a split_test unit with children of type sub_block_category."""
self._setup_course_partitions()
self.split_test = ItemFactory.create( # pylint: disable=attribute-defined-outside-init
parent=self.unit,
category="split_test",
display_name=u"split test unit",
user_partition_id=0,
)
sub_block_a = ItemFactory.create(
parent=self.split_test,
category=sub_block_category,
display_name=u"split test block a",
)
sub_block_b = ItemFactory.create(
parent=self.split_test,
category=sub_block_category,
display_name=u"split test block b",
)
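        # split_test routes each partition group (keyed by its stringified
        # group id) to exactly one child block.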
self.split_test.group_id_to_child = {
str(index): url for index, url in enumerate([sub_block_a.location, sub_block_b.location])
}
self.store.update_item(self.split_test, self.user.id)
return sub_block_a, sub_block_b
@attr(shard=2)
class TestNonStandardCourseStructure(MobileAPITestCase, TestVideoAPIMixin, MilestonesTestCaseMixin):
"""
Tests /api/mobile/v0.5/video_outlines/courses/{course_id} with no course set
"""
REVERSE_INFO = {'name': 'video-summary-list', 'params': ['course_id']}
def setUp(self):
super(TestNonStandardCourseStructure, self).setUp()
self.chapter_under_course = ItemFactory.create(
parent=self.course,
category="chapter",
display_name=u"test factory chapter under course omega \u03a9",
)
self.section_under_course = ItemFactory.create(
parent=self.course,
category="sequential",
display_name=u"test factory section under course omega \u03a9",
)
self.section_under_chapter = ItemFactory.create(
parent=self.chapter_under_course,
category="sequential",
display_name=u"test factory section under chapter omega \u03a9",
)
self.vertical_under_course = ItemFactory.create(
parent=self.course,
category="vertical",
display_name=u"test factory vertical under course omega \u03a9",
)
self.vertical_under_section = ItemFactory.create(
parent=self.section_under_chapter,
category="vertical",
display_name=u"test factory vertical under section omega \u03a9",
)
def test_structure_course_video(self):
"""
Tests when there is a video without a vertical directly under course
"""
self.login_and_enroll()
ItemFactory.create(
parent=self.course,
category="video",
display_name=u"test factory video omega \u03a9",
)
course_outline = self.api_response().data
self.assertEqual(len(course_outline), 1)
section_url = course_outline[0]["section_url"]
unit_url = course_outline[0]["unit_url"]
self.assertRegexpMatches(section_url, r'courseware$')
self.assertEqual(section_url, unit_url)
self._verify_paths(course_outline, [])
def test_structure_course_vert_video(self):
"""
Tests when there is a video under vertical directly under course
"""
self.login_and_enroll()
ItemFactory.create(
parent=self.vertical_under_course,
category="video",
display_name=u"test factory video omega \u03a9",
)
course_outline = self.api_response().data
self.assertEqual(len(course_outline), 1)
section_url = course_outline[0]["section_url"]
unit_url = course_outline[0]["unit_url"]
self.assertRegexpMatches(
section_url,
r'courseware/test_factory_vertical_under_course_omega_%CE%A9/$'
)
self.assertEqual(section_url, unit_url)
self._verify_paths(
course_outline,
[
u'test factory vertical under course omega \u03a9'
]
)
def test_structure_course_chap_video(self):
"""
Tests when there is a video directly under chapter
"""
self.login_and_enroll()
ItemFactory.create(
parent=self.chapter_under_course,
category="video",
display_name=u"test factory video omega \u03a9",
)
course_outline = self.api_response().data
self.assertEqual(len(course_outline), 1)
section_url = course_outline[0]["section_url"]
unit_url = course_outline[0]["unit_url"]
self.assertRegexpMatches(
section_url,
r'courseware/test_factory_chapter_under_course_omega_%CE%A9/$'
)
self.assertEqual(section_url, unit_url)
self._verify_paths(
course_outline,
[
u'test factory chapter under course omega \u03a9',
]
)
def test_structure_course_section_video(self):
"""
Tests when chapter is none, and video under section under course
"""
self.login_and_enroll()
ItemFactory.create(
parent=self.section_under_course,
category="video",
display_name=u"test factory video omega \u03a9",
)
course_outline = self.api_response().data
self.assertEqual(len(course_outline), 1)
section_url = course_outline[0]["section_url"]
unit_url = course_outline[0]["unit_url"]
self.assertRegexpMatches(
section_url,
r'courseware/test_factory_section_under_course_omega_%CE%A9/$'
)
self.assertEqual(section_url, unit_url)
self._verify_paths(
course_outline,
[
u'test factory section under course omega \u03a9',
]
)
def test_structure_course_chap_section_video(self):
"""
        Tests when chapter and sequential exist, with a video that has no vertical.
"""
self.login_and_enroll()
ItemFactory.create(
parent=self.section_under_chapter,
category="video",
display_name=u"meow factory video omega \u03a9",
)
course_outline = self.api_response().data
self.assertEqual(len(course_outline), 1)
section_url = course_outline[0]["section_url"]
unit_url = course_outline[0]["unit_url"]
self.assertRegexpMatches(
section_url,
(
r'courseware/test_factory_chapter_under_course_omega_%CE%A9/' +
'test_factory_section_under_chapter_omega_%CE%A9/$'
)
)
self.assertEqual(section_url, unit_url)
self._verify_paths(
course_outline,
[
u'test factory chapter under course omega \u03a9',
u'test factory section under chapter omega \u03a9',
]
)
def test_structure_course_section_vert_video(self):
"""
Tests chapter->section->vertical->unit
"""
self.login_and_enroll()
ItemFactory.create(
parent=self.vertical_under_section,
category="video",
display_name=u"test factory video omega \u03a9",
)
course_outline = self.api_response().data
self.assertEqual(len(course_outline), 1)
section_url = course_outline[0]["section_url"]
unit_url = course_outline[0]["unit_url"]
self.assertRegexpMatches(
section_url,
(
r'courseware/test_factory_chapter_under_course_omega_%CE%A9/' +
'test_factory_section_under_chapter_omega_%CE%A9/$'
)
)
self.assertRegexpMatches(
unit_url,
(
r'courseware/test_factory_chapter_under_course_omega_%CE%A9/' +
'test_factory_section_under_chapter_omega_%CE%A9/1$'
)
)
self._verify_paths(
course_outline,
[
u'test factory chapter under course omega \u03a9',
u'test factory section under chapter omega \u03a9',
u'test factory vertical under section omega \u03a9'
]
)
@attr(shard=2)
@ddt.ddt
class TestVideoSummaryList(TestVideoAPITestCase, MobileAuthTestMixin, MobileCourseAccessTestMixin,
TestVideoAPIMixin, MilestonesTestCaseMixin):
"""
Tests for /api/mobile/v0.5/video_outlines/courses/{course_id}..
"""
REVERSE_INFO = {'name': 'video-summary-list', 'params': ['course_id']}
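    # Illustrative sketch, inferred from the assertions in the tests below
    # rather than from API documentation: the rough shape of one entry in
    # the response list for this endpoint.
    #
    #     {
    #         "section_url": "...",
    #         "unit_url": "...",
    #         "path": [{"name": "...", "id": "..."}, ...],
    #         "summary": {
    #             "name": "...", "category": "video", "video_url": "...",
    #             "video_thumbnail_url": "...", "duration": 0, "size": 0,
    #             "transcripts": {"en": "..."}, "language": "en",
    #             "only_on_web": False, "encoded_videos": {...},
    #         },
    #     }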
def test_only_on_web(self):
self.login_and_enroll()
course_outline = self.api_response().data
self.assertEqual(len(course_outline), 0)
subid = uuid4().hex
transcripts_utils.save_subs_to_store(
{
'start': [100],
'end': [200],
'text': [
'subs #1',
]
},
subid,
self.course)
ItemFactory.create(
parent=self.unit,
category="video",
display_name=u"test video",
only_on_web=True,
            sub=subid
)
course_outline = self.api_response().data
self.assertEqual(len(course_outline), 1)
self.assertIsNone(course_outline[0]["summary"]["video_url"])
self.assertIsNone(course_outline[0]["summary"]["video_thumbnail_url"])
self.assertEqual(course_outline[0]["summary"]["duration"], 0)
self.assertEqual(course_outline[0]["summary"]["size"], 0)
self.assertEqual(course_outline[0]["summary"]["name"], "test video")
self.assertEqual(course_outline[0]["summary"]["transcripts"], {})
self.assertIsNone(course_outline[0]["summary"]["language"])
self.assertEqual(course_outline[0]["summary"]["category"], "video")
self.assertTrue(course_outline[0]["summary"]["only_on_web"])
def test_mobile_api_config(self):
"""
Tests VideoSummaryList with different MobileApiConfig video_profiles
"""
self.login_and_enroll()
edx_video_id = "testing_mobile_high"
api.create_video({
'edx_video_id': edx_video_id,
'status': 'test',
'client_video_id': u"test video omega \u03a9",
'duration': 12,
'courses': [unicode(self.course.id)],
'encoded_videos': [
{
'profile': 'youtube',
'url': self.youtube_url,
'file_size': 2222,
'bitrate': 4444
},
{
'profile': 'mobile_high',
'url': self.video_url_high,
'file_size': 111,
'bitrate': 333
},
]})
ItemFactory.create(
parent=self.other_unit,
category="video",
display_name=u"testing mobile high video",
edx_video_id=edx_video_id,
)
expected_output = {
'category': u'video',
'video_thumbnail_url': None,
'language': u'en',
'name': u'testing mobile high video',
'video_url': self.video_url_high,
'duration': 12.0,
'transcripts': {
'en': 'http://testserver/api/mobile/v0.5/video_outlines/transcripts/{}/testing_mobile_high_video/en'.format(self.course.id) # pylint: disable=line-too-long
},
'only_on_web': False,
'encoded_videos': {
u'mobile_high': {
'url': self.video_url_high,
'file_size': 111
},
u'youtube': {
'url': self.youtube_url,
'file_size': 2222
}
},
'size': 111
}
# Testing when video_profiles='mobile_low,mobile_high,youtube'
course_outline = self.api_response().data
course_outline[0]['summary'].pop("id")
self.assertEqual(course_outline[0]['summary'], expected_output)
# Testing when there is no mobile_low, and that mobile_high doesn't show
MobileApiConfig(video_profiles="mobile_low,youtube").save()
course_outline = self.api_response().data
expected_output['encoded_videos'].pop('mobile_high')
expected_output['video_url'] = self.youtube_url
expected_output['size'] = 2222
course_outline[0]['summary'].pop("id")
self.assertEqual(course_outline[0]['summary'], expected_output)
# Testing where youtube is the default video over mobile_high
MobileApiConfig(video_profiles="youtube,mobile_high").save()
course_outline = self.api_response().data
expected_output['encoded_videos']['mobile_high'] = {
'url': self.video_url_high,
'file_size': 111
}
course_outline[0]['summary'].pop("id")
self.assertEqual(course_outline[0]['summary'], expected_output)
def test_video_not_in_val(self):
self.login_and_enroll()
self._create_video_with_subs()
ItemFactory.create(
parent=self.other_unit,
category="video",
edx_video_id="some_non_existent_id_in_val",
display_name=u"some non existent video in val",
html5_sources=[self.html5_video_url]
)
summary = self.api_response().data[1]['summary']
self.assertEqual(summary['name'], "some non existent video in val")
self.assertIsNone(summary['encoded_videos'])
self.assertIsNone(summary['duration'])
self.assertEqual(summary['size'], 0)
self.assertEqual(summary['video_url'], self.html5_video_url)
def test_course_list(self):
self.login_and_enroll()
self._create_video_with_subs()
ItemFactory.create(
parent=self.other_unit,
category="video",
display_name=u"test video omega 2 \u03a9",
html5_sources=[self.html5_video_url]
)
ItemFactory.create(
parent=self.other_unit,
category="video",
display_name=u"test video omega 3 \u03a9",
source=self.html5_video_url
)
ItemFactory.create(
parent=self.unit,
category="video",
edx_video_id=self.edx_video_id,
display_name=u"test draft video omega \u03a9",
visible_to_staff_only=True,
)
course_outline = self.api_response().data
self.assertEqual(len(course_outline), 3)
vid = course_outline[0]
self.assertIn('test_subsection_omega_%CE%A9', vid['section_url'])
self.assertIn('test_subsection_omega_%CE%A9/1', vid['unit_url'])
self.assertIn(u'test_video_omega_\u03a9', vid['summary']['id'])
self.assertEqual(vid['summary']['video_url'], self.video_url)
self.assertEqual(vid['summary']['size'], 12345)
self.assertIn('en', vid['summary']['transcripts'])
self.assertFalse(vid['summary']['only_on_web'])
self.assertEqual(course_outline[1]['summary']['video_url'], self.html5_video_url)
self.assertEqual(course_outline[1]['summary']['size'], 0)
self.assertFalse(course_outline[1]['summary']['only_on_web'])
self.assertEqual(course_outline[1]['path'][2]['name'], self.other_unit.display_name)
self.assertEqual(course_outline[1]['path'][2]['id'], unicode(self.other_unit.location))
self.assertEqual(course_outline[2]['summary']['video_url'], self.html5_video_url)
self.assertEqual(course_outline[2]['summary']['size'], 0)
self.assertFalse(course_outline[2]['summary']['only_on_web'])
def test_with_nameless_unit(self):
self.login_and_enroll()
ItemFactory.create(
parent=self.nameless_unit,
category="video",
edx_video_id=self.edx_video_id,
display_name=u"test draft video omega 2 \u03a9"
)
course_outline = self.api_response().data
self.assertEqual(len(course_outline), 1)
self.assertEqual(course_outline[0]['path'][2]['name'], self.nameless_unit.location.block_id)
def test_with_video_in_sub_section(self):
"""
        Tests a non-standard XML format where a video is directly underneath
        a sequential. We expect the same unit and section URL to be returned,
        since there is no unit vertical.
"""
self.login_and_enroll()
ItemFactory.create(
parent=self.sub_section,
category="video",
edx_video_id=self.edx_video_id,
display_name=u"video in the sub section"
)
course_outline = self.api_response().data
self.assertEqual(len(course_outline), 1)
self.assertEqual(len(course_outline[0]['path']), 2)
section_url = course_outline[0]["section_url"]
unit_url = course_outline[0]["unit_url"]
self.assertIn(
u'courseware/test_factory_section_omega_%CE%A9/test_subsection_omega_%CE%A9',
section_url
)
self.assertTrue(section_url)
self.assertTrue(unit_url)
self.assertEqual(section_url, unit_url)
@ddt.data(
*itertools.product([True, False], ["video", "problem"])
)
@ddt.unpack
def test_with_split_block(self, is_user_staff, sub_block_category):
"""Test with split_module->sub_block_category and for both staff and non-staff users."""
self.login_and_enroll()
self.user.is_staff = is_user_staff
self.user.save()
self._setup_split_module(sub_block_category)
video_outline = self.api_response().data
num_video_blocks = 1 if sub_block_category == "video" else 0
self.assertEqual(len(video_outline), num_video_blocks)
for block_index in range(num_video_blocks):
self._verify_paths(
video_outline,
[
self.section.display_name,
self.sub_section.display_name,
self.unit.display_name,
self.split_test.display_name
],
block_index
)
self.assertIn(u"split test block", video_outline[block_index]["summary"]["name"])
def test_with_split_vertical(self):
"""Test with split_module->vertical->video structure."""
self.login_and_enroll()
split_vertical_a, split_vertical_b = self._setup_split_module("vertical")
ItemFactory.create(
parent=split_vertical_a,
category="video",
display_name=u"video in vertical a",
)
ItemFactory.create(
parent=split_vertical_b,
category="video",
display_name=u"video in vertical b",
)
video_outline = self.api_response().data
# user should see only one of the videos (a or b).
self.assertEqual(len(video_outline), 1)
self.assertIn(u"video in vertical", video_outline[0]["summary"]["name"])
a_or_b = video_outline[0]["summary"]["name"][-1:]
self._verify_paths(
video_outline,
[
self.section.display_name,
self.sub_section.display_name,
self.unit.display_name,
self.split_test.display_name,
u"split test block " + a_or_b
],
)
def _create_cohorted_video(self, group_id):
"""Creates a cohorted video block, giving access to only the given group_id."""
video_block = ItemFactory.create(
parent=self.unit,
category="video",
display_name=u"video for group " + unicode(group_id),
)
self._setup_group_access(video_block, self.partition_id, [group_id])
def _create_cohorted_vertical_with_video(self, group_id):
"""Creates a cohorted vertical with a child video block, giving access to only the given group_id."""
vertical_block = ItemFactory.create(
parent=self.sub_section,
category="vertical",
display_name=u"vertical for group " + unicode(group_id),
)
self._setup_group_access(vertical_block, self.partition_id, [group_id])
ItemFactory.create(
parent=vertical_block,
category="video",
display_name=u"video for group " + unicode(group_id),
)
@ddt.data("_create_cohorted_video", "_create_cohorted_vertical_with_video")
def test_with_cohorted_content(self, content_creator_method_name):
self.login_and_enroll()
self._setup_course_partitions(scheme_id='cohort', is_cohorted=True)
cohorts = []
for group_id in [0, 1]:
getattr(self, content_creator_method_name)(group_id)
cohorts.append(CohortFactory(course_id=self.course.id, name=u"Cohort " + unicode(group_id)))
link = CourseUserGroupPartitionGroup(
course_user_group=cohorts[group_id],
partition_id=self.partition_id,
group_id=group_id,
)
link.save()
for cohort_index in range(len(cohorts)):
# add user to this cohort
add_user_to_cohort(cohorts[cohort_index], self.user.username)
# should only see video for this cohort
video_outline = self.api_response().data
self.assertEqual(len(video_outline), 1)
self.assertEquals(
u"video for group " + unicode(cohort_index),
video_outline[0]["summary"]["name"]
)
# remove user from this cohort
remove_user_from_cohort(cohorts[cohort_index], self.user.username)
# un-cohorted user should see no videos
video_outline = self.api_response().data
self.assertEqual(len(video_outline), 0)
# staff user sees all videos
self.user.is_staff = True
self.user.save()
video_outline = self.api_response().data
self.assertEqual(len(video_outline), 2)
def test_with_hidden_blocks(self):
self.login_and_enroll()
hidden_subsection = ItemFactory.create(
parent=self.section,
category="sequential",
hide_from_toc=True,
)
unit_within_hidden_subsection = ItemFactory.create(
parent=hidden_subsection,
category="vertical",
)
hidden_unit = ItemFactory.create(
parent=self.sub_section,
category="vertical",
hide_from_toc=True,
)
ItemFactory.create(
parent=unit_within_hidden_subsection,
category="video",
edx_video_id=self.edx_video_id,
)
ItemFactory.create(
parent=hidden_unit,
category="video",
edx_video_id=self.edx_video_id,
)
course_outline = self.api_response().data
self.assertEqual(len(course_outline), 0)
def test_language(self):
self.login_and_enroll()
video = ItemFactory.create(
parent=self.nameless_unit,
category="video",
edx_video_id=self.edx_video_id,
display_name=u"test draft video omega 2 \u03a9"
)
language_case = namedtuple('language_case', ['transcripts', 'expected_language'])
language_cases = [
# defaults to english
language_case({}, "en"),
# supports english
language_case({"en": 1}, "en"),
# supports another language
language_case({"lang1": 1}, "lang1"),
# returns first alphabetically-sorted language
language_case({"lang1": 1, "en": 2}, "en"),
language_case({"lang1": 1, "lang2": 2}, "lang1"),
]
for case in language_cases:
video.transcripts = case.transcripts
modulestore().update_item(video, self.user.id)
course_outline = self.api_response().data
self.assertEqual(len(course_outline), 1)
self.assertEqual(course_outline[0]['summary']['language'], case.expected_language)
def test_transcripts(self):
self.login_and_enroll()
video = ItemFactory.create(
parent=self.nameless_unit,
category="video",
edx_video_id=self.edx_video_id,
display_name=u"test draft video omega 2 \u03a9"
)
transcript_case = namedtuple('transcript_case', ['transcripts', 'english_subtitle', 'expected_transcripts'])
transcript_cases = [
# defaults to english
transcript_case({}, "", ["en"]),
transcript_case({}, "en-sub", ["en"]),
# supports english
transcript_case({"en": 1}, "", ["en"]),
transcript_case({"en": 1}, "en-sub", ["en"]),
# keeps both english and other languages
transcript_case({"lang1": 1, "en": 2}, "", ["lang1", "en"]),
transcript_case({"lang1": 1, "en": 2}, "en-sub", ["lang1", "en"]),
# adds english to list of languages only if english_subtitle is specified
transcript_case({"lang1": 1, "lang2": 2}, "", ["lang1", "lang2"]),
transcript_case({"lang1": 1, "lang2": 2}, "en-sub", ["lang1", "lang2", "en"]),
]
for case in transcript_cases:
video.transcripts = case.transcripts
video.sub = case.english_subtitle
modulestore().update_item(video, self.user.id)
course_outline = self.api_response().data
self.assertEqual(len(course_outline), 1)
self.assertSetEqual(
set(course_outline[0]['summary']['transcripts'].keys()),
set(case.expected_transcripts)
)
@attr(shard=2)
class TestTranscriptsDetail(TestVideoAPITestCase, MobileAuthTestMixin, MobileCourseAccessTestMixin,
TestVideoAPIMixin, MilestonesTestCaseMixin):
"""
Tests for /api/mobile/v0.5/video_outlines/transcripts/{course_id}..
"""
REVERSE_INFO = {'name': 'video-transcripts-detail', 'params': ['course_id']}
def setUp(self):
super(TestTranscriptsDetail, self).setUp()
self.video = self._create_video_with_subs()
def reverse_url(self, reverse_args=None, **kwargs):
reverse_args = reverse_args or {}
reverse_args.update({
'block_id': self.video.location.block_id,
'lang': kwargs.get('lang', 'en'),
})
return super(TestTranscriptsDetail, self).reverse_url(reverse_args, **kwargs)
def test_incorrect_language(self):
self.login_and_enroll()
self.api_response(expected_response_code=404, lang='pl')
def test_transcript_with_unicode_file_name(self):
self.video = self._create_video_with_subs(custom_subid=u'你好')
self.login_and_enroll()
self.api_response(expected_response_code=200, lang='en')
| agpl-3.0 |
Medium/phantomjs-1 | src/breakpad/src/tools/gyp/test/dependencies/gyptest-lib-only.py | 151 | 1091 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verify that a link time only dependency will get pulled into the set of built
targets, even if no executable uses it.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('lib_only.gyp')
test.build('lib_only.gyp', test.ALL)
# Make doesn't put static libs in a common 'lib' directory, like it does with
# shared libs, so check in the obj path corresponding to the source path.
test.built_file_must_exist('a', type=test.STATIC_LIB, libdir='obj.target')
# TODO(bradnelson/mark):
# On linux and windows a library target will at least pull its link dependencies
# into the generated sln/_main.scons, since not doing so confuses users.
# This is not currently implemented on mac, which has the opposite behavior.
if test.format == 'xcode':
test.built_file_must_not_exist('b', type=test.STATIC_LIB)
else:
test.built_file_must_exist('b', type=test.STATIC_LIB, libdir='obj.target/b')
test.pass_test()
| bsd-3-clause |
tdtrask/ansible | lib/ansible/galaxy/token.py | 102 | 2142 | ########################################################################
#
# (C) 2015, Chris Houseknecht <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from stat import S_IRUSR, S_IWUSR
import yaml
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class GalaxyToken(object):
    ''' Class for storing and retrieving the Galaxy token in ~/.ansible_galaxy '''
def __init__(self):
self.file = os.path.expanduser("~") + '/.ansible_galaxy'
self.config = yaml.safe_load(self.__open_config_for_read())
if not self.config:
self.config = {}
def __open_config_for_read(self):
if os.path.isfile(self.file):
display.vvv('Opened %s' % self.file)
return open(self.file, 'r')
        # config file not found; create it and chmod u+rw
f = open(self.file, 'w')
f.close()
os.chmod(self.file, S_IRUSR | S_IWUSR) # owner has +rw
display.vvv('Created %s' % self.file)
return open(self.file, 'r')
def set(self, token):
self.config['token'] = token
self.save()
def get(self):
return self.config.get('token', None)
def save(self):
with open(self.file, 'w') as f:
yaml.safe_dump(self.config, f, default_flow_style=False)
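# Illustrative sketch only -- not part of the original module. A minimal
# round trip through the GalaxyToken API above; the token value is
# hypothetical, and set() really writes to ~/.ansible_galaxy.
if __name__ == '__main__':
    token_store = GalaxyToken()
    token_store.set('example-api-token')
    assert token_store.get() == 'example-api-token'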
| gpl-3.0 |
slightlymadphoenix/activityPointsApp | activitypoints/lib/python3.5/site-packages/pip/_vendor/distlib/wheel.py | 412 | 39115 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2016 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import unicode_literals
import base64
import codecs
import datetime
import distutils.util
from email import message_from_file
import hashlib
import imp
import json
import logging
import os
import posixpath
import re
import shutil
import sys
import tempfile
import zipfile
from . import __version__, DistlibException
from .compat import sysconfig, ZipFile, fsdecode, text_type, filter
from .database import InstalledDistribution
from .metadata import Metadata, METADATA_FILENAME
from .util import (FileOperator, convert_path, CSVReader, CSVWriter, Cache,
cached_property, get_cache_base, read_exports, tempdir)
from .version import NormalizedVersion, UnsupportedVersionError
logger = logging.getLogger(__name__)
cache = None # created when needed
if hasattr(sys, 'pypy_version_info'):
IMP_PREFIX = 'pp'
elif sys.platform.startswith('java'):
IMP_PREFIX = 'jy'
elif sys.platform == 'cli':
IMP_PREFIX = 'ip'
else:
IMP_PREFIX = 'cp'
VER_SUFFIX = sysconfig.get_config_var('py_version_nodot')
if not VER_SUFFIX: # pragma: no cover
VER_SUFFIX = '%s%s' % sys.version_info[:2]
PYVER = 'py' + VER_SUFFIX
IMPVER = IMP_PREFIX + VER_SUFFIX
ARCH = distutils.util.get_platform().replace('-', '_').replace('.', '_')
ABI = sysconfig.get_config_var('SOABI')
if ABI and ABI.startswith('cpython-'):
ABI = ABI.replace('cpython-', 'cp')
else:
def _derive_abi():
parts = ['cp', VER_SUFFIX]
if sysconfig.get_config_var('Py_DEBUG'):
parts.append('d')
if sysconfig.get_config_var('WITH_PYMALLOC'):
parts.append('m')
if sysconfig.get_config_var('Py_UNICODE_SIZE') == 4:
parts.append('u')
return ''.join(parts)
ABI = _derive_abi()
del _derive_abi
FILENAME_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?
-(?P<py>\w+\d+(\.\w+\d+)*)
-(?P<bi>\w+)
-(?P<ar>\w+(\.\w+)*)
\.whl$
''', re.IGNORECASE | re.VERBOSE)
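# Illustrative note, not part of the original module: how FILENAME_RE
# decomposes a PEP 427 filename (the filename below is hypothetical):
#
#     >>> FILENAME_RE.match('distlib-0.1.9-cp27-none-linux_x86_64.whl'
#     ... ).groupdict('')
#     {'nm': 'distlib', 'vn': '0.1.9', 'bn': '', 'py': 'cp27',
#      'bi': 'none', 'ar': 'linux_x86_64'}   # key order illustrative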
NAME_VERSION_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?$
''', re.IGNORECASE | re.VERBOSE)
SHEBANG_RE = re.compile(br'\s*#![^\r\n]*')
SHEBANG_DETAIL_RE = re.compile(br'^(\s*#!("[^"]+"|\S+))\s+(.*)$')
SHEBANG_PYTHON = b'#!python'
SHEBANG_PYTHONW = b'#!pythonw'
if os.sep == '/':
to_posix = lambda o: o
else:
to_posix = lambda o: o.replace(os.sep, '/')
class Mounter(object):
def __init__(self):
self.impure_wheels = {}
self.libs = {}
def add(self, pathname, extensions):
self.impure_wheels[pathname] = extensions
self.libs.update(extensions)
def remove(self, pathname):
extensions = self.impure_wheels.pop(pathname)
for k, v in extensions:
if k in self.libs:
del self.libs[k]
def find_module(self, fullname, path=None):
if fullname in self.libs:
result = self
else:
result = None
return result
def load_module(self, fullname):
if fullname in sys.modules:
result = sys.modules[fullname]
else:
if fullname not in self.libs:
raise ImportError('unable to find extension for %s' % fullname)
result = imp.load_dynamic(fullname, self.libs[fullname])
result.__loader__ = self
parts = fullname.rsplit('.', 1)
if len(parts) > 1:
result.__package__ = parts[0]
return result
_hook = Mounter()
class Wheel(object):
"""
Class to build and install from Wheel files (PEP 427).
"""
wheel_version = (1, 1)
hash_kind = 'sha256'
def __init__(self, filename=None, sign=False, verify=False):
"""
Initialise an instance using a (valid) filename.
"""
self.sign = sign
self.should_verify = verify
self.buildver = ''
self.pyver = [PYVER]
self.abi = ['none']
self.arch = ['any']
self.dirname = os.getcwd()
if filename is None:
self.name = 'dummy'
self.version = '0.1'
self._filename = self.filename
else:
m = NAME_VERSION_RE.match(filename)
if m:
info = m.groupdict('')
self.name = info['nm']
# Reinstate the local version separator
self.version = info['vn'].replace('_', '-')
self.buildver = info['bn']
self._filename = self.filename
else:
dirname, filename = os.path.split(filename)
m = FILENAME_RE.match(filename)
if not m:
raise DistlibException('Invalid name or '
'filename: %r' % filename)
if dirname:
self.dirname = os.path.abspath(dirname)
self._filename = filename
info = m.groupdict('')
self.name = info['nm']
self.version = info['vn']
self.buildver = info['bn']
self.pyver = info['py'].split('.')
self.abi = info['bi'].split('.')
self.arch = info['ar'].split('.')
@property
def filename(self):
"""
Build and return a filename from the various components.
"""
if self.buildver:
buildver = '-' + self.buildver
else:
buildver = ''
pyver = '.'.join(self.pyver)
abi = '.'.join(self.abi)
arch = '.'.join(self.arch)
# replace - with _ as a local version separator
version = self.version.replace('-', '_')
return '%s-%s%s-%s-%s-%s.whl' % (self.name, version, buildver,
pyver, abi, arch)
@property
def exists(self):
path = os.path.join(self.dirname, self.filename)
return os.path.isfile(path)
@property
def tags(self):
for pyver in self.pyver:
for abi in self.abi:
for arch in self.arch:
yield pyver, abi, arch
@cached_property
def metadata(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
wheel_metadata = self.get_wheel_metadata(zf)
wv = wheel_metadata['Wheel-Version'].split('.', 1)
file_version = tuple([int(i) for i in wv])
if file_version < (1, 1):
fn = 'METADATA'
else:
fn = METADATA_FILENAME
try:
metadata_filename = posixpath.join(info_dir, fn)
with zf.open(metadata_filename) as bf:
wf = wrapper(bf)
result = Metadata(fileobj=wf)
except KeyError:
raise ValueError('Invalid wheel, because %s is '
'missing' % fn)
return result
def get_wheel_metadata(self, zf):
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
metadata_filename = posixpath.join(info_dir, 'WHEEL')
with zf.open(metadata_filename) as bf:
wf = codecs.getreader('utf-8')(bf)
message = message_from_file(wf)
return dict(message)
@cached_property
def info(self):
pathname = os.path.join(self.dirname, self.filename)
with ZipFile(pathname, 'r') as zf:
result = self.get_wheel_metadata(zf)
return result
def process_shebang(self, data):
m = SHEBANG_RE.match(data)
if m:
end = m.end()
shebang, data_after_shebang = data[:end], data[end:]
# Preserve any arguments after the interpreter
if b'pythonw' in shebang.lower():
shebang_python = SHEBANG_PYTHONW
else:
shebang_python = SHEBANG_PYTHON
m = SHEBANG_DETAIL_RE.match(shebang)
if m:
args = b' ' + m.groups()[-1]
else:
args = b''
shebang = shebang_python + args
data = shebang + data_after_shebang
else:
cr = data.find(b'\r')
lf = data.find(b'\n')
if cr < 0 or cr > lf:
term = b'\n'
else:
if data[cr:cr + 2] == b'\r\n':
term = b'\r\n'
else:
term = b'\r'
data = SHEBANG_PYTHON + term + data
return data
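    # Illustrative note, not part of the original module: process_shebang
    # normalises a concrete interpreter line to the generic '#!python' form
    # while keeping interpreter arguments, e.g. (hypothetical input):
    #
    #     b'#!/usr/bin/python -u\nprint(1)\n'
    #     -> b'#!python -u\nprint(1)\n'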
def get_hash(self, data, hash_kind=None):
if hash_kind is None:
hash_kind = self.hash_kind
try:
hasher = getattr(hashlib, hash_kind)
except AttributeError:
raise DistlibException('Unsupported hash algorithm: %r' % hash_kind)
result = hasher(data).digest()
result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii')
return hash_kind, result
def write_record(self, records, record_path, base):
records = list(records) # make a copy for sorting
p = to_posix(os.path.relpath(record_path, base))
records.append((p, '', ''))
records.sort()
with CSVWriter(record_path) as writer:
for row in records:
writer.writerow(row)
def write_records(self, info, libdir, archive_paths):
records = []
distinfo, info_dir = info
hasher = getattr(hashlib, self.hash_kind)
for ap, p in archive_paths:
with open(p, 'rb') as f:
data = f.read()
digest = '%s=%s' % self.get_hash(data)
size = os.path.getsize(p)
records.append((ap, digest, size))
p = os.path.join(distinfo, 'RECORD')
self.write_record(records, p, libdir)
ap = to_posix(os.path.join(info_dir, 'RECORD'))
archive_paths.append((ap, p))
def build_zip(self, pathname, archive_paths):
with ZipFile(pathname, 'w', zipfile.ZIP_DEFLATED) as zf:
for ap, p in archive_paths:
logger.debug('Wrote %s to %s in wheel', p, ap)
zf.write(p, ap)
def build(self, paths, tags=None, wheel_version=None):
"""
Build a wheel from files in specified paths, and use any specified tags
when determining the name of the wheel.
"""
if tags is None:
tags = {}
libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0]
if libkey == 'platlib':
is_pure = 'false'
default_pyver = [IMPVER]
default_abi = [ABI]
default_arch = [ARCH]
else:
is_pure = 'true'
default_pyver = [PYVER]
default_abi = ['none']
default_arch = ['any']
self.pyver = tags.get('pyver', default_pyver)
self.abi = tags.get('abi', default_abi)
self.arch = tags.get('arch', default_arch)
libdir = paths[libkey]
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
archive_paths = []
# First, stuff which is not in site-packages
for key in ('data', 'headers', 'scripts'):
if key not in paths:
continue
path = paths[key]
if os.path.isdir(path):
for root, dirs, files in os.walk(path):
for fn in files:
p = fsdecode(os.path.join(root, fn))
rp = os.path.relpath(p, path)
ap = to_posix(os.path.join(data_dir, key, rp))
archive_paths.append((ap, p))
if key == 'scripts' and not p.endswith('.exe'):
with open(p, 'rb') as f:
data = f.read()
data = self.process_shebang(data)
with open(p, 'wb') as f:
f.write(data)
# Now, stuff which is in site-packages, other than the
# distinfo stuff.
path = libdir
distinfo = None
for root, dirs, files in os.walk(path):
if root == path:
# At the top level only, save distinfo for later
# and skip it for now
for i, dn in enumerate(dirs):
dn = fsdecode(dn)
if dn.endswith('.dist-info'):
distinfo = os.path.join(root, dn)
del dirs[i]
break
assert distinfo, '.dist-info directory expected, not found'
for fn in files:
# comment out next suite to leave .pyc files in
if fsdecode(fn).endswith(('.pyc', '.pyo')):
continue
p = os.path.join(root, fn)
rp = to_posix(os.path.relpath(p, path))
archive_paths.append((rp, p))
# Now distinfo. Assumed to be flat, i.e. os.listdir is enough.
files = os.listdir(distinfo)
for fn in files:
if fn not in ('RECORD', 'INSTALLER', 'SHARED', 'WHEEL'):
p = fsdecode(os.path.join(distinfo, fn))
ap = to_posix(os.path.join(info_dir, fn))
archive_paths.append((ap, p))
wheel_metadata = [
'Wheel-Version: %d.%d' % (wheel_version or self.wheel_version),
'Generator: distlib %s' % __version__,
'Root-Is-Purelib: %s' % is_pure,
]
for pyver, abi, arch in self.tags:
wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch))
p = os.path.join(distinfo, 'WHEEL')
with open(p, 'w') as f:
f.write('\n'.join(wheel_metadata))
ap = to_posix(os.path.join(info_dir, 'WHEEL'))
archive_paths.append((ap, p))
# Now, at last, RECORD.
# Paths in here are archive paths - nothing else makes sense.
self.write_records((distinfo, info_dir), libdir, archive_paths)
# Now, ready to build the zip file
pathname = os.path.join(self.dirname, self.filename)
self.build_zip(pathname, archive_paths)
return pathname
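    # Illustrative sketch, not a documented distlib example: the shape of
    # the ``paths`` mapping build() expects. Directory names here are
    # hypothetical; the presence of 'purelib' vs. 'platlib' selects purity.
    #
    #     wheel = Wheel('mypkg-1.0')
    #     wheel.build({
    #         'purelib': 'build/lib',
    #         'scripts': 'build/scripts',
    #         'headers': 'build/headers',
    #         'data': 'build/data',
    #     })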
def install(self, paths, maker, **kwargs):
"""
Install a wheel to the specified paths. If kwarg ``warner`` is
specified, it should be a callable, which will be called with two
tuples indicating the wheel version of this software and the wheel
version in the file, if there is a discrepancy in the versions.
        This can be used to issue any warnings or raise any exceptions.
If kwarg ``lib_only`` is True, only the purelib/platlib files are
installed, and the headers, scripts, data and dist-info metadata are
not written.
The return value is a :class:`InstalledDistribution` instance unless
        ``lib_only`` is True, in which case the return value is ``None``.
"""
dry_run = maker.dry_run
warner = kwargs.get('warner')
lib_only = kwargs.get('lib_only', False)
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
record_name = posixpath.join(info_dir, 'RECORD')
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
with zf.open(wheel_metadata_name) as bwf:
wf = wrapper(bwf)
message = message_from_file(wf)
wv = message['Wheel-Version'].split('.', 1)
file_version = tuple([int(i) for i in wv])
if (file_version != self.wheel_version) and warner:
warner(self.wheel_version, file_version)
if message['Root-Is-Purelib'] == 'true':
libdir = paths['purelib']
else:
libdir = paths['platlib']
records = {}
with zf.open(record_name) as bf:
with CSVReader(stream=bf) as reader:
for row in reader:
p = row[0]
records[p] = row
data_pfx = posixpath.join(data_dir, '')
info_pfx = posixpath.join(info_dir, '')
script_pfx = posixpath.join(data_dir, 'scripts', '')
# make a new instance rather than a copy of maker's,
# as we mutate it
fileop = FileOperator(dry_run=dry_run)
fileop.record = True # so we can rollback if needed
bc = not sys.dont_write_bytecode # Double negatives. Lovely!
outfiles = [] # for RECORD writing
# for script copying/shebang processing
workdir = tempfile.mkdtemp()
# set target dir later
# we default add_launchers to False, as the
# Python Launcher should be used instead
maker.source_dir = workdir
maker.target_dir = None
try:
for zinfo in zf.infolist():
arcname = zinfo.filename
if isinstance(arcname, text_type):
u_arcname = arcname
else:
u_arcname = arcname.decode('utf-8')
# The signature file won't be in RECORD,
                # and we don't currently do anything with it
if u_arcname.endswith('/RECORD.jws'):
continue
row = records[u_arcname]
if row[2] and str(zinfo.file_size) != row[2]:
raise DistlibException('size mismatch for '
'%s' % u_arcname)
if row[1]:
kind, value = row[1].split('=', 1)
with zf.open(arcname) as bf:
data = bf.read()
_, digest = self.get_hash(data, kind)
if digest != value:
raise DistlibException('digest mismatch for '
'%s' % arcname)
if lib_only and u_arcname.startswith((info_pfx, data_pfx)):
logger.debug('lib_only: skipping %s', u_arcname)
continue
is_script = (u_arcname.startswith(script_pfx)
and not u_arcname.endswith('.exe'))
if u_arcname.startswith(data_pfx):
_, where, rp = u_arcname.split('/', 2)
outfile = os.path.join(paths[where], convert_path(rp))
else:
# meant for site-packages.
if u_arcname in (wheel_metadata_name, record_name):
continue
outfile = os.path.join(libdir, convert_path(u_arcname))
if not is_script:
with zf.open(arcname) as bf:
fileop.copy_stream(bf, outfile)
outfiles.append(outfile)
# Double check the digest of the written file
if not dry_run and row[1]:
with open(outfile, 'rb') as bf:
data = bf.read()
_, newdigest = self.get_hash(data, kind)
if newdigest != digest:
raise DistlibException('digest mismatch '
'on write for '
'%s' % outfile)
if bc and outfile.endswith('.py'):
try:
pyc = fileop.byte_compile(outfile)
outfiles.append(pyc)
except Exception:
# Don't give up if byte-compilation fails,
# but log it and perhaps warn the user
logger.warning('Byte-compilation failed',
exc_info=True)
else:
fn = os.path.basename(convert_path(arcname))
workname = os.path.join(workdir, fn)
with zf.open(arcname) as bf:
fileop.copy_stream(bf, workname)
dn, fn = os.path.split(outfile)
maker.target_dir = dn
filenames = maker.make(fn)
fileop.set_executable_mode(filenames)
outfiles.extend(filenames)
if lib_only:
logger.debug('lib_only: returning None')
dist = None
else:
# Generate scripts
# Try to get pydist.json so we can see if there are
# any commands to generate. If this fails (e.g. because
# of a legacy wheel), log a warning but don't give up.
commands = None
file_version = self.info['Wheel-Version']
if file_version == '1.0':
# Use legacy info
ep = posixpath.join(info_dir, 'entry_points.txt')
try:
with zf.open(ep) as bwf:
epdata = read_exports(bwf)
commands = {}
for key in ('console', 'gui'):
k = '%s_scripts' % key
if k in epdata:
commands['wrap_%s' % key] = d = {}
for v in epdata[k].values():
s = '%s:%s' % (v.prefix, v.suffix)
if v.flags:
s += ' %s' % v.flags
d[v.name] = s
except Exception:
logger.warning('Unable to read legacy script '
'metadata, so cannot generate '
'scripts')
else:
try:
with zf.open(metadata_name) as bwf:
wf = wrapper(bwf)
commands = json.load(wf).get('extensions')
if commands:
commands = commands.get('python.commands')
except Exception:
logger.warning('Unable to read JSON metadata, so '
'cannot generate scripts')
if commands:
console_scripts = commands.get('wrap_console', {})
gui_scripts = commands.get('wrap_gui', {})
if console_scripts or gui_scripts:
script_dir = paths.get('scripts', '')
if not os.path.isdir(script_dir):
raise ValueError('Valid script path not '
'specified')
maker.target_dir = script_dir
for k, v in console_scripts.items():
script = '%s = %s' % (k, v)
filenames = maker.make(script)
fileop.set_executable_mode(filenames)
if gui_scripts:
options = {'gui': True }
for k, v in gui_scripts.items():
script = '%s = %s' % (k, v)
filenames = maker.make(script, options)
fileop.set_executable_mode(filenames)
p = os.path.join(libdir, info_dir)
dist = InstalledDistribution(p)
# Write SHARED
paths = dict(paths) # don't change passed in dict
del paths['purelib']
del paths['platlib']
paths['lib'] = libdir
p = dist.write_shared_locations(paths, dry_run)
if p:
outfiles.append(p)
# Write RECORD
dist.write_installed_files(outfiles, paths['prefix'],
dry_run)
return dist
except Exception: # pragma: no cover
logger.exception('installation failed.')
fileop.rollback()
raise
finally:
shutil.rmtree(workdir)
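    # Illustrative sketch, not a documented distlib example: a typical
    # install() call. ScriptMaker is distlib's script generator; the target
    # paths below are hypothetical.
    #
    #     from distlib.scripts import ScriptMaker
    #     maker = ScriptMaker(None, None, add_launchers=False)
    #     paths = {'purelib': 'env/lib', 'platlib': 'env/lib',
    #              'scripts': 'env/bin', 'headers': 'env/include',
    #              'data': 'env/data', 'prefix': 'env'}
    #     dist = Wheel('mypkg-1.0-py27-none-any.whl').install(paths, maker)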
def _get_dylib_cache(self):
global cache
if cache is None:
# Use native string to avoid issues on 2.x: see Python #20140.
base = os.path.join(get_cache_base(), str('dylib-cache'),
sys.version[:3])
cache = Cache(base)
return cache
def _get_extensions(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
arcname = posixpath.join(info_dir, 'EXTENSIONS')
wrapper = codecs.getreader('utf-8')
result = []
with ZipFile(pathname, 'r') as zf:
try:
with zf.open(arcname) as bf:
wf = wrapper(bf)
extensions = json.load(wf)
cache = self._get_dylib_cache()
prefix = cache.prefix_to_dir(pathname)
cache_base = os.path.join(cache.base, prefix)
if not os.path.isdir(cache_base):
os.makedirs(cache_base)
for name, relpath in extensions.items():
dest = os.path.join(cache_base, convert_path(relpath))
if not os.path.exists(dest):
extract = True
else:
file_time = os.stat(dest).st_mtime
file_time = datetime.datetime.fromtimestamp(file_time)
info = zf.getinfo(relpath)
wheel_time = datetime.datetime(*info.date_time)
extract = wheel_time > file_time
if extract:
zf.extract(relpath, cache_base)
result.append((name, dest))
except KeyError:
pass
return result
def is_compatible(self):
"""
Determine if a wheel is compatible with the running system.
"""
return is_compatible(self)
def is_mountable(self):
"""
Determine if a wheel is asserted as mountable by its metadata.
"""
return True # for now - metadata details TBD
def mount(self, append=False):
pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
if not self.is_compatible():
msg = 'Wheel %s not compatible with this Python.' % pathname
raise DistlibException(msg)
if not self.is_mountable():
msg = 'Wheel %s is marked as not mountable.' % pathname
raise DistlibException(msg)
if pathname in sys.path:
logger.debug('%s already in path', pathname)
else:
if append:
sys.path.append(pathname)
else:
sys.path.insert(0, pathname)
extensions = self._get_extensions()
if extensions:
if _hook not in sys.meta_path:
sys.meta_path.append(_hook)
_hook.add(pathname, extensions)
def unmount(self):
pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
if pathname not in sys.path:
logger.debug('%s not in path', pathname)
else:
sys.path.remove(pathname)
if pathname in _hook.impure_wheels:
_hook.remove(pathname)
if not _hook.impure_wheels:
if _hook in sys.meta_path:
sys.meta_path.remove(_hook)
def verify(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
record_name = posixpath.join(info_dir, 'RECORD')
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
with zf.open(wheel_metadata_name) as bwf:
wf = wrapper(bwf)
message = message_from_file(wf)
wv = message['Wheel-Version'].split('.', 1)
file_version = tuple([int(i) for i in wv])
# TODO version verification
records = {}
with zf.open(record_name) as bf:
with CSVReader(stream=bf) as reader:
for row in reader:
p = row[0]
records[p] = row
for zinfo in zf.infolist():
arcname = zinfo.filename
if isinstance(arcname, text_type):
u_arcname = arcname
else:
u_arcname = arcname.decode('utf-8')
if '..' in u_arcname:
raise DistlibException('invalid entry in '
'wheel: %r' % u_arcname)
# The signature file won't be in RECORD,
# and we don't currently don't do anything with it
if u_arcname.endswith('/RECORD.jws'):
continue
row = records[u_arcname]
if row[2] and str(zinfo.file_size) != row[2]:
raise DistlibException('size mismatch for '
'%s' % u_arcname)
if row[1]:
kind, value = row[1].split('=', 1)
with zf.open(arcname) as bf:
data = bf.read()
_, digest = self.get_hash(data, kind)
if digest != value:
raise DistlibException('digest mismatch for '
'%s' % arcname)
def update(self, modifier, dest_dir=None, **kwargs):
"""
Update the contents of a wheel in a generic way. The modifier should
be a callable which expects a dictionary argument: its keys are
archive-entry paths, and its values are absolute filesystem paths
        where the contents of the corresponding archive entries can be found. The
modifier is free to change the contents of the files pointed to, add
new entries and remove entries, before returning. This method will
extract the entire contents of the wheel to a temporary location, call
the modifier, and then use the passed (and possibly updated)
dictionary to write a new wheel. If ``dest_dir`` is specified, the new
wheel is written there -- otherwise, the original wheel is overwritten.
The modifier should return True if it updated the wheel, else False.
This method returns the same value the modifier returns.
"""
def get_version(path_map, info_dir):
version = path = None
key = '%s/%s' % (info_dir, METADATA_FILENAME)
if key not in path_map:
key = '%s/PKG-INFO' % info_dir
if key in path_map:
path = path_map[key]
version = Metadata(path=path).version
return version, path
def update_version(version, path):
updated = None
try:
v = NormalizedVersion(version)
i = version.find('-')
if i < 0:
updated = '%s+1' % version
else:
parts = [int(s) for s in version[i + 1:].split('.')]
parts[-1] += 1
updated = '%s+%s' % (version[:i],
'.'.join(str(i) for i in parts))
except UnsupportedVersionError:
logger.debug('Cannot update non-compliant (PEP-440) '
'version %r', version)
if updated:
md = Metadata(path=path)
md.version = updated
legacy = not path.endswith(METADATA_FILENAME)
md.write(path=path, legacy=legacy)
logger.debug('Version updated from %r to %r', version,
updated)
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
record_name = posixpath.join(info_dir, 'RECORD')
with tempdir() as workdir:
with ZipFile(pathname, 'r') as zf:
path_map = {}
for zinfo in zf.infolist():
arcname = zinfo.filename
if isinstance(arcname, text_type):
u_arcname = arcname
else:
u_arcname = arcname.decode('utf-8')
if u_arcname == record_name:
continue
if '..' in u_arcname:
raise DistlibException('invalid entry in '
'wheel: %r' % u_arcname)
zf.extract(zinfo, workdir)
path = os.path.join(workdir, convert_path(u_arcname))
path_map[u_arcname] = path
# Remember the version.
original_version, _ = get_version(path_map, info_dir)
# Files extracted. Call the modifier.
modified = modifier(path_map, **kwargs)
if modified:
# Something changed - need to build a new wheel.
current_version, path = get_version(path_map, info_dir)
if current_version and (current_version == original_version):
# Add or update local version to signify changes.
update_version(current_version, path)
# Decide where the new wheel goes.
if dest_dir is None:
fd, newpath = tempfile.mkstemp(suffix='.whl',
prefix='wheel-update-',
dir=workdir)
os.close(fd)
else:
if not os.path.isdir(dest_dir):
raise DistlibException('Not a directory: %r' % dest_dir)
newpath = os.path.join(dest_dir, self.filename)
archive_paths = list(path_map.items())
distinfo = os.path.join(workdir, info_dir)
info = distinfo, info_dir
self.write_records(info, workdir, archive_paths)
self.build_zip(newpath, archive_paths)
if dest_dir is None:
shutil.copyfile(newpath, pathname)
return modified
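# Illustrative sketch, not part of the original module: a minimal modifier
# usable with Wheel.update() above. The archive entry name is hypothetical.
#
#     def add_notice(path_map, **kwargs):
#         key = 'mypkg/__init__.py'
#         if key not in path_map:
#             return False
#         with open(path_map[key], 'a') as f:
#             f.write('\n# patched by add_notice\n')
#         return True
#
#     Wheel('mypkg-1.0-py27-none-any.whl').update(add_notice)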
def compatible_tags():
"""
Return (pyver, abi, arch) tuples compatible with this Python.
"""
versions = [VER_SUFFIX]
major = VER_SUFFIX[0]
for minor in range(sys.version_info[1] - 1, - 1, -1):
versions.append(''.join([major, str(minor)]))
abis = []
for suffix, _, _ in imp.get_suffixes():
if suffix.startswith('.abi'):
abis.append(suffix.split('.', 2)[1])
abis.sort()
if ABI != 'none':
abis.insert(0, ABI)
abis.append('none')
result = []
arches = [ARCH]
if sys.platform == 'darwin':
        m = re.match(r'(\w+)_(\d+)_(\d+)_(\w+)$', ARCH)
if m:
name, major, minor, arch = m.groups()
minor = int(minor)
matches = [arch]
if arch in ('i386', 'ppc'):
matches.append('fat')
if arch in ('i386', 'ppc', 'x86_64'):
matches.append('fat3')
if arch in ('ppc64', 'x86_64'):
matches.append('fat64')
if arch in ('i386', 'x86_64'):
matches.append('intel')
if arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'):
matches.append('universal')
while minor >= 0:
for match in matches:
s = '%s_%s_%s_%s' % (name, major, minor, match)
if s != ARCH: # already there
arches.append(s)
minor -= 1
# Most specific - our Python version, ABI and arch
for abi in abis:
for arch in arches:
result.append((''.join((IMP_PREFIX, versions[0])), abi, arch))
# where no ABI / arch dependency, but IMP_PREFIX dependency
for i, version in enumerate(versions):
result.append((''.join((IMP_PREFIX, version)), 'none', 'any'))
if i == 0:
result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any'))
# no IMP_PREFIX, ABI or arch dependency
for i, version in enumerate(versions):
result.append((''.join(('py', version)), 'none', 'any'))
if i == 0:
result.append((''.join(('py', version[0])), 'none', 'any'))
return set(result)
COMPATIBLE_TAGS = compatible_tags()
del compatible_tags
def is_compatible(wheel, tags=None):
if not isinstance(wheel, Wheel):
wheel = Wheel(wheel) # assume it's a filename
result = False
if tags is None:
tags = COMPATIBLE_TAGS
for ver, abi, arch in tags:
if ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch:
result = True
break
return result
| mit |
cmakler/econgraphs | lib/flask/templating.py | 783 | 4707 | # -*- coding: utf-8 -*-
"""
flask.templating
~~~~~~~~~~~~~~~~
Implements the bridge to Jinja2.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import posixpath
from jinja2 import BaseLoader, Environment as BaseEnvironment, \
TemplateNotFound
from .globals import _request_ctx_stack, _app_ctx_stack
from .signals import template_rendered
from .module import blueprint_is_module
from ._compat import itervalues, iteritems
def _default_template_ctx_processor():
"""Default template context processor. Injects `request`,
`session` and `g`.
"""
reqctx = _request_ctx_stack.top
appctx = _app_ctx_stack.top
rv = {}
if appctx is not None:
rv['g'] = appctx.g
if reqctx is not None:
rv['request'] = reqctx.request
rv['session'] = reqctx.session
return rv
class Environment(BaseEnvironment):
"""Works like a regular Jinja2 environment but has some additional
knowledge of how Flask's blueprint works so that it can prepend the
name of the blueprint to referenced templates if necessary.
"""
def __init__(self, app, **options):
if 'loader' not in options:
options['loader'] = app.create_global_jinja_loader()
BaseEnvironment.__init__(self, **options)
self.app = app
class DispatchingJinjaLoader(BaseLoader):
"""A loader that looks for templates in the application and all
the blueprint folders.
"""
def __init__(self, app):
self.app = app
def get_source(self, environment, template):
for loader, local_name in self._iter_loaders(template):
try:
return loader.get_source(environment, local_name)
except TemplateNotFound:
pass
raise TemplateNotFound(template)
def _iter_loaders(self, template):
loader = self.app.jinja_loader
if loader is not None:
yield loader, template
# old style module based loaders in case we are dealing with a
# blueprint that is an old style module
try:
module, local_name = posixpath.normpath(template).split('/', 1)
blueprint = self.app.blueprints[module]
if blueprint_is_module(blueprint):
loader = blueprint.jinja_loader
if loader is not None:
yield loader, local_name
except (ValueError, KeyError):
pass
for blueprint in itervalues(self.app.blueprints):
if blueprint_is_module(blueprint):
continue
loader = blueprint.jinja_loader
if loader is not None:
yield loader, template
def list_templates(self):
result = set()
loader = self.app.jinja_loader
if loader is not None:
result.update(loader.list_templates())
for name, blueprint in iteritems(self.app.blueprints):
loader = blueprint.jinja_loader
if loader is not None:
for template in loader.list_templates():
prefix = ''
if blueprint_is_module(blueprint):
prefix = name + '/'
result.add(prefix + template)
return list(result)
def _render(template, context, app):
"""Renders the template and fires the signal"""
rv = template.render(context)
template_rendered.send(app, template=template, context=context)
return rv
def render_template(template_name_or_list, **context):
"""Renders a template from the template folder with the given
context.
:param template_name_or_list: the name of the template to be
                                  rendered, or an iterable of template names;
                                  the first one that exists will be rendered
:param context: the variables that should be available in the
context of the template.
"""
ctx = _app_ctx_stack.top
ctx.app.update_template_context(context)
return _render(ctx.app.jinja_env.get_or_select_template(template_name_or_list),
context, ctx.app)
def render_template_string(source, **context):
"""Renders a template from the given template source string
with the given context.
:param source: the sourcecode of the template to be
rendered
:param context: the variables that should be available in the
context of the template.
"""
ctx = _app_ctx_stack.top
ctx.app.update_template_context(context)
return _render(ctx.app.jinja_env.from_string(source),
context, ctx.app)
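# Illustrative sketch only -- not part of Flask's source. A self-contained
# demonstration of render_template_string(); template text and context
# values are hypothetical.
if __name__ == '__main__':
    from flask import Flask
    demo_app = Flask(__name__)
    # an application context is enough here; no request is needed
    with demo_app.app_context():
        print(render_template_string('Hello {{ name }}!', name='world'))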
| mit |
xiaoxiamii/scikit-learn | benchmarks/bench_plot_svd.py | 325 | 2899 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but has a fat infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = make_low_rank_matrix(n_samples, n_features,
effective_rank=rank,
tail_strength=0.2)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results['scikit-learn randomized_svd (n_iter=0)'].append(
time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d "
% n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results['scikit-learn randomized_svd (n_iter=%d)'
% n_iter].append(time() - tstart)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(2, 1000, 4).astype(np.int)
features_range = np.linspace(2, 1000, 4).astype(np.int)
results = compute_bench(samples_range, features_range)
label = 'scikit-learn singular value decomposition benchmark results'
fig = plt.figure(label)
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbg', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.legend()
plt.show()
| bsd-3-clause |
ruijie/quantum | quantum/plugins/cisco/l2network_plugin_configuration.py | 7 | 2232 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2011 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sumit Naiksatam, Cisco Systems, Inc.
# @author: Rohit Agarwalla, Cisco Systems, Inc.
from quantum.common.utils import find_config_file
from quantum.plugins.cisco.common import cisco_configparser as confp
CONF_FILE = find_config_file({'plugin': 'cisco'}, "l2network_plugin.ini")
CONF_PARSER_OBJ = confp.CiscoConfigParser(CONF_FILE)
# Read the conf for the l2network_plugin
SECTION_CONF = CONF_PARSER_OBJ['VLANS']
VLAN_NAME_PREFIX = SECTION_CONF['vlan_name_prefix']
VLAN_START = SECTION_CONF['vlan_start']
VLAN_END = SECTION_CONF['vlan_end']
SECTION_CONF = CONF_PARSER_OBJ['PORTS']
MAX_PORTS = SECTION_CONF['max_ports']
SECTION_CONF = CONF_PARSER_OBJ['PORTPROFILES']
MAX_PORT_PROFILES = SECTION_CONF['max_port_profiles']
SECTION_CONF = CONF_PARSER_OBJ['NETWORKS']
MAX_NETWORKS = SECTION_CONF['max_networks']
SECTION_CONF = CONF_PARSER_OBJ['MODEL']
MODEL_CLASS = SECTION_CONF['model_class']
SECTION_CONF = CONF_PARSER_OBJ['SEGMENTATION']
MANAGER_CLASS = SECTION_CONF['manager_class']
CONF_FILE = find_config_file({'plugin': 'cisco'}, "cisco_plugins.ini")
CONF_PARSER_OBJ = confp.CiscoConfigParser(CONF_FILE)
# Read the config for the device plugins
PLUGINS = CONF_PARSER_OBJ.walk(CONF_PARSER_OBJ.dummy)
CONF_FILE = find_config_file({'plugin': 'cisco'}, "db_conn.ini")
CONF_PARSER_OBJ = confp.CiscoConfigParser(CONF_FILE)
# Read DB config for the Quantum DB
SECTION_CONF = CONF_PARSER_OBJ['DATABASE']
DB_NAME = SECTION_CONF['name']
DB_USER = SECTION_CONF['user']
DB_PASS = SECTION_CONF['pass']
DB_HOST = SECTION_CONF['host']
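# Illustrative sketch (all values are hypothetical): the sections and keys
# in l2network_plugin.ini and db_conn.ini that the lookups above assume.
#
#     [VLANS]                      ; l2network_plugin.ini
#     vlan_name_prefix = q-
#     vlan_start = 100
#     vlan_end = 3000
#
#     [DATABASE]                   ; db_conn.ini
#     name = quantum_l2network
#     user = quantum
#     pass = secret
#     host = 127.0.0.1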
| apache-2.0 |
cluckmaster/MissionPlanner | Lib/site-packages/scipy/ndimage/info.py | 55 | 2112 | """
N-dimensional image package
===========================
This package contains various functions for multi-dimensional image
processing.
Modules
-------
.. autosummary::
:toctree: generated/
filters -
fourier -
interpolation -
io -
measurements -
morphology -
Functions (partial list)
------------------------
.. autosummary::
:toctree: generated/
affine_transform - Apply an affine transformation
center_of_mass - The center of mass of the values of an array at labels
convolve - Multi-dimensional convolution
convolve1d - 1-D convolution along the given axis
correlate - Multi-dimensional correlation
correlate1d - 1-D correlation along the given axis
   extrema - Minima and maxima of an array at labels, with their positions
find_objects - Find objects in a labeled array
generic_filter - Multi-dimensional filter using a given function
generic_filter1d - 1-D generic filter along the given axis
   geometric_transform - Apply an arbitrary geometric transform
histogram - Histogram of the values of an array, optionally at labels
imread - Load an image from a file
label - Label features in an array
laplace - n-D Laplace filter based on approximate second derivatives
map_coordinates - Map input array to new coordinates by interpolation
mean - Mean of the values of an array at labels
median_filter - Calculates a multi-dimensional median filter
percentile_filter - Calculates a multi-dimensional percentile filter
rank_filter - Calculates a multi-dimensional rank filter
rotate - Rotate an array
shift - Shift an array
standard_deviation - Standard deviation of an n-D image array
sum - Sum of the values of the array
uniform_filter - Multi-dimensional uniform filter
uniform_filter1d - 1-D uniform filter along the given axis
variance - Variance of the values of an n-D image array
zoom - Zoom an array
Note: the above is only roughly half the functions available in this
package
Objects
-------
.. autosummary::
:toctree: generated/
docdict -
"""
postpone_import = 1
depends = []
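# Illustrative usage sketch, not part of the original module: a typical
# call pattern for two of the functions listed above.
#
#     >>> import numpy as np
#     >>> from scipy import ndimage
#     >>> a = np.array([[1, 0, 0],
#     ...               [0, 0, 1]])
#     >>> labeled, n_features = ndimage.label(a)
#     >>> n_features
#     2
#     >>> ndimage.center_of_mass(a, labeled, 1)
#     (0.0, 0.0)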
| gpl-3.0 |
codewarrior0/pytest | testing/test_recwarn.py | 17 | 6579 | import warnings
import py
import pytest
from _pytest.recwarn import WarningsRecorder
def test_recwarn_functional(testdir):
reprec = testdir.inline_runsource("""
import warnings
oldwarn = warnings.showwarning
def test_method(recwarn):
assert warnings.showwarning != oldwarn
warnings.warn("hello")
warn = recwarn.pop()
assert isinstance(warn.message, UserWarning)
def test_finalized():
assert warnings.showwarning == oldwarn
""")
res = reprec.countoutcomes()
assert tuple(res) == (2, 0, 0), res
class TestWarningsRecorderChecker(object):
def test_recording(self, recwarn):
showwarning = py.std.warnings.showwarning
rec = WarningsRecorder()
with rec:
assert py.std.warnings.showwarning != showwarning
assert not rec.list
py.std.warnings.warn_explicit("hello", UserWarning, "xyz", 13)
assert len(rec.list) == 1
py.std.warnings.warn(DeprecationWarning("hello"))
assert len(rec.list) == 2
warn = rec.pop()
assert str(warn.message) == "hello"
l = rec.list
rec.clear()
assert len(rec.list) == 0
assert l is rec.list
pytest.raises(AssertionError, "rec.pop()")
assert showwarning == py.std.warnings.showwarning
def test_typechecking(self):
from _pytest.recwarn import WarningsChecker
with pytest.raises(TypeError):
WarningsChecker(5)
with pytest.raises(TypeError):
WarningsChecker(('hi', RuntimeWarning))
with pytest.raises(TypeError):
WarningsChecker([DeprecationWarning, RuntimeWarning])
def test_invalid_enter_exit(self):
# wrap this test in WarningsRecorder to ensure warning state gets reset
with WarningsRecorder():
with pytest.raises(RuntimeError):
rec = WarningsRecorder()
rec.__exit__(None, None, None) # can't exit before entering
with pytest.raises(RuntimeError):
rec = WarningsRecorder()
with rec:
with rec:
pass # can't enter twice
#
# ============ test pytest.deprecated_call() ==============
#
def dep(i):
if i == 0:
py.std.warnings.warn("is deprecated", DeprecationWarning)
return 42
reg = {}
def dep_explicit(i):
if i == 0:
py.std.warnings.warn_explicit("dep_explicit", category=DeprecationWarning,
filename="hello", lineno=3)
class TestDeprecatedCall(object):
def test_deprecated_call_raises(self):
excinfo = pytest.raises(AssertionError,
"pytest.deprecated_call(dep, 3)")
assert str(excinfo).find("did not produce") != -1
def test_deprecated_call(self):
pytest.deprecated_call(dep, 0)
def test_deprecated_call_ret(self):
ret = pytest.deprecated_call(dep, 0)
assert ret == 42
def test_deprecated_call_preserves(self):
onceregistry = py.std.warnings.onceregistry.copy()
filters = py.std.warnings.filters[:]
warn = py.std.warnings.warn
warn_explicit = py.std.warnings.warn_explicit
self.test_deprecated_call_raises()
self.test_deprecated_call()
assert onceregistry == py.std.warnings.onceregistry
assert filters == py.std.warnings.filters
assert warn is py.std.warnings.warn
assert warn_explicit is py.std.warnings.warn_explicit
def test_deprecated_explicit_call_raises(self):
pytest.raises(AssertionError,
"pytest.deprecated_call(dep_explicit, 3)")
def test_deprecated_explicit_call(self):
pytest.deprecated_call(dep_explicit, 0)
pytest.deprecated_call(dep_explicit, 0)
class TestWarns(object):
def test_strings(self):
# different messages, b/c Python suppresses multiple identical warnings
source1 = "warnings.warn('w1', RuntimeWarning)"
source2 = "warnings.warn('w2', RuntimeWarning)"
source3 = "warnings.warn('w3', RuntimeWarning)"
pytest.warns(RuntimeWarning, source1)
pytest.raises(pytest.fail.Exception,
lambda: pytest.warns(UserWarning, source2))
pytest.warns(RuntimeWarning, source3)
def test_function(self):
pytest.warns(SyntaxWarning,
lambda msg: warnings.warn(msg, SyntaxWarning), "syntax")
def test_warning_tuple(self):
pytest.warns((RuntimeWarning, SyntaxWarning),
lambda: warnings.warn('w1', RuntimeWarning))
pytest.warns((RuntimeWarning, SyntaxWarning),
lambda: warnings.warn('w2', SyntaxWarning))
pytest.raises(pytest.fail.Exception,
lambda: pytest.warns(
(RuntimeWarning, SyntaxWarning),
lambda: warnings.warn('w3', UserWarning)))
def test_as_contextmanager(self):
with pytest.warns(RuntimeWarning):
warnings.warn("runtime", RuntimeWarning)
with pytest.raises(pytest.fail.Exception):
with pytest.warns(RuntimeWarning):
warnings.warn("user", UserWarning)
with pytest.raises(pytest.fail.Exception):
with pytest.warns(UserWarning):
warnings.warn("runtime", RuntimeWarning)
with pytest.warns(UserWarning):
warnings.warn("user", UserWarning)
def test_record(self):
with pytest.warns(UserWarning) as record:
warnings.warn("user", UserWarning)
assert len(record) == 1
assert str(record[0].message) == "user"
def test_record_only(self):
with pytest.warns(None) as record:
warnings.warn("user", UserWarning)
warnings.warn("runtime", RuntimeWarning)
assert len(record) == 2
assert str(record[0].message) == "user"
assert str(record[1].message) == "runtime"
def test_double_test(self, testdir):
"""If a test is run again, the warning should still be raised"""
testdir.makepyfile('''
import pytest
import warnings
@pytest.mark.parametrize('run', [1, 2])
def test(run):
with pytest.warns(RuntimeWarning):
warnings.warn("runtime", RuntimeWarning)
''')
result = testdir.runpytest()
result.stdout.fnmatch_lines(['*2 passed in*'])
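# Minimal standalone sketch of the API exercised above (illustrative):
#
#   import warnings
#   import pytest
#   with pytest.warns(RuntimeWarning) as record:
#       warnings.warn("boom", RuntimeWarning)
#   assert str(record[0].message) == "boom"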
| mit |
pranner/CMPUT410-Lab6-Django | v1/lib/python2.7/site-packages/django/utils/deconstruct.py | 70 | 2066 | from __future__ import absolute_import # Avoid importing `importlib` from this package.
from importlib import import_module
def deconstructible(*args, **kwargs):
"""
Class decorator that allows the decorated class to be serialized
by the migrations subsystem.
Accepts an optional kwarg `path` to specify the import path.
"""
path = kwargs.pop('path', None)
def decorator(klass):
def __new__(cls, *args, **kwargs):
# We capture the arguments to make returning them trivial
obj = super(klass, cls).__new__(cls)
obj._constructor_args = (args, kwargs)
return obj
def deconstruct(obj):
"""
Returns a 3-tuple of class import path, positional arguments,
and keyword arguments.
"""
# Python 2/fallback version
if path:
module_name, _, name = path.rpartition('.')
else:
module_name = obj.__module__
name = obj.__class__.__name__
# Make sure it's actually there and not an inner class
module = import_module(module_name)
if not hasattr(module, name):
raise ValueError(
"Could not find object %s in %s.\n"
"Please note that you cannot serialize things like inner "
"classes. Please move the object into the main module "
"body to use migrations.\n"
"For more information, see "
"https://docs.djangoproject.com/en/dev/topics/migrations/#serializing-values"
% (name, module_name))
return (
path or '%s.%s' % (obj.__class__.__module__, name),
obj._constructor_args[0],
obj._constructor_args[1],
)
klass.__new__ = staticmethod(__new__)
klass.deconstruct = deconstruct
return klass
if not args:
return decorator
return decorator(*args, **kwargs)
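# Illustrative usage sketch (the class and import path are hypothetical):
#
#   @deconstructible(path='myapp.validators.RangeValidator')
#   class RangeValidator(object):
#       def __init__(self, low, high):
#           self.low, self.high = low, high
#
#   RangeValidator(1, 10).deconstruct()
#   # -> ('myapp.validators.RangeValidator', (1, 10), {})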
| apache-2.0 |
nyalldawson/QGIS | tests/src/python/test_qgsserver_accesscontrol_wfs.py | 15 | 13735 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsServer.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Stephane Brunner'
__date__ = '28/08/2015'
__copyright__ = 'Copyright 2015, The QGIS Project'
print('CTEST_FULL_OUTPUT')
from qgis.testing import unittest
import urllib.request
import urllib.parse
import urllib.error
from test_qgsserver_accesscontrol import TestQgsServerAccessControl, XML_NS
class TestQgsServerAccessControlWFS(TestQgsServerAccessControl):
def test_wfs_getcapabilities(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WFS",
"VERSION": "1.0.0",
"REQUEST": "GetCapabilities"
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find("<Name>Hello</Name>") != -1,
"No Hello layer in WFS/GetCapabilities\n%s" % response)
self.assertTrue(
str(response).find("<Name>Hello_OnOff</Name>") != -1,
"No Hello layer in WFS/GetCapabilities\n%s" % response)
self.assertTrue(
str(response).find("<Name>Country</Name>") != -1,
"No Country layer in WFS/GetCapabilities\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertTrue(
str(response).find("<Name>Hello</Name>") != -1,
"No Hello layer in WFS/GetCapabilities\n%s" % response)
self.assertFalse(
str(response).find("<Name>Country</Name>") != -1,
"Unexpected Country layer in WFS/GetCapabilities\n%s" % response)
def test_wfs_describefeaturetype_hello(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WFS",
"VERSION": "1.0.0",
"REQUEST": "DescribeFeatureType",
"TYPENAME": "Hello"
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find('name="Hello"') != -1,
"No Hello layer in DescribeFeatureType\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertTrue(
str(response).find('name="Hello"') != -1,
"No Hello layer in DescribeFeatureType\n%s" % response)
def test_wfs_describefeaturetype_country(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WFS",
"VERSION": "1.0.0",
"REQUEST": "DescribeFeatureType",
"TYPENAME": "Country"
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find('name="Country"') != -1,
"No Country layer in DescribeFeatureType\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertFalse(
str(response).find('name="Country"') != -1,
"Unexpected Country layer in DescribeFeatureType\n%s" % response)
def test_wfs_getfeature_hello(self):
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>1</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:color>red</qgs:color>") != -1, # spellok
"No color in result of GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertFalse(
str(response).find("<qgs:color>red</qgs:color>") != -1, # spellok
"Unexpected color in result of GetFeature\n%s" % response)
self.assertFalse(
str(response).find("<qgs:color>NULL</qgs:color>") != -1, # spellok
"Unexpected color NULL in result of GetFeature\n%s" % response)
def test_wfs_getfeature_hello2(self):
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>2</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>2</qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertFalse(
str(response).find("<qgs:pk>2</qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response)
def test_wfs_getfeature_country(self):
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello_OnOff" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>1</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertFalse(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response) # spellok
# # Subset String # #
def test_wfs_getfeature_subsetstring(self):
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello_SubsetString" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>1</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No good result in GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No good result in GetFeature\n%s" % response)
def test_wfs_getfeature_subsetstring2(self):
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello_SubsetString" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>2</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>2</qgs:pk>") != -1,
"No good result in GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertFalse(
str(response).find("<qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response)
def test_wfs_getfeature_project_subsetstring(self):
"""Tests access control with a subset string already applied to a layer in a project
'Hello_Project_SubsetString' layer has a subsetString of "pkuid in (7,8)"
This test checks for retrieving a feature which should be available in with/without access control
"""
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello_Project_SubsetString" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>7</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
# should be one result
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>7</qgs:pk>") != -1,
"Feature with pkuid=7 not found in GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>7</qgs:pk>") != -1,
"Feature with pkuid=7 not found in GetFeature, has been incorrectly filtered out by access controls\n%s" % response)
def test_wfs_getfeature_project_subsetstring2(self):
"""Tests access control with a subset string already applied to a layer in a project
'Hello_Project_SubsetString' layer has a subsetString of "pkuid in (7,8)"
This test checks for a feature which should be filtered out by access controls
"""
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello_Project_SubsetString" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>8</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
# should be one result
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>8</qgs:pk>") != -1,
"Feature with pkuid=8 not found in GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertFalse(
str(response).find("<qgs:pk>") != -1,
"Feature with pkuid=8 was found in GetFeature, but should have been filtered out by access controls\n%s" % response)
def test_wfs_getfeature_project_subsetstring3(self):
"""Tests access control with a subset string already applied to a layer in a project
'Hello_Project_SubsetString' layer has a subsetString of "pkuid in (7,8)"
This test checks for features which should be filtered out by project subsetStrings.
For example, pkuid 6 passes the access control checks, but should not be shown because of project layer subsetString
"""
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello_Project_SubsetString" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>6</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
# should be no results, since pkuid 6 should be filtered out by the project subsetString
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>") == -1,
"Project based layer subsetString not respected in GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertFalse(
str(response).find("<qgs:pk>") != -1,
"Project based layer subsetString not respected in GetFeature with restricted access\n%s" % response)
if __name__ == "__main__":
unittest.main()
| gpl-2.0 |
twilio/howtos | intercom/gdata/blogger/data.py | 61 | 4551 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data model classes for parsing and generating XML for the Blogger API."""
__author__ = '[email protected] (Jeff Scudder)'
import re
import urlparse
import atom.core
import gdata.data
LABEL_SCHEME = 'http://www.blogger.com/atom/ns#'
THR_TEMPLATE = '{http://purl.org/syndication/thread/1.0}%s'
BLOG_NAME_PATTERN = re.compile('(http://)(\w*)')
BLOG_ID_PATTERN = re.compile('(tag:blogger.com,1999:blog-)(\w*)')
BLOG_ID2_PATTERN = re.compile('tag:blogger.com,1999:user-(\d+)\.blog-(\d+)')
POST_ID_PATTERN = re.compile(
'(tag:blogger.com,1999:blog-)(\w*)(.post-)(\w*)')
PAGE_ID_PATTERN = re.compile(
'(tag:blogger.com,1999:blog-)(\w*)(.page-)(\w*)')
COMMENT_ID_PATTERN = re.compile('.*-(\w*)$')
class BloggerEntry(gdata.data.GDEntry):
"""Adds convenience methods inherited by all Blogger entries."""
def get_blog_id(self):
"""Extracts the Blogger id of this blog.
This method is useful when constructing URLs by hand. The blog id is
often used in blogger operation URLs. This should not be confused with
the id member of a BloggerBlog. The id element is the Atom id XML element.
The blog id which this method returns is a part of the Atom id.
Returns:
The blog's unique id as a string.
"""
if self.id.text:
match = BLOG_ID_PATTERN.match(self.id.text)
if match:
return match.group(2)
else:
return BLOG_ID2_PATTERN.match(self.id.text).group(2)
return None
GetBlogId = get_blog_id
def get_blog_name(self):
"""Finds the name of this blog as used in the 'alternate' URL.
An alternate URL is in the form 'http://blogName.blogspot.com/'. For an
entry representing the above example, this method would return 'blogName'.
Returns:
The blog's URL name component as a string.
"""
for link in self.link:
if link.rel == 'alternate':
return urlparse.urlparse(link.href)[1].split(".", 1)[0]
return None
GetBlogName = get_blog_name
class Blog(BloggerEntry):
"""Represents a blog which belongs to the user."""
class BlogFeed(gdata.data.GDFeed):
entry = [Blog]
class BlogPost(BloggerEntry):
"""Represents a single post on a blog."""
def add_label(self, label):
"""Adds a label to the blog post.
The label is represented by an Atom category element, so this method
is shorthand for appending a new atom.Category object.
Args:
label: str
"""
self.category.append(atom.data.Category(scheme=LABEL_SCHEME, term=label))
AddLabel = add_label
def get_post_id(self):
"""Extracts the postID string from the entry's Atom id.
Returns: A string of digits which identify this post within the blog.
"""
if self.id.text:
return POST_ID_PATTERN.match(self.id.text).group(4)
return None
GetPostId = get_post_id
class BlogPostFeed(gdata.data.GDFeed):
entry = [BlogPost]
class BlogPage(BloggerEntry):
"""Represents a single page on a blog."""
def get_page_id(self):
"""Extracts the pageID string from entry's Atom id.
Returns: A string of digits which identify this post within the blog.
"""
if self.id.text:
return PAGE_ID_PATTERN.match(self.id.text).group(4)
return None
GetPageId = get_page_id
class BlogPageFeed(gdata.data.GDFeed):
entry = [BlogPage]
class InReplyTo(atom.core.XmlElement):
_qname = THR_TEMPLATE % 'in-reply-to'
href = 'href'
ref = 'ref'
source = 'source'
type = 'type'
class Comment(BloggerEntry):
"""Blog post comment entry in a feed listing comments on a post or blog."""
in_reply_to = InReplyTo
def get_comment_id(self):
"""Extracts the commentID string from the entry's Atom id.
Returns: A string of digits which identify this post within the blog.
"""
if self.id.text:
return COMMENT_ID_PATTERN.match(self.id.text).group(1)
return None
GetCommentId = get_comment_id
class CommentFeed(gdata.data.GDFeed):
entry = [Comment]
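# Illustrative example of the id parsing above (the id string follows the
# patterns defined at the top of this module; the digits are made up):
#
#   post = BlogPost()
#   post.id = atom.data.Id(text='tag:blogger.com,1999:blog-123.post-456')
#   post.get_blog_id()  # -> '123'
#   post.get_post_id()  # -> '456'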
| mit |
lxn2/mxnet | example/rcnn/demo.py | 13 | 5637 | import argparse
import os
import cv2
import mxnet as mx
import numpy as np
from rcnn.logger import logger
from rcnn.config import config
from rcnn.symbol import get_vgg_test, get_vgg_rpn_test
from rcnn.io.image import resize, transform
from rcnn.core.tester import Predictor, im_detect, im_proposal, vis_all_detection, draw_all_detection
from rcnn.utils.load_model import load_param
from rcnn.processing.nms import py_nms_wrapper, cpu_nms_wrapper, gpu_nms_wrapper
CLASSES = ('__background__',
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
config.TEST.HAS_RPN = True
SHORT_SIDE = config.SCALES[0][0]
LONG_SIDE = config.SCALES[0][1]
PIXEL_MEANS = config.PIXEL_MEANS
DATA_NAMES = ['data', 'im_info']
LABEL_NAMES = None
DATA_SHAPES = [('data', (1, 3, LONG_SIDE, SHORT_SIDE)), ('im_info', (1, 3))]
LABEL_SHAPES = None
# visualization
CONF_THRESH = 0.7
NMS_THRESH = 0.3
nms = py_nms_wrapper(NMS_THRESH)
def get_net(symbol, prefix, epoch, ctx):
arg_params, aux_params = load_param(prefix, epoch, convert=True, ctx=ctx, process=True)
# infer shape
data_shape_dict = dict(DATA_SHAPES)
arg_names, aux_names = symbol.list_arguments(), symbol.list_auxiliary_states()
arg_shape, _, aux_shape = symbol.infer_shape(**data_shape_dict)
arg_shape_dict = dict(zip(arg_names, arg_shape))
aux_shape_dict = dict(zip(aux_names, aux_shape))
# check shapes
for k in symbol.list_arguments():
if k in data_shape_dict or 'label' in k:
continue
assert k in arg_params, k + ' not initialized'
assert arg_params[k].shape == arg_shape_dict[k], \
'shape inconsistent for ' + k + ' inferred ' + str(arg_shape_dict[k]) + ' provided ' + str(arg_params[k].shape)
for k in symbol.list_auxiliary_states():
assert k in aux_params, k + ' not initialized'
assert aux_params[k].shape == aux_shape_dict[k], \
'shape inconsistent for ' + k + ' inferred ' + str(aux_shape_dict[k]) + ' provided ' + str(aux_params[k].shape)
predictor = Predictor(symbol, DATA_NAMES, LABEL_NAMES, context=ctx,
provide_data=DATA_SHAPES, provide_label=LABEL_SHAPES,
arg_params=arg_params, aux_params=aux_params)
return predictor
def generate_batch(im):
"""
preprocess image, return batch
:param im: cv2.imread returns [height, width, channel] in BGR
:return:
data_batch: MXNet input batch
data_names: names in data_batch
im_scale: float number
"""
im_array, im_scale = resize(im, SHORT_SIDE, LONG_SIDE)
im_array = transform(im_array, PIXEL_MEANS)
im_info = np.array([[im_array.shape[2], im_array.shape[3], im_scale]], dtype=np.float32)
data = [mx.nd.array(im_array), mx.nd.array(im_info)]
data_shapes = [('data', im_array.shape), ('im_info', im_info.shape)]
data_batch = mx.io.DataBatch(data=data, label=None, provide_data=data_shapes, provide_label=None)
return data_batch, DATA_NAMES, im_scale
def demo_net(predictor, image_name, vis=False):
"""
generate data_batch -> im_detect -> post process
:param predictor: Predictor
:param image_name: image name
:param vis: if True, display the detections; otherwise save them to a new image file
:return: None
"""
assert os.path.exists(image_name), image_name + ' not found'
im = cv2.imread(image_name)
data_batch, data_names, im_scale = generate_batch(im)
scores, boxes, data_dict = im_detect(predictor, data_batch, data_names, im_scale)
all_boxes = [[] for _ in CLASSES]
for cls in CLASSES:
cls_ind = CLASSES.index(cls)
cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
cls_scores = scores[:, cls_ind, np.newaxis]
keep = np.where(cls_scores >= CONF_THRESH)[0]
dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :]
keep = nms(dets)
all_boxes[cls_ind] = dets[keep, :]
boxes_this_image = [[]] + [all_boxes[j] for j in range(1, len(CLASSES))]
# print results
logger.info('---class---')
logger.info('[[x1, y1, x2, y2, confidence]]')
for ind, boxes in enumerate(boxes_this_image):
if len(boxes) > 0:
logger.info('---%s---' % CLASSES[ind])
logger.info('%s' % boxes)
if vis:
vis_all_detection(data_dict['data'].asnumpy(), boxes_this_image, CLASSES, im_scale)
else:
result_file = image_name.replace('.', '_result.')
logger.info('results saved to %s' % result_file)
im = draw_all_detection(data_dict['data'].asnumpy(), boxes_this_image, CLASSES, im_scale)
cv2.imwrite(result_file, im)
def parse_args():
parser = argparse.ArgumentParser(description='Demonstrate a Faster R-CNN network')
parser.add_argument('--image', help='custom image', type=str)
parser.add_argument('--prefix', help='saved model prefix', type=str)
parser.add_argument('--epoch', help='epoch of pretrained model', type=int)
parser.add_argument('--gpu', help='GPU device to use', default=0, type=int)
parser.add_argument('--vis', help='display result', action='store_true')
args = parser.parse_args()
return args
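# Example invocation (illustrative; the model prefix and image are hypothetical):
#   python demo.py --prefix model/final --epoch 0 --image street.jpg --gpu 0 --vis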
def main():
args = parse_args()
ctx = mx.gpu(args.gpu)
symbol = get_vgg_test(num_classes=config.NUM_CLASSES, num_anchors=config.NUM_ANCHORS)
predictor = get_net(symbol, args.prefix, args.epoch, ctx)
demo_net(predictor, args.image, args.vis)
if __name__ == '__main__':
main()
| apache-2.0 |
hjoliver/cylc | tests/unit/tui/test_data.py | 1 | 1331 | # THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import cylc.flow.tui.data
from cylc.flow.tui.data import generate_mutation
def test_generate_mutation(monkeypatch):
"""It should produce a GraphQL mutation with the args filled in."""
arg_types = {
'foo': 'String!',
'bar': '[Int]'
}
monkeypatch.setattr(cylc.flow.tui.data, 'ARGUMENT_TYPES', arg_types)
assert generate_mutation(
'my_mutation',
['foo', 'bar']
) == '''
mutation($foo: String!, $bar: [Int]) {
my_mutation (foos: $foo, bars: $bar) {
result
}
}
'''
| gpl-3.0 |
fosfataza/protwis | construct/migrations/0002_auto_20180117_1457.py | 3 | 1640 | # Generated by Django 2.0.1 on 2018-01-17 13:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('ligand', '0001_initial'),
('construct', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='crystallizationligandconc',
name='ligand',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ligand.Ligand'),
),
migrations.AddField(
model_name='crystallizationligandconc',
name='ligand_role',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ligand.LigandRole'),
),
migrations.AddField(
model_name='crystallization',
name='chemical_lists',
field=models.ManyToManyField(to='construct.ChemicalList'),
),
migrations.AddField(
model_name='crystallization',
name='crystal_method',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='construct.CrystallizationMethods'),
),
migrations.AddField(
model_name='crystallization',
name='crystal_type',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='construct.CrystallizationTypes'),
),
migrations.AddField(
model_name='crystallization',
name='ligands',
field=models.ManyToManyField(to='construct.CrystallizationLigandConc'),
),
]
| apache-2.0 |
dessHub/bc-14-online-store-application | flask/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/optionaltags.py | 354 | 10534 | from __future__ import absolute_import, division, unicode_literals
from . import base
class Filter(base.Filter):
def slider(self):
previous1 = previous2 = None
for token in self.source:
if previous1 is not None:
yield previous2, previous1, token
previous2 = previous1
previous1 = token
if previous1 is not None:
yield previous2, previous1, None
def __iter__(self):
for previous, token, next in self.slider():
type = token["type"]
if type == "StartTag":
if (token["data"] or
not self.is_optional_start(token["name"], previous, next)):
yield token
elif type == "EndTag":
if not self.is_optional_end(token["name"], next):
yield token
else:
yield token
def is_optional_start(self, tagname, previous, next):
type = next and next["type"] or None
if tagname == 'html':
# An html element's start tag may be omitted if the first thing
# inside the html element is not a space character or a comment.
return type not in ("Comment", "SpaceCharacters")
elif tagname == 'head':
# A head element's start tag may be omitted if the first thing
# inside the head element is an element.
# XXX: we also omit the start tag if the head element is empty
if type in ("StartTag", "EmptyTag"):
return True
elif type == "EndTag":
return next["name"] == "head"
elif tagname == 'body':
# A body element's start tag may be omitted if the first thing
# inside the body element is not a space character or a comment,
# except if the first thing inside the body element is a script
# or style element and the node immediately preceding the body
# element is a head element whose end tag has been omitted.
if type in ("Comment", "SpaceCharacters"):
return False
elif type == "StartTag":
# XXX: we do not look at the preceding event, so we never omit
# the body element's start tag if it's followed by a script or
# a style element.
return next["name"] not in ('script', 'style')
else:
return True
elif tagname == 'colgroup':
# A colgroup element's start tag may be omitted if the first thing
# inside the colgroup element is a col element, and if the element
# is not immediately preceded by another colgroup element whose
# end tag has been omitted.
if type in ("StartTag", "EmptyTag"):
# XXX: we do not look at the preceding event, so instead we never
# omit the colgroup element's end tag when it is immediately
# followed by another colgroup element. See is_optional_end.
return next["name"] == "col"
else:
return False
elif tagname == 'tbody':
# A tbody element's start tag may be omitted if the first thing
# inside the tbody element is a tr element, and if the element is
# not immediately preceded by a tbody, thead, or tfoot element
# whose end tag has been omitted.
if type == "StartTag":
# omit the thead and tfoot elements' end tag when they are
# immediately followed by a tbody element. See is_optional_end.
if previous and previous['type'] == 'EndTag' and \
previous['name'] in ('tbody', 'thead', 'tfoot'):
return False
return next["name"] == 'tr'
else:
return False
return False
def is_optional_end(self, tagname, next):
type = next and next["type"] or None
if tagname in ('html', 'head', 'body'):
# An html element's end tag may be omitted if the html element
# is not immediately followed by a space character or a comment.
return type not in ("Comment", "SpaceCharacters")
elif tagname in ('li', 'optgroup', 'tr'):
# A li element's end tag may be omitted if the li element is
# immediately followed by another li element or if there is
# no more content in the parent element.
# An optgroup element's end tag may be omitted if the optgroup
# element is immediately followed by another optgroup element,
# or if there is no more content in the parent element.
# A tr element's end tag may be omitted if the tr element is
# immediately followed by another tr element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] == tagname
else:
return type == "EndTag" or type is None
elif tagname in ('dt', 'dd'):
# A dt element's end tag may be omitted if the dt element is
# immediately followed by another dt element or a dd element.
# A dd element's end tag may be omitted if the dd element is
# immediately followed by another dd element or a dt element,
# or if there is no more content in the parent element.
if type == "StartTag":
return next["name"] in ('dt', 'dd')
elif tagname == 'dd':
return type == "EndTag" or type is None
else:
return False
elif tagname == 'p':
# A p element's end tag may be omitted if the p element is
# immediately followed by an address, article, aside,
# blockquote, datagrid, dialog, dir, div, dl, fieldset,
# footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu,
# nav, ol, p, pre, section, table, or ul, element, or if
# there is no more content in the parent element.
if type in ("StartTag", "EmptyTag"):
return next["name"] in ('address', 'article', 'aside',
'blockquote', 'datagrid', 'dialog',
'dir', 'div', 'dl', 'fieldset', 'footer',
'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
'header', 'hr', 'menu', 'nav', 'ol',
'p', 'pre', 'section', 'table', 'ul')
else:
return type == "EndTag" or type is None
elif tagname == 'option':
# An option element's end tag may be omitted if the option
# element is immediately followed by another option element,
# or if it is immediately followed by an optgroup
# element, or if there is no more content in the parent
# element.
if type == "StartTag":
return next["name"] in ('option', 'optgroup')
else:
return type == "EndTag" or type is None
elif tagname in ('rt', 'rp'):
# An rt element's end tag may be omitted if the rt element is
# immediately followed by an rt or rp element, or if there is
# no more content in the parent element.
# An rp element's end tag may be omitted if the rp element is
# immediately followed by an rt or rp element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] in ('rt', 'rp')
else:
return type == "EndTag" or type is None
elif tagname == 'colgroup':
# A colgroup element's end tag may be omitted if the colgroup
# element is not immediately followed by a space character or
# a comment.
if type in ("Comment", "SpaceCharacters"):
return False
elif type == "StartTag":
# XXX: we also look for an immediately following colgroup
# element. See is_optional_start.
return next["name"] != 'colgroup'
else:
return True
elif tagname in ('thead', 'tbody'):
# A thead element's end tag may be omitted if the thead element
# is immediately followed by a tbody or tfoot element.
# A tbody element's end tag may be omitted if the tbody element
# is immediately followed by a tbody or tfoot element, or if
# there is no more content in the parent element.
# A tfoot element's end tag may be omitted if the tfoot element
# is immediately followed by a tbody element, or if there is no
# more content in the parent element.
# XXX: we never omit the end tag when the following element is
# a tbody. See is_optional_start.
if type == "StartTag":
return next["name"] in ['tbody', 'tfoot']
elif tagname == 'tbody':
return type == "EndTag" or type is None
else:
return False
elif tagname == 'tfoot':
# A tfoot element's end tag may be omitted if the tfoot element
# is immediately followed by a tbody element, or if there is no
# more content in the parent element.
# XXX: we never omit the end tag when the following element is
# a tbody. See is_optional_start.
if type == "StartTag":
return next["name"] == 'tbody'
else:
return type == "EndTag" or type is None
elif tagname in ('td', 'th'):
# A td element's end tag may be omitted if the td element is
# immediately followed by a td or th element, or if there is
# no more content in the parent element.
# A th element's end tag may be omitted if the th element is
# immediately followed by a td or th element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] in ('td', 'th')
else:
return type == "EndTag" or type is None
return False
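# Illustrative sketch of driving this filter directly; the token dicts follow
# the treewalker format assumed by __iter__ above, and the stream is hand-written:
#
#   tokens = [
#       {'type': 'StartTag', 'name': 'p', 'data': {}},
#       {'type': 'Characters', 'data': 'hi'},
#       {'type': 'EndTag', 'name': 'p'},
#   ]
#   list(Filter(tokens))  # the trailing </p> is dropped as optional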
| gpl-3.0 |
mcocdawc/chemopt | src/chemopt/utilities/_print_versions.py | 2 | 4591 | # The following code was taken from the pandas project and modified.
# http://pandas.pydata.org/
import codecs
import importlib
import locale
import os
import platform
import struct
import sys
def get_sys_info():
"Returns system information as a dict"
blob = []
# commit = cc._git_hash
# blob.append(('commit', commit))
try:
(sysname, nodename, release, version,
machine, processor) = platform.uname()
blob.extend([
("python", "%d.%d.%d.%s.%s" % sys.version_info[:]),
("python-bits", struct.calcsize("P") * 8),
("OS", "%s" % (sysname)),
("OS-release", "%s" % (release)),
# ("Version", "%s" % (version)),
("machine", "%s" % (machine)),
("processor", "%s" % (processor)),
# ("byteorder", "%s" % sys.byteorder),
("LC_ALL", "%s" % os.environ.get('LC_ALL', "None")),
("LANG", "%s" % os.environ.get('LANG', "None")),
("LOCALE", "%s.%s" % locale.getlocale()),
])
except Exception:
pass
return blob
def show_versions(as_json=False):
sys_info = get_sys_info()
deps = [
# (MODULE_NAME, f(mod) -> mod version)
("chemcoord", lambda mod: mod.__version__),
("numpy", lambda mod: mod.version.version),
("scipy", lambda mod: mod.version.version),
("pandas", lambda mod: mod.__version__),
("numba", lambda mod: mod.__version__),
("sortedcontainers", lambda mod: mod.__version__),
("sympy", lambda mod: mod.__version__),
("pytest", lambda mod: mod.__version__),
("pip", lambda mod: mod.__version__),
("setuptools", lambda mod: mod.__version__),
("IPython", lambda mod: mod.__version__),
("sphinx", lambda mod: mod.__version__),
# ("tables", lambda mod: mod.__version__),
# ("matplotlib", lambda mod: mod.__version__),
# ("Cython", lambda mod: mod.__version__),
# ("xarray", lambda mod: mod.__version__),
# ("patsy", lambda mod: mod.__version__),
# ("dateutil", lambda mod: mod.__version__),
# ("pytz", lambda mod: mod.VERSION),
# ("blosc", lambda mod: mod.__version__),
# ("bottleneck", lambda mod: mod.__version__),
# ("numexpr", lambda mod: mod.__version__),
# ("feather", lambda mod: mod.__version__),
# ("openpyxl", lambda mod: mod.__version__),
# ("xlrd", lambda mod: mod.__VERSION__),
# ("xlwt", lambda mod: mod.__VERSION__),
# ("xlsxwriter", lambda mod: mod.__version__),
# ("lxml", lambda mod: mod.etree.__version__),
# ("bs4", lambda mod: mod.__version__),
# ("html5lib", lambda mod: mod.__version__),
# ("sqlalchemy", lambda mod: mod.__version__),
# ("pymysql", lambda mod: mod.__version__),
# ("psycopg2", lambda mod: mod.__version__),
# ("jinja2", lambda mod: mod.__version__),
# ("s3fs", lambda mod: mod.__version__),
# ("pandas_gbq", lambda mod: mod.__version__),
# ("pandas_datareader", lambda mod: mod.__version__)
]
deps_blob = list()
for (modname, ver_f) in deps:
try:
if modname in sys.modules:
mod = sys.modules[modname]
else:
mod = importlib.import_module(modname)
ver = ver_f(mod)
deps_blob.append((modname, ver))
except Exception:
deps_blob.append((modname, None))
if (as_json):
try:
import json
except Exception:
import simplejson as json
j = dict(system=dict(sys_info), dependencies=dict(deps_blob))
if as_json is True:
print(j)
else:
with codecs.open(as_json, "wb", encoding='utf8') as f:
json.dump(j, f, indent=2)
else:
print("\nINSTALLED VERSIONS")
print("------------------")
for k, stat in sys_info:
print("%s: %s" % (k, stat))
print("")
for k, stat in deps_blob:
print("%s: %s" % (k, stat))
def main():
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-j", "--json", metavar="FILE", nargs=1,
help="Save output as JSON into file, pass in "
"'-' to output to stdout")
options = parser.parse_args()[0]
if options.json == "-":
options.json = True
show_versions(as_json=options.json)
return 0
if __name__ == "__main__":
sys.exit(main())
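# Example invocations (illustrative):
#   python _print_versions.py            # human-readable version table
#   python _print_versions.py --json -   # the same data as JSON on stdout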
| lgpl-3.0 |
da1z/intellij-community | python/lib/Lib/pwd.py | 93 | 2552 | """
This module provides access to the Unix password database.
Password database entries are reported as 7-tuples containing the
following items from the password database (see `<pwd.h>'), in order:
pw_name, pw_passwd, pw_uid, pw_gid, pw_gecos, pw_dir, pw_shell. The
uid and gid items are integers, all others are strings. An exception
is raised if the entry asked for cannot be found.
"""
__all__ = ['getpwuid', 'getpwnam', 'getpwall']
from os import _name, _posix_impl
from org.python.core.Py import newString
if _name == 'nt':
raise ImportError, 'pwd module not supported on Windows'
class struct_passwd(tuple):
"""
pwd.struct_passwd: Results from getpw*() routines.
This object may be accessed either as a tuple of
(pw_name,pw_passwd,pw_uid,pw_gid,pw_gecos,pw_dir,pw_shell)
or via the object attributes as named in the above tuple.
"""
attrs = ['pw_name', 'pw_passwd', 'pw_uid', 'pw_gid', 'pw_gecos',
'pw_dir', 'pw_shell']
def __new__(cls, pwd):
pwd = (newString(pwd.loginName), newString(pwd.password), int(pwd.UID),
int(pwd.GID), newString(pwd.GECOS), newString(pwd.home),
newString(pwd.shell))
return tuple.__new__(cls, pwd)
def __getattr__(self, attr):
try:
return self[self.attrs.index(attr)]
except ValueError:
raise AttributeError
def getpwuid(uid):
"""
getpwuid(uid) -> (pw_name,pw_passwd,pw_uid,
pw_gid,pw_gecos,pw_dir,pw_shell)
Return the password database entry for the given numeric user ID.
See pwd.__doc__ for more on password database entries.
"""
entry = _posix_impl.getpwuid(uid)
if not entry:
raise KeyError(uid)
return struct_passwd(entry)
def getpwnam(name):
"""
getpwnam(name) -> (pw_name,pw_passwd,pw_uid,
pw_gid,pw_gecos,pw_dir,pw_shell)
Return the password database entry for the given user name.
See pwd.__doc__ for more on password database entries.
"""
entry = _posix_impl.getpwnam(name)
if not entry:
raise KeyError(name)
return struct_passwd(entry)
def getpwall():
"""
getpwall() -> list_of_entries
Return a list of all available password database entries,
in arbitrary order.
See pwd.__doc__ for more on password database entries.
"""
entries = []
while True:
entry = _posix_impl.getpwent()
if not entry:
break
entries.append(struct_passwd(entry))
return entries
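# Illustrative usage (results depend on the host system's password database):
#
#   import pwd
#   entry = pwd.getpwnam('root')
#   entry.pw_uid, entry.pw_dir   # e.g. (0, '/root')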
| apache-2.0 |
InAnimaTe/CouchPotatoServer | libs/tornado/platform/common.py | 285 | 3403 | """Lowest-common-denominator implementations of platform functionality."""
from __future__ import absolute_import, division, print_function, with_statement
import errno
import socket
from tornado.platform import interface
class Waker(interface.Waker):
"""Create an OS independent asynchronous pipe.
For use on platforms that don't have os.pipe() (or where pipes cannot
be passed to select()), but do have sockets. This includes Windows
and Jython.
"""
def __init__(self):
# Based on Zope select_trigger.py:
# https://github.com/zopefoundation/Zope/blob/master/src/ZServer/medusa/thread/select_trigger.py
self.writer = socket.socket()
# Disable buffering -- pulling the trigger sends 1 byte,
# and we want that sent immediately, to wake up ASAP.
self.writer.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
count = 0
while 1:
count += 1
# Bind to a local port; for efficiency, let the OS pick
# a free port for us.
# Unfortunately, stress tests showed that we may not
# be able to connect to that port ("Address already in
# use") despite that the OS picked it. This appears
# to be a race bug in the Windows socket implementation.
# So we loop until a connect() succeeds (almost always
# on the first try). See the long thread at
# http://mail.zope.org/pipermail/zope/2005-July/160433.html
# for hideous details.
a = socket.socket()
a.bind(("127.0.0.1", 0))
a.listen(1)
connect_address = a.getsockname() # assigned (host, port) pair
try:
self.writer.connect(connect_address)
break # success
except socket.error as detail:
if (not hasattr(errno, 'WSAEADDRINUSE') or
detail[0] != errno.WSAEADDRINUSE):
# "Address already in use" is the only error
# I've seen on two WinXP Pro SP2 boxes, under
# Pythons 2.3.5 and 2.4.1.
raise
# (10048, 'Address already in use')
# assert count <= 2 # never triggered in Tim's tests
if count >= 10: # I've never seen it go above 2
a.close()
self.writer.close()
raise socket.error("Cannot bind trigger!")
# Close `a` and try again. Note: I originally put a short
# sleep() here, but it didn't appear to help or hurt.
a.close()
self.reader, addr = a.accept()
self.reader.setblocking(0)
self.writer.setblocking(0)
a.close()
self.reader_fd = self.reader.fileno()
def fileno(self):
return self.reader.fileno()
def write_fileno(self):
return self.writer.fileno()
def wake(self):
try:
self.writer.send(b"x")
except (IOError, socket.error):
pass
def consume(self):
try:
while True:
result = self.reader.recv(1024)
if not result:
break
except (IOError, socket.error):
pass
def close(self):
self.reader.close()
self.writer.close()
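# Minimal usage sketch (illustrative; assumes a select()-based event loop):
#
#   import select
#   w = Waker()
#   # another thread calls w.wake() to interrupt the blocking select()
#   select.select([w.fileno()], [], [], 60)
#   w.consume()   # drain the wake-up bytes before blocking again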
| gpl-3.0 |
jmschrei/scikit-learn | sklearn/cluster/k_means_.py | 30 | 55793 | """K-means clustering"""
# Authors: Gael Varoquaux <[email protected]>
# Thomas Rueckstiess <[email protected]>
# James Bergstra <[email protected]>
# Jan Schlueter <[email protected]>
# Nelle Varoquaux
# Peter Prettenhofer <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, ClusterMixin, TransformerMixin
from ..metrics.pairwise import euclidean_distances
from ..utils.extmath import row_norms, squared_norm
from ..utils.sparsefuncs_fast import assign_rows_csr
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.fixes import astype
from ..utils import check_array
from ..utils import check_random_state
from ..utils import as_float_array
from ..utils import gen_batches
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..utils.random import choice
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.six import string_types
from . import _k_means
###############################################################################
# Initialization heuristic
def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None):
"""Init n_clusters seeds according to k-means++
Parameters
-----------
X: array or sparse matrix, shape (n_samples, n_features)
The data to pick seeds for. To avoid memory copy, the input data
should be double precision (dtype=np.float64).
n_clusters: integer
The number of seeds to choose
x_squared_norms: array, shape (n_samples,)
Squared Euclidean norm of each data point.
random_state: numpy.RandomState
The generator used to initialize the centers.
n_local_trials: integer, optional
The number of seeding trials for each center (except the first),
of which the one reducing inertia the most is greedily chosen.
Set to None to make the number of trials depend logarithmically
on the number of seeds (2+log(k)); this is the default.
Notes
-----
Selects initial cluster centers for k-means clustering in a smart way
to speed up convergence. see: Arthur, D. and Vassilvitskii, S.
"k-means++: the advantages of careful seeding". ACM-SIAM symposium
on Discrete algorithms. 2007
Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip,
which is the implementation used in the aforementioned paper.
"""
n_samples, n_features = X.shape
centers = np.empty((n_clusters, n_features))
assert x_squared_norms is not None, 'x_squared_norms None in _k_init'
# Set the number of local seeding trials if none is given
if n_local_trials is None:
# This is what Arthur/Vassilvitskii tried, but did not report
# specific results for other than mentioning in the conclusion
# that it helped.
n_local_trials = 2 + int(np.log(n_clusters))
# Pick first center randomly
center_id = random_state.randint(n_samples)
if sp.issparse(X):
centers[0] = X[center_id].toarray()
else:
centers[0] = X[center_id]
# Initialize list of closest distances and calculate current potential
closest_dist_sq = euclidean_distances(
centers[0, np.newaxis], X, Y_norm_squared=x_squared_norms,
squared=True)
current_pot = closest_dist_sq.sum()
# Pick the remaining n_clusters-1 points
for c in range(1, n_clusters):
# Choose center candidates by sampling with probability proportional
# to the squared distance to the closest existing center
rand_vals = random_state.random_sample(n_local_trials) * current_pot
candidate_ids = np.searchsorted(closest_dist_sq.cumsum(), rand_vals)
# Compute distances to center candidates
distance_to_candidates = euclidean_distances(
X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)
# Decide which candidate is the best
best_candidate = None
best_pot = None
best_dist_sq = None
for trial in range(n_local_trials):
# Compute potential when including center candidate
new_dist_sq = np.minimum(closest_dist_sq,
distance_to_candidates[trial])
new_pot = new_dist_sq.sum()
# Store result if it is the best local trial so far
if (best_candidate is None) or (new_pot < best_pot):
best_candidate = candidate_ids[trial]
best_pot = new_pot
best_dist_sq = new_dist_sq
# Permanently add best center candidate found in local tries
if sp.issparse(X):
centers[c] = X[best_candidate].toarray()
else:
centers[c] = X[best_candidate]
current_pot = best_pot
closest_dist_sq = best_dist_sq
return centers
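# Illustrative sketch of the D^2 sampling step used above (made-up data):
#
#   import numpy as np
#   rng = np.random.RandomState(0)
#   closest_dist_sq = rng.rand(10)                    # per-point D(x)^2
#   r = rng.random_sample() * closest_dist_sq.sum()
#   next_seed = np.searchsorted(closest_dist_sq.cumsum(), r)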
###############################################################################
# K-means batch estimation by EM (expectation maximization)
def _validate_center_shape(X, n_centers, centers):
"""Check if centers is compatible with X and n_centers"""
if len(centers) != n_centers:
raise ValueError('The shape of the initial centers (%s) '
'does not match the number of clusters %i'
% (centers.shape, n_centers))
if centers.shape[1] != X.shape[1]:
raise ValueError(
"The number of features of the initial centers %s "
"does not match the number of features of the data %s."
% (centers.shape[1], X.shape[1]))
def _tolerance(X, tol):
"""Return a tolerance which is independent of the dataset"""
if sp.issparse(X):
variances = mean_variance_axis(X, axis=0)[1]
else:
variances = np.var(X, axis=0)
return np.mean(variances) * tol
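# Illustrative usage of k_means below (the data are made up):
#
#   import numpy as np
#   X = np.random.RandomState(0).rand(100, 2)
#   centers, labels, inertia = k_means(X, n_clusters=3, random_state=0)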
def k_means(X, n_clusters, init='k-means++', precompute_distances='auto',
n_init=10, max_iter=300, verbose=False,
tol=1e-4, random_state=None, copy_x=True, n_jobs=1,
return_n_iter=False):
"""K-means clustering algorithm.
Read more in the :ref:`User Guide <k_means>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
The observations to cluster.
n_clusters : int
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, optional, default 300
Maximum number of iterations of the k-means algorithm to run.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init : {'k-means++', 'random', or ndarray, or a callable}, optional
Method for initialization, default to 'k-means++':
'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': generate k centroids from a Gaussian with mean and
variance estimated from the data.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
If a callable is passed, it should take arguments X, k and
a random state and return an initialization.
precompute_distances : {'auto', True, False}
Precompute distances (faster but takes more memory).
'auto' : do not precompute distances if n_samples * n_clusters > 12
million. This corresponds to about 100MB overhead per job using
double precision.
True : always precompute distances
False : never precompute distances
tol : float, optional
The relative increment in the results before declaring convergence.
verbose : boolean, optional
Verbosity mode.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
copy_x : boolean, optional
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
centroid : float ndarray with shape (k, n_features)
Centroids found at the last iteration of k-means.
label : integer ndarray with shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia : float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
best_n_iter: int
Number of iterations corresponding to the best results.
Returned only if `return_n_iter` is set to True.
"""
if n_init <= 0:
raise ValueError("Invalid number of initializations."
" n_init=%d must be bigger than zero." % n_init)
random_state = check_random_state(random_state)
if max_iter <= 0:
raise ValueError('Number of iterations should be a positive number,'
' got %d instead' % max_iter)
best_inertia = np.infty
X = as_float_array(X, copy=copy_x)
tol = _tolerance(X, tol)
# If the distances are precomputed every job will create a matrix of shape
# (n_clusters, n_samples). To stop KMeans from eating up memory we only
# activate this if the created matrix is guaranteed to be under 100MB. 12
# million entries consume a little under 100MB if they are of type double.
if precompute_distances == 'auto':
n_samples = X.shape[0]
precompute_distances = (n_clusters * n_samples) < 12e6
elif isinstance(precompute_distances, bool):
pass
else:
raise ValueError("precompute_distances should be 'auto' or True/False"
", but a value of %r was passed" %
precompute_distances)
# subtract of mean of x for more accurate distance computations
if not sp.issparse(X) or hasattr(init, '__array__'):
X_mean = X.mean(axis=0)
if not sp.issparse(X):
# The copy was already done above
X -= X_mean
if hasattr(init, '__array__'):
init = check_array(init, dtype=np.float64, copy=True)
_validate_center_shape(X, n_clusters, init)
init -= X_mean
if n_init != 1:
warnings.warn(
'Explicit initial center position passed: '
'performing only one init in k-means instead of n_init=%d'
% n_init, RuntimeWarning, stacklevel=2)
n_init = 1
# precompute squared norms of data points
x_squared_norms = row_norms(X, squared=True)
best_labels, best_inertia, best_centers = None, None, None
if n_jobs == 1:
# For a single thread, less memory is needed if we just store one set
# of the best results (as opposed to one set per run per thread).
for it in range(n_init):
# run a k-means once
labels, inertia, centers, n_iter_ = _kmeans_single(
X, n_clusters, max_iter=max_iter, init=init, verbose=verbose,
precompute_distances=precompute_distances, tol=tol,
x_squared_norms=x_squared_norms, random_state=random_state)
# determine if these results are the best so far
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
best_n_iter = n_iter_
else:
# parallelisation of k-means runs
seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
results = Parallel(n_jobs=n_jobs, verbose=0)(
delayed(_kmeans_single)(X, n_clusters, max_iter=max_iter,
init=init, verbose=verbose, tol=tol,
precompute_distances=precompute_distances,
x_squared_norms=x_squared_norms,
# Change seed to ensure variety
random_state=seed)
for seed in seeds)
# Get results with the lowest inertia
labels, inertia, centers, n_iters = zip(*results)
best = np.argmin(inertia)
best_labels = labels[best]
best_inertia = inertia[best]
best_centers = centers[best]
best_n_iter = n_iters[best]
if not sp.issparse(X):
if not copy_x:
X += X_mean
best_centers += X_mean
if return_n_iter:
return best_centers, best_labels, best_inertia, best_n_iter
else:
return best_centers, best_labels, best_inertia
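# Illustrative usage sketch (not part of the original module; the toy array
# below is made up): a direct functional call might look like
#
#     >>> import numpy as np
#     >>> X = np.array([[1., 2.], [1., 4.], [10., 2.], [10., 4.]])
#     >>> centers, labels, inertia = k_means(X, n_clusters=2, random_state=0)
#     >>> labels.shape
#     (4,)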
def _kmeans_single(X, n_clusters, x_squared_norms, max_iter=300,
init='k-means++', verbose=False, random_state=None,
tol=1e-4, precompute_distances=True):
"""A single run of k-means, assumes preparation completed prior.
Parameters
----------
X: array-like of floats, shape (n_samples, n_features)
The observations to cluster.
n_clusters: int
The number of clusters to form as well as the number of
centroids to generate.
max_iter: int, optional, default 300
Maximum number of iterations of the k-means algorithm to run.
init: {'k-means++', 'random', or ndarray, or a callable}, optional
        Method for initialization, defaults to 'k-means++':
        'k-means++' : selects initial cluster centers for k-means
            clustering in a smart way to speed up convergence. See section
            Notes in k_init for more details.
        'random': choose k observations (rows) at random from data for
            the initial centroids.
If an ndarray is passed, it should be of shape (k, p) and gives
the initial centers.
        If a callable is passed, it should take arguments X, k and
        a random state and return an initialization.
tol: float, optional
        Relative tolerance with regards to inertia to declare convergence.
verbose: boolean, optional
Verbosity mode
x_squared_norms: array
Precomputed x_squared_norms.
precompute_distances : boolean, default: True
Precompute distances (faster but takes more memory).
random_state: integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Returns
-------
centroid: float ndarray with shape (k, n_features)
Centroids found at the last iteration of k-means.
label: integer ndarray with shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia: float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
n_iter : int
Number of iterations run.
"""
random_state = check_random_state(random_state)
best_labels, best_inertia, best_centers = None, None, None
# init
centers = _init_centroids(X, n_clusters, init, random_state=random_state,
x_squared_norms=x_squared_norms)
if verbose:
print("Initialization complete")
# Allocate memory to store the distances for each sample to its
    # closest center, for reallocation in case of ties
distances = np.zeros(shape=(X.shape[0],), dtype=np.float64)
# iterations
for i in range(max_iter):
centers_old = centers.copy()
# labels assignment is also called the E-step of EM
labels, inertia = \
_labels_inertia(X, x_squared_norms, centers,
precompute_distances=precompute_distances,
distances=distances)
# computation of the means is also called the M-step of EM
if sp.issparse(X):
centers = _k_means._centers_sparse(X, labels, n_clusters,
distances)
else:
centers = _k_means._centers_dense(X, labels, n_clusters, distances)
if verbose:
print("Iteration %2d, inertia %.3f" % (i, inertia))
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
shift = squared_norm(centers_old - centers)
if shift <= tol:
if verbose:
print("Converged at iteration %d" % i)
break
if shift > 0:
# rerun E-step in case of non-convergence so that predicted labels
# match cluster centers
best_labels, best_inertia = \
_labels_inertia(X, x_squared_norms, best_centers,
precompute_distances=precompute_distances,
distances=distances)
return best_labels, best_inertia, best_centers, i + 1
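# A minimal dense-numpy sketch of one Lloyd iteration (illustrative only; the
# real E-step above is _labels_inertia and the M-step lives in the cython
# helpers _k_means._centers_dense / _centers_sparse):
#
#     >>> d2 = ((X[:, None, :] - centers[None, :, :]) ** 2).sum(axis=2)
#     >>> labels = d2.argmin(axis=1)
#     >>> centers = np.array([X[labels == j].mean(axis=0)
#     ...                     for j in range(centers.shape[0])])
#
# (Empty clusters are not handled in this sketch, unlike the real code.)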
def _labels_inertia_precompute_dense(X, x_squared_norms, centers, distances):
"""Compute labels and inertia using a full distance matrix.
This will overwrite the 'distances' array in-place.
Parameters
----------
    X : numpy array, shape (n_samples, n_features)
Input data.
x_squared_norms : numpy array, shape (n_samples,)
Precomputed squared norms of X.
centers : numpy array, shape (n_clusters, n_features)
Cluster centers which data is assigned to.
distances : numpy array, shape (n_samples,)
Pre-allocated array in which distances are stored.
Returns
-------
labels : numpy array, dtype=np.int, shape (n_samples,)
Indices of clusters that samples are assigned to.
inertia : float
        Sum of squared distances of samples to their closest cluster center.
"""
n_samples = X.shape[0]
k = centers.shape[0]
all_distances = euclidean_distances(centers, X, x_squared_norms,
squared=True)
labels = np.empty(n_samples, dtype=np.int32)
labels.fill(-1)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(k):
dist = all_distances[center_id]
labels[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
if n_samples == distances.shape[0]:
# distances will be changed in-place
distances[:] = mindist
inertia = mindist.sum()
return labels, inertia
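# Note (hedged): for dense inputs the per-center loop above is equivalent to
# the vectorized pair below; the explicit loop keeps the work simple once the
# full (k, n_samples) `all_distances` matrix has been materialized:
#
#     >>> labels = all_distances.argmin(axis=0).astype(np.int32)
#     >>> mindist = all_distances.min(axis=0)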
def _labels_inertia(X, x_squared_norms, centers,
precompute_distances=True, distances=None):
"""E step of the K-means EM algorithm.
Compute the labels and the inertia of the given samples and centers.
This will compute the distances in-place.
Parameters
----------
X: float64 array-like or CSR sparse matrix, shape (n_samples, n_features)
The input samples to assign to the labels.
x_squared_norms: array, shape (n_samples,)
Precomputed squared euclidean norm of each data point, to speed up
computations.
centers: float64 array, shape (k, n_features)
The cluster centers.
precompute_distances : boolean, default: True
Precompute distances (faster but takes more memory).
distances: float64 array, shape (n_samples,)
Pre-allocated array to be filled in with each sample's distance
to the closest center.
Returns
-------
    labels : int array, shape (n_samples,)
        The resulting assignment.
inertia : float
        Sum of squared distances of samples to their closest cluster center.
"""
n_samples = X.shape[0]
    # set the default value of labels to -1 to be able to detect any anomaly
# easily
labels = -np.ones(n_samples, np.int32)
if distances is None:
distances = np.zeros(shape=(0,), dtype=np.float64)
# distances will be changed in-place
if sp.issparse(X):
inertia = _k_means._assign_labels_csr(
X, x_squared_norms, centers, labels, distances=distances)
else:
if precompute_distances:
return _labels_inertia_precompute_dense(X, x_squared_norms,
centers, distances)
inertia = _k_means._assign_labels_array(
X, x_squared_norms, centers, labels, distances=distances)
return labels, inertia
def _init_centroids(X, k, init, random_state=None, x_squared_norms=None,
init_size=None):
"""Compute the initial centroids
Parameters
----------
X: array, shape (n_samples, n_features)
k: int
number of centroids
init: {'k-means++', 'random' or ndarray or callable} optional
Method for initialization
random_state: integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
x_squared_norms: array, shape (n_samples,), optional
Squared euclidean norm of each data point. Pass it if you have it at
        hand already to avoid it being recomputed here. Default: None
init_size : int, optional
Number of samples to randomly sample for speeding up the
        initialization (sometimes at the expense of accuracy): the
        algorithm is initialized by running a batch KMeans on a
random subset of the data. This needs to be larger than k.
Returns
-------
centers: array, shape(k, n_features)
"""
random_state = check_random_state(random_state)
n_samples = X.shape[0]
if x_squared_norms is None:
x_squared_norms = row_norms(X, squared=True)
if init_size is not None and init_size < n_samples:
if init_size < k:
warnings.warn(
"init_size=%d should be larger than k=%d. "
"Setting it to 3*k" % (init_size, k),
RuntimeWarning, stacklevel=2)
init_size = 3 * k
init_indices = random_state.random_integers(
0, n_samples - 1, init_size)
X = X[init_indices]
x_squared_norms = x_squared_norms[init_indices]
n_samples = X.shape[0]
elif n_samples < k:
raise ValueError(
"n_samples=%d should be larger than k=%d" % (n_samples, k))
if isinstance(init, string_types) and init == 'k-means++':
centers = _k_init(X, k, random_state=random_state,
x_squared_norms=x_squared_norms)
elif isinstance(init, string_types) and init == 'random':
seeds = random_state.permutation(n_samples)[:k]
centers = X[seeds]
elif hasattr(init, '__array__'):
centers = init
elif callable(init):
centers = init(X, k, random_state=random_state)
else:
raise ValueError("the init parameter for the k-means should "
"be 'k-means++' or 'random' or an ndarray, "
"'%s' (type '%s') was passed." % (init, type(init)))
if sp.issparse(centers):
centers = centers.toarray()
_validate_center_shape(X, k, centers)
return centers
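# Illustrative sketch (hypothetical values): seeding with an explicit array of
# shape (k, n_features) bypasses both the 'k-means++' and 'random' strategies:
#
#     >>> seed_centers = np.array([[0., 0.], [5., 5.]])
#     >>> centers = _init_centroids(X, 2, seed_centers)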
class KMeans(BaseEstimator, ClusterMixin, TransformerMixin):
"""K-Means clustering
Read more in the :ref:`User Guide <k_means>`.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, default: 300
Maximum number of iterations of the k-means algorithm for a
single run.
n_init : int, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init : {'k-means++', 'random' or an ndarray}
Method for initialization, defaults to 'k-means++':
        'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
precompute_distances : {'auto', True, False}
Precompute distances (faster but takes more memory).
'auto' : do not precompute distances if n_samples * n_clusters > 12
million. This corresponds to about 100MB overhead per job using
double precision.
True : always precompute distances
False : never precompute distances
tol : float, default: 1e-4
Relative tolerance with regards to inertia to declare convergence
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
verbose : int, default 0
Verbosity mode.
copy_x : boolean, default True
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point
inertia_ : float
        Sum of squared distances of samples to their closest cluster center.
Notes
    -----
The k-means problem is solved using Lloyd's algorithm.
    The average complexity is given by O(k n T), where n is the number of
    samples and T is the number of iterations.
The worst case complexity is given by O(n^(k+2/p)) with
n = n_samples, p = n_features. (D. Arthur and S. Vassilvitskii,
'How slow is the k-means method?' SoCG2006)
In practice, the k-means algorithm is very fast (one of the fastest
clustering algorithms available), but it falls in local minima. That's why
it can be useful to restart it several times.
See also
--------
MiniBatchKMeans:
Alternative online implementation that does incremental updates
of the centers positions using mini-batches.
For large scale learning (say n_samples > 10k) MiniBatchKMeans is
        probably much faster than the default batch implementation.
"""
def __init__(self, n_clusters=8, init='k-means++', n_init=10, max_iter=300,
tol=1e-4, precompute_distances='auto',
verbose=0, random_state=None, copy_x=True, n_jobs=1):
self.n_clusters = n_clusters
self.init = init
self.max_iter = max_iter
self.tol = tol
self.precompute_distances = precompute_distances
self.n_init = n_init
self.verbose = verbose
self.random_state = random_state
self.copy_x = copy_x
self.n_jobs = n_jobs
def _check_fit_data(self, X):
"""Verify that the number of samples given is larger than k"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
if X.shape[0] < self.n_clusters:
raise ValueError("n_samples=%d should be >= n_clusters=%d" % (
X.shape[0], self.n_clusters))
return X
def _check_test_data(self, X):
X = check_array(X, accept_sparse='csr', dtype=FLOAT_DTYPES,
warn_on_dtype=True)
n_samples, n_features = X.shape
expected_n_features = self.cluster_centers_.shape[1]
if not n_features == expected_n_features:
raise ValueError("Incorrect number of features. "
"Got %d features, expected %d" % (
n_features, expected_n_features))
return X
def fit(self, X, y=None):
"""Compute k-means clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
"""
random_state = check_random_state(self.random_state)
X = self._check_fit_data(X)
self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = \
k_means(
X, n_clusters=self.n_clusters, init=self.init,
n_init=self.n_init, max_iter=self.max_iter,
verbose=self.verbose, return_n_iter=True,
precompute_distances=self.precompute_distances,
tol=self.tol, random_state=random_state, copy_x=self.copy_x,
n_jobs=self.n_jobs)
return self
def fit_predict(self, X, y=None):
"""Compute cluster centers and predict cluster index for each sample.
Convenience method; equivalent to calling fit(X) followed by
predict(X).
"""
return self.fit(X).labels_
def fit_transform(self, X, y=None):
"""Compute clustering and transform X to cluster-distance space.
Equivalent to fit(X).transform(X), but more efficiently implemented.
"""
# Currently, this just skips a copy of the data if it is not in
# np.array or CSR format already.
# XXX This skips _check_test_data, which may change the dtype;
# we should refactor the input validation.
X = self._check_fit_data(X)
return self.fit(X)._transform(X)
def transform(self, X, y=None):
"""Transform X to a cluster-distance space.
In the new space, each dimension is the distance to the cluster
centers. Note that even if X is sparse, the array returned by
`transform` will typically be dense.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to transform.
Returns
-------
X_new : array, shape [n_samples, k]
X transformed in the new space.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
return self._transform(X)
def _transform(self, X):
"""guts of transform method; no input validation"""
return euclidean_distances(X, self.cluster_centers_)
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
x_squared_norms = row_norms(X, squared=True)
return _labels_inertia(X, x_squared_norms, self.cluster_centers_)[0]
def score(self, X, y=None):
"""Opposite of the value of X on the K-means objective.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data.
Returns
-------
score : float
Opposite of the value of X on the K-means objective.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
x_squared_norms = row_norms(X, squared=True)
return -_labels_inertia(X, x_squared_norms, self.cluster_centers_)[1]
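# Typical estimator usage (a sketch with toy data, not from the original
# source; assumes a 2-feature X as in the earlier examples):
#
#     >>> km = KMeans(n_clusters=2, random_state=0).fit(X)
#     >>> km.cluster_centers_        # shape (2, n_features)
#     >>> km.predict([[0., 0.]])     # index of the closest center
#     >>> km.transform(X)            # distances to each center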
def _mini_batch_step(X, x_squared_norms, centers, counts,
old_center_buffer, compute_squared_diff,
distances, random_reassign=False,
random_state=None, reassignment_ratio=.01,
verbose=False):
"""Incremental update of the centers for the Minibatch K-Means algorithm.
Parameters
----------
X : array, shape (n_samples, n_features)
The original data array.
x_squared_norms : array, shape (n_samples,)
Squared euclidean norm of each data point.
centers : array, shape (k, n_features)
The cluster centers. This array is MODIFIED IN PLACE
counts : array, shape (k,)
The vector in which we keep track of the numbers of elements in a
cluster. This array is MODIFIED IN PLACE
    distances : array, dtype float64, shape (n_samples,), optional
If not None, should be a pre-allocated array that will be used to store
the distances of each sample to its closest center.
May not be None when random_reassign is True.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
random_reassign : boolean, optional
If True, centers with very low counts are randomly reassigned
to observations.
reassignment_ratio : float, optional
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more likely to be reassigned, which means that the
model will take longer to converge, but should converge in a
better clustering.
verbose : bool, optional, default False
Controls the verbosity.
compute_squared_diff : bool
If set to False, the squared diff computation is skipped.
    old_center_buffer : array, shape (n_features,)
Copy of old centers for monitoring convergence.
Returns
-------
inertia : float
        Sum of squared distances of samples to their closest cluster center.
    squared_diff : float
        Sum of squared distances between previous and updated cluster
        centers.
"""
# Perform label assignment to nearest centers
nearest_center, inertia = _labels_inertia(X, x_squared_norms, centers,
distances=distances)
if random_reassign and reassignment_ratio > 0:
random_state = check_random_state(random_state)
# Reassign clusters that have very low counts
to_reassign = counts < reassignment_ratio * counts.max()
# pick at most .5 * batch_size samples as new centers
if to_reassign.sum() > .5 * X.shape[0]:
indices_dont_reassign = np.argsort(counts)[int(.5 * X.shape[0]):]
to_reassign[indices_dont_reassign] = False
n_reassigns = to_reassign.sum()
if n_reassigns:
# Pick new clusters amongst observations with uniform probability
new_centers = choice(X.shape[0], replace=False, size=n_reassigns,
random_state=random_state)
if verbose:
print("[MiniBatchKMeans] Reassigning %i cluster centers."
% n_reassigns)
if sp.issparse(X) and not sp.issparse(centers):
assign_rows_csr(X,
astype(new_centers, np.intp),
astype(np.where(to_reassign)[0], np.intp),
centers)
else:
centers[to_reassign] = X[new_centers]
# reset counts of reassigned centers, but don't reset them too small
# to avoid instant reassignment. This is a pretty dirty hack as it
# also modifies the learning rates.
counts[to_reassign] = np.min(counts[~to_reassign])
# implementation for the sparse CSR representation completely written in
# cython
if sp.issparse(X):
return inertia, _k_means._mini_batch_update_csr(
X, x_squared_norms, centers, counts, nearest_center,
old_center_buffer, compute_squared_diff)
# dense variant in mostly numpy (not as memory efficient though)
k = centers.shape[0]
squared_diff = 0.0
for center_idx in range(k):
# find points from minibatch that are assigned to this center
center_mask = nearest_center == center_idx
count = center_mask.sum()
if count > 0:
if compute_squared_diff:
old_center_buffer[:] = centers[center_idx]
# inplace remove previous count scaling
centers[center_idx] *= counts[center_idx]
# inplace sum with new points members of this cluster
centers[center_idx] += np.sum(X[center_mask], axis=0)
# update the count statistics for this center
counts[center_idx] += count
# inplace rescale to compute mean of all points (old and new)
centers[center_idx] /= counts[center_idx]
# update the squared diff if necessary
if compute_squared_diff:
diff = centers[center_idx].ravel() - old_center_buffer.ravel()
squared_diff += np.dot(diff, diff)
return inertia, squared_diff
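# The dense branch above maintains each center as a running mean of every
# point ever assigned to it; schematically (a sketch, ignoring random
# reassignment):
#
#     new_center = (old_center * old_count + batch_points.sum(axis=0)) \
#                  / (old_count + batch_count)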
def _mini_batch_convergence(model, iteration_idx, n_iter, tol,
n_samples, centers_squared_diff, batch_inertia,
context, verbose=0):
"""Helper function to encapsulte the early stopping logic"""
# Normalize inertia to be able to compare values when
# batch_size changes
batch_inertia /= model.batch_size
centers_squared_diff /= model.batch_size
# Compute an Exponentially Weighted Average of the squared
# diff to monitor the convergence while discarding
# minibatch-local stochastic variability:
# https://en.wikipedia.org/wiki/Moving_average
ewa_diff = context.get('ewa_diff')
ewa_inertia = context.get('ewa_inertia')
if ewa_diff is None:
ewa_diff = centers_squared_diff
ewa_inertia = batch_inertia
else:
alpha = float(model.batch_size) * 2.0 / (n_samples + 1)
alpha = 1.0 if alpha > 1.0 else alpha
ewa_diff = ewa_diff * (1 - alpha) + centers_squared_diff * alpha
ewa_inertia = ewa_inertia * (1 - alpha) + batch_inertia * alpha
# Log progress to be able to monitor convergence
if verbose:
progress_msg = (
'Minibatch iteration %d/%d:'
' mean batch inertia: %f, ewa inertia: %f ' % (
iteration_idx + 1, n_iter, batch_inertia,
ewa_inertia))
print(progress_msg)
# Early stopping based on absolute tolerance on squared change of
# centers position (using EWA smoothing)
if tol > 0.0 and ewa_diff <= tol:
if verbose:
print('Converged (small centers change) at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# Early stopping heuristic due to lack of improvement on smoothed inertia
ewa_inertia_min = context.get('ewa_inertia_min')
no_improvement = context.get('no_improvement', 0)
if ewa_inertia_min is None or ewa_inertia < ewa_inertia_min:
no_improvement = 0
ewa_inertia_min = ewa_inertia
else:
no_improvement += 1
if (model.max_no_improvement is not None
and no_improvement >= model.max_no_improvement):
if verbose:
print('Converged (lack of improvement in inertia)'
' at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# update the convergence context to maintain state across successive calls:
context['ewa_diff'] = ewa_diff
context['ewa_inertia'] = ewa_inertia
context['ewa_inertia_min'] = ewa_inertia_min
context['no_improvement'] = no_improvement
return False
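# Worked example of the EWA weight (illustrative numbers): with batch_size=100
# and n_samples=10000, alpha = 100 * 2.0 / 10001 ~= 0.02, so each new
# minibatch contributes roughly 2% to the smoothed statistics and per-batch
# stochastic noise is largely averaged out before the stopping tests run.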
class MiniBatchKMeans(KMeans):
"""Mini-Batch K-Means clustering
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, optional
Maximum number of iterations over the complete dataset before
stopping independently of any early stopping criterion heuristics.
max_no_improvement : int, default: 10
Control early stopping based on the consecutive number of mini
batches that does not yield an improvement on the smoothed inertia.
To disable convergence detection based on inertia, set
max_no_improvement to None.
tol : float, default: 0.0
        Control early stopping based on the relative center changes as
        measured by a smoothed, variance-normalized estimate of the mean
        center squared position changes. This early stopping heuristic is
        closer to the one used for the batch variant of the algorithm
but induces a slight computational and memory overhead over the
inertia heuristic.
To disable convergence detection based on normalized center
change, set tol to 0.0 (default).
batch_size : int, optional, default: 100
Size of the mini batches.
init_size : int, optional, default: 3 * batch_size
Number of samples to randomly sample for speeding up the
        initialization (sometimes at the expense of accuracy): the
        algorithm is initialized by running a batch KMeans on a
random subset of the data. This needs to be larger than n_clusters.
init : {'k-means++', 'random' or an ndarray}, default: 'k-means++'
Method for initialization, defaults to 'k-means++':
        'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
n_init : int, default=3
Number of random initializations that are tried.
In contrast to KMeans, the algorithm is only run once, using the
best of the ``n_init`` initializations as measured by inertia.
compute_labels : boolean, default=True
Compute label assignment and inertia for the complete dataset
once the minibatch optimization has converged in fit.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
reassignment_ratio : float, default: 0.01
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more easily reassigned, which means that the
model will take longer to converge, but should converge in a
better clustering.
verbose : boolean, optional
Verbosity mode.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point (if compute_labels is set to True).
inertia_ : float
The value of the inertia criterion associated with the chosen
partition (if compute_labels is set to True). The inertia is
        defined as the sum of squared distances of samples to their
        nearest cluster center.
Notes
-----
See http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf
"""
def __init__(self, n_clusters=8, init='k-means++', max_iter=100,
batch_size=100, verbose=0, compute_labels=True,
random_state=None, tol=0.0, max_no_improvement=10,
init_size=None, n_init=3, reassignment_ratio=0.01):
super(MiniBatchKMeans, self).__init__(
n_clusters=n_clusters, init=init, max_iter=max_iter,
verbose=verbose, random_state=random_state, tol=tol, n_init=n_init)
self.max_no_improvement = max_no_improvement
self.batch_size = batch_size
self.compute_labels = compute_labels
self.init_size = init_size
self.reassignment_ratio = reassignment_ratio
def fit(self, X, y=None):
"""Compute the centroids on X by chunking it into mini-batches.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Coordinates of the data points to cluster
"""
random_state = check_random_state(self.random_state)
X = check_array(X, accept_sparse="csr", order='C', dtype=np.float64)
n_samples, n_features = X.shape
if n_samples < self.n_clusters:
raise ValueError("Number of samples smaller than number "
"of clusters.")
n_init = self.n_init
if hasattr(self.init, '__array__'):
self.init = np.ascontiguousarray(self.init, dtype=np.float64)
if n_init != 1:
warnings.warn(
'Explicit initial center position passed: '
'performing only one init in MiniBatchKMeans instead of '
'n_init=%d'
% self.n_init, RuntimeWarning, stacklevel=2)
n_init = 1
x_squared_norms = row_norms(X, squared=True)
if self.tol > 0.0:
tol = _tolerance(X, self.tol)
# using tol-based early stopping needs the allocation of a
            # dedicated buffer which can be expensive for high dim data:
# hence we allocate it outside of the main loop
old_center_buffer = np.zeros(n_features, np.double)
else:
tol = 0.0
# no need for the center buffer if tol-based early stopping is
# disabled
old_center_buffer = np.zeros(0, np.double)
distances = np.zeros(self.batch_size, dtype=np.float64)
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
n_iter = int(self.max_iter * n_batches)
init_size = self.init_size
if init_size is None:
init_size = 3 * self.batch_size
if init_size > n_samples:
init_size = n_samples
self.init_size_ = init_size
validation_indices = random_state.random_integers(
0, n_samples - 1, init_size)
X_valid = X[validation_indices]
x_squared_norms_valid = x_squared_norms[validation_indices]
# perform several inits with random sub-sets
best_inertia = None
for init_idx in range(n_init):
if self.verbose:
print("Init %d/%d with method: %s"
% (init_idx + 1, n_init, self.init))
counts = np.zeros(self.n_clusters, dtype=np.int32)
# TODO: once the `k_means` function works with sparse input we
# should refactor the following init to use it instead.
# Initialize the centers using only a fraction of the data as we
# expect n_samples to be very large when using MiniBatchKMeans
cluster_centers = _init_centroids(
X, self.n_clusters, self.init,
random_state=random_state,
x_squared_norms=x_squared_norms,
init_size=init_size)
# Compute the label assignment on the init dataset
batch_inertia, centers_squared_diff = _mini_batch_step(
X_valid, x_squared_norms[validation_indices],
cluster_centers, counts, old_center_buffer, False,
distances=None, verbose=self.verbose)
# Keep only the best cluster centers across independent inits on
# the common validation set
_, inertia = _labels_inertia(X_valid, x_squared_norms_valid,
cluster_centers)
if self.verbose:
print("Inertia for init %d/%d: %f"
% (init_idx + 1, n_init, inertia))
if best_inertia is None or inertia < best_inertia:
self.cluster_centers_ = cluster_centers
self.counts_ = counts
best_inertia = inertia
# Empty context to be used inplace by the convergence check routine
convergence_context = {}
# Perform the iterative optimization until the final convergence
# criterion
for iteration_idx in range(n_iter):
# Sample a minibatch from the full dataset
minibatch_indices = random_state.random_integers(
0, n_samples - 1, self.batch_size)
# Perform the actual update step on the minibatch data
batch_inertia, centers_squared_diff = _mini_batch_step(
X[minibatch_indices], x_squared_norms[minibatch_indices],
self.cluster_centers_, self.counts_,
old_center_buffer, tol > 0.0, distances=distances,
# Here we randomly choose whether to perform
# random reassignment: the choice is done as a function
# of the iteration index, and the minimum number of
# counts, in order to force this reassignment to happen
# every once in a while
random_reassign=((iteration_idx + 1)
% (10 + self.counts_.min()) == 0),
random_state=random_state,
reassignment_ratio=self.reassignment_ratio,
verbose=self.verbose)
# Monitor convergence and do early stopping if necessary
if _mini_batch_convergence(
self, iteration_idx, n_iter, tol, n_samples,
centers_squared_diff, batch_inertia, convergence_context,
verbose=self.verbose):
break
self.n_iter_ = iteration_idx + 1
if self.compute_labels:
self.labels_, self.inertia_ = self._labels_inertia_minibatch(X)
return self
def _labels_inertia_minibatch(self, X):
"""Compute labels and inertia using mini batches.
        This is slightly slower than doing everything at once but prevents
memory errors / segfaults.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
        labels : array, shape (n_samples,)
Cluster labels for each point.
inertia : float
Sum of squared distances of points to nearest cluster.
"""
if self.verbose:
print('Computing label assignment and total inertia')
x_squared_norms = row_norms(X, squared=True)
slices = gen_batches(X.shape[0], self.batch_size)
results = [_labels_inertia(X[s], x_squared_norms[s],
self.cluster_centers_) for s in slices]
labels, inertia = zip(*results)
return np.hstack(labels), np.sum(inertia)
def partial_fit(self, X, y=None):
"""Update k means estimate on a single mini-batch X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Coordinates of the data points to cluster.
"""
X = check_array(X, accept_sparse="csr")
n_samples, n_features = X.shape
if hasattr(self.init, '__array__'):
self.init = np.ascontiguousarray(self.init, dtype=np.float64)
if n_samples == 0:
return self
x_squared_norms = row_norms(X, squared=True)
self.random_state_ = getattr(self, "random_state_",
check_random_state(self.random_state))
if (not hasattr(self, 'counts_')
or not hasattr(self, 'cluster_centers_')):
            # this is the first call to partial_fit on this object:
# initialize the cluster centers
self.cluster_centers_ = _init_centroids(
X, self.n_clusters, self.init,
random_state=self.random_state_,
x_squared_norms=x_squared_norms, init_size=self.init_size)
self.counts_ = np.zeros(self.n_clusters, dtype=np.int32)
random_reassign = False
distances = None
else:
            # The lower the minimum count is, the more we do random
            # reassignment; however, we don't want to do random
            # reassignment too often, to allow for building up counts
random_reassign = self.random_state_.randint(
10 * (1 + self.counts_.min())) == 0
distances = np.zeros(X.shape[0], dtype=np.float64)
_mini_batch_step(X, x_squared_norms, self.cluster_centers_,
self.counts_, np.zeros(0, np.double), 0,
random_reassign=random_reassign, distances=distances,
random_state=self.random_state_,
reassignment_ratio=self.reassignment_ratio,
verbose=self.verbose)
if self.compute_labels:
self.labels_, self.inertia_ = _labels_inertia(
X, x_squared_norms, self.cluster_centers_)
return self
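    # Streaming usage sketch (illustrative; the chunking scheme below is
    # made up):
    #
    #     >>> mbk = MiniBatchKMeans(n_clusters=8)
    #     >>> for chunk in np.array_split(X, 10):
    #     ...     mbk.partial_fit(chunk)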
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
return self._labels_inertia_minibatch(X)[0]
| bsd-3-clause |
koparasy/faultinjection-gem5 | src/arch/power/PowerTLB.py | 20 | 1765 | # -*- mode:python -*-
# Copyright (c) 2009 The University of Edinburgh
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Timothy M. Jones
from m5.SimObject import SimObject
from m5.params import *
class PowerTLB(SimObject):
type = 'PowerTLB'
cxx_class = 'PowerISA::TLB'
size = Param.Int(64, "TLB size")
| bsd-3-clause |
JonathonReinhart/scuba | scuba/config.py | 1 | 11648 | import os
import yaml
import re
import shlex
from .constants import *
from .utils import *
class ConfigError(Exception):
pass
class ConfigNotFoundError(ConfigError):
pass
# http://stackoverflow.com/a/9577670
class Loader(yaml.SafeLoader):
def __init__(self, stream):
self._root = os.path.split(stream.name)[0]
self._cache = dict()
super().__init__(stream)
def from_yaml(self, node):
'''
Implementes a !from_yaml constructor with the following syntax:
!from_yaml filename key
Arguments:
filename: Filename of external YAML document from which to load,
relative to the current YAML file.
key: Key from external YAML document to return,
using a dot-separated syntax for nested keys.
Examples:
!from_yaml external.yml pop
!from_yaml external.yml foo.bar.pop
!from_yaml "another file.yml" "foo bar.snap crackle.pop"
'''
# Load the content from the node, as a scalar
content = self.construct_scalar(node)
# Split on unquoted spaces
parts = shlex.split(content)
if len(parts) != 2:
raise yaml.YAMLError('Two arguments expected to !from_yaml')
filename, key = parts
# path is relative to the current YAML document
path = os.path.join(self._root, filename)
# Load the other YAML document
doc = self._cache.get(path)
if not doc:
with open(path, 'r') as f:
doc = yaml.load(f, self.__class__)
self._cache[path] = doc
# Retrieve the key
try:
cur = doc
# Use a negative look-behind to split the key on non-escaped '.' characters
for k in re.split(r'(?<!\\)\.', key):
cur = cur[k.replace('\\.', '.')] # Be sure to replace any escaped '.' characters with *just* the '.'
except KeyError:
raise yaml.YAMLError('Key "{}" not found in {}'.format(key, filename))
return cur
Loader.add_constructor('!from_yaml', Loader.from_yaml)
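# Usage sketch (hypothetical files): given a 'common.yml' next to '.scuba.yml'
# containing `image: debian:9`, a document loaded with this Loader can write
# `image: !from_yaml common.yml image` and the key is resolved at load time:
#
#     with open('.scuba.yml') as f:
#         data = yaml.load(f, Loader)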
def find_config():
'''Search up the directory hierarchy for .scuba.yml
    Returns: path, rel, config on success; raises ConfigNotFoundError if not found
path The absolute path of the directory where .scuba.yml was found
rel The relative path from the directory where .scuba.yml was found
to the current directory
config The loaded configuration
'''
cross_fs = 'SCUBA_DISCOVERY_ACROSS_FILESYSTEM' in os.environ
path = os.getcwd()
rel = ''
while True:
cfg_path = os.path.join(path, SCUBA_YML)
if os.path.exists(cfg_path):
return path, rel, load_config(cfg_path)
if not cross_fs and os.path.ismount(path):
msg = '{} not found here or any parent up to mount point {}'.format(SCUBA_YML, path) \
+ '\nStopping at filesystem boundary (SCUBA_DISCOVERY_ACROSS_FILESYSTEM not set).'
raise ConfigNotFoundError(msg)
# Traverse up directory hierarchy
path, rest = os.path.split(path)
if not rest:
raise ConfigNotFoundError('{} not found here or any parent directories'.format(SCUBA_YML))
# Accumulate the relative path back to where we started
rel = os.path.join(rest, rel)
def _process_script_node(node, name):
'''Process a script-type node
This handles nodes that follow the *Common script schema*,
as outlined in doc/yaml-reference.md.
'''
if isinstance(node, str):
# The script is just the text itself
return [node]
if isinstance(node, dict):
# There must be a "script" key, which must be a list of strings
script = node.get('script')
if not script:
raise ConfigError("{}: must have a 'script' subkey".format(name))
if isinstance(script, list):
return script
if isinstance(script, str):
return [script]
raise ConfigError("{}.script: must be a string or list".format(name))
raise ConfigError("{}: must be string or dict".format(name))
def _process_environment(node, name):
# Environment can be either a list of strings ("KEY=VALUE") or a mapping
# Environment keys and values are always strings
result = {}
if not node:
pass
elif isinstance(node, dict):
for k, v in node.items():
if v is None:
v = os.getenv(k, '')
result[k] = str(v)
elif isinstance(node, list):
for e in node:
k, v = parse_env_var(e)
result[k] = v
else:
raise ConfigError("'{}' must be list or mapping, not {}".format(
name, type(node).__name__))
return result
def _get_entrypoint(data):
# N.B. We can't use data.get() here, because that might return
# None, leading to ambiguity between entrypoint being absent or set
# to a null value.
#
# "Note that a null is different from an empty string and that a
# mapping entry with some key and a null value is valid and
# different from not having that key in the mapping."
# - http://yaml.org/type/null.html
key = 'entrypoint'
if not key in data:
return None
ep = data[key]
# We represent a null value as an empty string.
if ep is None:
ep = ''
if not isinstance(ep, str):
raise ConfigError("'{}' must be a string, not {}".format(
key, type(ep).__name__))
return ep
class ScubaAlias:
def __init__(self, name, script, image, entrypoint, environment, shell, as_root):
self.name = name
self.script = script
self.image = image
self.entrypoint = entrypoint
self.environment = environment
self.shell = shell
self.as_root = as_root
@classmethod
def from_dict(cls, name, node):
script = _process_script_node(node, name)
image = None
entrypoint = None
environment = None
shell = None
as_root = False
if isinstance(node, dict): # Rich alias
image = node.get('image')
entrypoint = _get_entrypoint(node)
environment = _process_environment(
node.get('environment'),
'{}.{}'.format(name, 'environment'))
shell = node.get('shell')
as_root = node.get('root', as_root)
return cls(name, script, image, entrypoint, environment, shell, as_root)
class ScubaContext:
pass
class ScubaConfig:
def __init__(self, **data):
optional_nodes = ('image','aliases','hooks','entrypoint','environment','shell')
# Check for unrecognized nodes
extra = [n for n in data if not n in optional_nodes]
if extra:
raise ConfigError('{}: Unrecognized node{}: {}'.format(SCUBA_YML,
's' if len(extra) > 1 else '', ', '.join(extra)))
self._image = data.get('image')
self._shell = data.get('shell', DEFAULT_SHELL)
self._entrypoint = _get_entrypoint(data)
self._load_aliases(data)
self._load_hooks(data)
self._environment = self._load_environment(data)
def _load_aliases(self, data):
self._aliases = {}
for name, node in data.get('aliases', {}).items():
if ' ' in name:
raise ConfigError('Alias names cannot contain spaces')
self._aliases[name] = ScubaAlias.from_dict(name, node)
def _load_hooks(self, data):
self._hooks = {}
for name in ('user', 'root',):
node = data.get('hooks', {}).get(name)
if node:
hook = _process_script_node(node, name)
self._hooks[name] = hook
def _load_environment(self, data):
return _process_environment(data.get('environment'), 'environment')
@property
def image(self):
if not self._image:
raise ConfigError("Top-level 'image' not set")
return self._image
@property
def entrypoint(self):
return self._entrypoint
@property
def aliases(self):
return self._aliases
@property
def hooks(self):
return self._hooks
@property
def environment(self):
return self._environment
@property
def shell(self):
return self._shell
def process_command(self, command, image=None, shell=None):
'''Processes a user command using aliases
Arguments:
command A user command list (e.g. argv)
image Override the image from .scuba.yml
shell Override the shell from .scuba.yml
Returns: A ScubaContext object with the following attributes:
script: a list of command line strings
image: the docker image name to use
'''
result = ScubaContext()
result.script = None
result.image = None
result.entrypoint = self.entrypoint
result.environment = self.environment.copy()
result.shell = self.shell
result.as_root = False
if command:
alias = self.aliases.get(command[0])
if not alias:
# Command is not an alias; use it as-is.
result.script = [shell_quote_cmd(command)]
else:
# Using an alias
# Does this alias override the image and/or entrypoint?
if alias.image:
result.image = alias.image
if alias.entrypoint is not None:
result.entrypoint = alias.entrypoint
if alias.shell is not None:
result.shell = alias.shell
if alias.as_root:
result.as_root = True
# Merge/override the environment
if alias.environment:
result.environment.update(alias.environment)
if len(alias.script) > 1:
# Alias is a multiline script; no additional
# arguments are allowed in the scuba invocation.
if len(command) > 1:
raise ConfigError('Additional arguments not allowed with multi-line aliases')
result.script = alias.script
else:
                # Alias is a single-line script; perform substitution
# and add user arguments.
command.pop(0)
result.script = [alias.script[0] + ' ' + shell_quote_cmd(command)]
result.script = flatten_list(result.script)
# If a shell was given on the CLI, it should override the shell set by
# the alias or top-level config
if shell:
result.shell = shell
# If an image was given, it overrides what might have been set by an alias
if image:
result.image = image
        # If the image was still not set, then try to get it from the config,
# which will raise a ConfigError if it is not set
if not result.image:
result.image = self.image
return result
def load_config(path):
try:
with open(path, 'r') as f:
data = yaml.load(f, Loader)
except IOError as e:
raise ConfigError('Error opening {}: {}'.format(SCUBA_YML, e))
except yaml.YAMLError as e:
raise ConfigError('Error loading {}: {}'.format(SCUBA_YML, e))
return ScubaConfig(**(data or {}))
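# Illustrative end-to-end sketch (hypothetical config values):
#
#     cfg = ScubaConfig(image='debian:9', aliases={'build': 'make all'})
#     ctx = cfg.process_command(['build', '-j4'])
#     # ctx.image == 'debian:9'; ctx.script == ['make all -j4'] (extra
#     # arguments are shell-quoted and appended to the single-line alias)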
| mit |
badele/home-assistant | homeassistant/components/switch/modbus.py | 9 | 4290 | """
homeassistant.components.switch.modbus
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support for Modbus switches.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.modbus/
"""
import logging
import homeassistant.components.modbus as modbus
from homeassistant.helpers.entity import ToggleEntity
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['modbus']
def setup_platform(hass, config, add_devices, discovery_info=None):
""" Read configuration and create Modbus devices. """
switches = []
slave = config.get("slave", None)
if modbus.TYPE == "serial" and not slave:
_LOGGER.error("No slave number provided for serial Modbus")
return False
registers = config.get("registers")
if registers:
for regnum, register in registers.items():
bits = register.get("bits")
for bitnum, bit in bits.items():
if bit.get("name"):
switches.append(ModbusSwitch(bit.get("name"),
slave,
regnum,
bitnum))
coils = config.get("coils")
if coils:
for coilnum, coil in coils.items():
switches.append(ModbusSwitch(coil.get("name"),
slave,
coilnum,
0,
coil=True))
add_devices(switches)
class ModbusSwitch(ToggleEntity):
# pylint: disable=too-many-arguments
""" Represents a Modbus switch. """
def __init__(self, name, slave, register, bit, coil=False):
self._name = name
self.slave = int(slave) if slave else 1
self.register = int(register)
self.bit = int(bit)
self._coil = coil
self._is_on = None
self.register_value = None
def __str__(self):
return "%s: %s" % (self.name, self.state)
@property
def should_poll(self):
"""
We should poll, because slaves are not allowed to initiate
communication on Modbus networks.
"""
return True
@property
def unique_id(self):
""" Returns a unique id. """
return "MODBUS-SWITCH-{}-{}-{}".format(self.slave,
self.register,
self.bit)
@property
def is_on(self):
""" Returns True if switch is on. """
return self._is_on
@property
def name(self):
""" Get the name of the switch. """
return self._name
def turn_on(self, **kwargs):
""" Set switch on. """
if self.register_value is None:
self.update()
if self._coil:
modbus.NETWORK.write_coil(self.register, True)
else:
val = self.register_value | (0x0001 << self.bit)
modbus.NETWORK.write_register(unit=self.slave,
address=self.register,
value=val)
def turn_off(self, **kwargs):
""" Set switch off. """
if self.register_value is None:
self.update()
if self._coil:
modbus.NETWORK.write_coil(self.register, False)
else:
val = self.register_value & ~(0x0001 << self.bit)
modbus.NETWORK.write_register(unit=self.slave,
address=self.register,
value=val)
def update(self):
""" Update the state of the switch. """
if self._coil:
result = modbus.NETWORK.read_coils(self.register, 1)
self.register_value = result.bits[0]
self._is_on = self.register_value
else:
result = modbus.NETWORK.read_holding_registers(
unit=self.slave, address=self.register,
count=1)
val = 0
for i, res in enumerate(result.registers):
val += res * (2**(i*16))
self.register_value = val
self._is_on = (val & (0x0001 << self.bit) > 0)
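# Example configuration sketch (hedged: key names are taken from the parsing
# code in setup_platform above, values are made up):
#
#   switch:
#     platform: modbus
#     slave: 1
#     coils:
#       100:
#         name: Pump
#     registers:
#       16:
#         bits:
#           0:
#             name: Fan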
| mit |
RichardLitt/wyrd-django-dev | tests/regressiontests/admin_filters/tests.py | 4 | 33533 | from __future__ import absolute_import, unicode_literals
import datetime
from django.contrib.admin import (site, ModelAdmin, SimpleListFilter,
BooleanFieldListFilter)
from django.contrib.admin.views.main import ChangeList
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, RequestFactory
from django.test.utils import override_settings, six
from django.utils.encoding import force_text
from .models import Book, Department, Employee
def select_by(dictlist, key, value):
return [x for x in dictlist if x[key] == value][0]
class DecadeListFilter(SimpleListFilter):
def lookups(self, request, model_admin):
return (
('the 80s', "the 1980's"),
('the 90s', "the 1990's"),
('the 00s', "the 2000's"),
('other', "other decades"),
)
def queryset(self, request, queryset):
decade = self.value()
if decade == 'the 80s':
return queryset.filter(year__gte=1980, year__lte=1989)
if decade == 'the 90s':
return queryset.filter(year__gte=1990, year__lte=1999)
if decade == 'the 00s':
return queryset.filter(year__gte=2000, year__lte=2009)
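# How this wires up (hedged summary): the admin renders one link per lookups()
# entry; clicking "the 1990's" puts `?<parameter_name>=the+90s` in the URL
# (for the titled subclass below), self.value() then returns 'the 90s', and
# queryset() narrows the changelist to Book rows with 1990 <= year <= 1999.
# Returning None (the 'other' case) leaves the queryset untouched.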
class DecadeListFilterWithTitleAndParameter(DecadeListFilter):
title = 'publication decade'
parameter_name = 'publication-decade'
class DecadeListFilterWithoutTitle(DecadeListFilter):
parameter_name = 'publication-decade'
class DecadeListFilterWithoutParameter(DecadeListFilter):
title = 'publication decade'
class DecadeListFilterWithNoneReturningLookups(DecadeListFilterWithTitleAndParameter):
def lookups(self, request, model_admin):
pass
class DecadeListFilterWithFailingQueryset(DecadeListFilterWithTitleAndParameter):
def queryset(self, request, queryset):
raise 1/0
class DecadeListFilterWithQuerysetBasedLookups(DecadeListFilterWithTitleAndParameter):
def lookups(self, request, model_admin):
qs = model_admin.queryset(request)
if qs.filter(year__gte=1980, year__lte=1989).exists():
yield ('the 80s', "the 1980's")
if qs.filter(year__gte=1990, year__lte=1999).exists():
yield ('the 90s', "the 1990's")
if qs.filter(year__gte=2000, year__lte=2009).exists():
yield ('the 00s', "the 2000's")
class DecadeListFilterParameterEndsWith__In(DecadeListFilter):
title = 'publication decade'
    parameter_name = 'decade__in'  # Ends with '__in'
class DecadeListFilterParameterEndsWith__Isnull(DecadeListFilter):
title = 'publication decade'
    parameter_name = 'decade__isnull'  # Ends with '__isnull'
class CustomUserAdmin(UserAdmin):
list_filter = ('books_authored', 'books_contributed')
class BookAdmin(ModelAdmin):
list_filter = ('year', 'author', 'contributors', 'is_best_seller', 'date_registered', 'no')
ordering = ('-id',)
class BookAdminWithTupleBooleanFilter(BookAdmin):
list_filter = ('year', 'author', 'contributors', ('is_best_seller', BooleanFieldListFilter), 'date_registered', 'no')
class DecadeFilterBookAdmin(ModelAdmin):
list_filter = ('author', DecadeListFilterWithTitleAndParameter)
ordering = ('-id',)
class DecadeFilterBookAdminWithoutTitle(ModelAdmin):
list_filter = (DecadeListFilterWithoutTitle,)
class DecadeFilterBookAdminWithoutParameter(ModelAdmin):
list_filter = (DecadeListFilterWithoutParameter,)
class DecadeFilterBookAdminWithNoneReturningLookups(ModelAdmin):
list_filter = (DecadeListFilterWithNoneReturningLookups,)
class DecadeFilterBookAdminWithFailingQueryset(ModelAdmin):
list_filter = (DecadeListFilterWithFailingQueryset,)
class DecadeFilterBookAdminWithQuerysetBasedLookups(ModelAdmin):
list_filter = (DecadeListFilterWithQuerysetBasedLookups,)
class DecadeFilterBookAdminParameterEndsWith__In(ModelAdmin):
list_filter = (DecadeListFilterParameterEndsWith__In,)
class DecadeFilterBookAdminParameterEndsWith__Isnull(ModelAdmin):
list_filter = (DecadeListFilterParameterEndsWith__Isnull,)
class EmployeeAdmin(ModelAdmin):
list_display = ['name', 'department']
list_filter = ['department']
class ListFiltersTests(TestCase):
def setUp(self):
self.today = datetime.date.today()
self.tomorrow = self.today + datetime.timedelta(days=1)
self.one_week_ago = self.today - datetime.timedelta(days=7)
self.request_factory = RequestFactory()
# Users
self.alfred = User.objects.create_user('alfred', '[email protected]')
self.bob = User.objects.create_user('bob', '[email protected]')
self.lisa = User.objects.create_user('lisa', '[email protected]')
# Books
self.djangonaut_book = Book.objects.create(title='Djangonaut: an art of living', year=2009, author=self.alfred, is_best_seller=True, date_registered=self.today)
self.bio_book = Book.objects.create(title='Django: a biography', year=1999, author=self.alfred, is_best_seller=False, no=207)
self.django_book = Book.objects.create(title='The Django Book', year=None, author=self.bob, is_best_seller=None, date_registered=self.today, no=103)
self.gipsy_book = Book.objects.create(title='Gipsy guitar for dummies', year=2002, is_best_seller=True, date_registered=self.one_week_ago)
self.gipsy_book.contributors = [self.bob, self.lisa]
self.gipsy_book.save()
def get_changelist(self, request, model, modeladmin):
return ChangeList(request, model, modeladmin.list_display, modeladmin.list_display_links,
modeladmin.list_filter, modeladmin.date_hierarchy, modeladmin.search_fields,
modeladmin.list_select_related, modeladmin.list_per_page, modeladmin.list_max_show_all, modeladmin.list_editable, modeladmin)
def test_datefieldlistfilter(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/')
changelist = self.get_changelist(request, Book, modeladmin)
request = self.request_factory.get('/', {'date_registered__gte': self.today,
'date_registered__lt': self.tomorrow})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_text(filterspec.title), 'date registered')
choice = select_by(filterspec.choices(changelist), "display", "Today")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?date_registered__gte=%s'
'&date_registered__lt=%s'
% (self.today, self.tomorrow))
request = self.request_factory.get('/', {'date_registered__gte': self.today.replace(day=1),
'date_registered__lt': self.tomorrow})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
if (self.today.year, self.today.month) == (self.one_week_ago.year, self.one_week_ago.month):
# In case one week ago is in the same month.
self.assertEqual(list(queryset), [self.gipsy_book, self.django_book, self.djangonaut_book])
else:
self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_text(filterspec.title), 'date registered')
choice = select_by(filterspec.choices(changelist), "display", "This month")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?date_registered__gte=%s'
'&date_registered__lt=%s'
% (self.today.replace(day=1), self.tomorrow))
request = self.request_factory.get('/', {'date_registered__gte': self.today.replace(month=1, day=1),
'date_registered__lt': self.tomorrow})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
if self.today.year == self.one_week_ago.year:
# In case one week ago is in the same year.
self.assertEqual(list(queryset), [self.gipsy_book, self.django_book, self.djangonaut_book])
else:
self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_text(filterspec.title), 'date registered')
choice = select_by(filterspec.choices(changelist), "display", "This year")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?date_registered__gte=%s'
'&date_registered__lt=%s'
% (self.today.replace(month=1, day=1), self.tomorrow))
request = self.request_factory.get('/', {'date_registered__gte': str(self.one_week_ago),
'date_registered__lt': str(self.tomorrow)})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.gipsy_book, self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_text(filterspec.title), 'date registered')
choice = select_by(filterspec.choices(changelist), "display", "Past 7 days")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?date_registered__gte=%s'
'&date_registered__lt=%s'
% (str(self.one_week_ago), str(self.tomorrow)))
@override_settings(USE_TZ=True)
def test_datefieldlistfilter_with_time_zone_support(self):
# Regression for #17830
self.test_datefieldlistfilter()
def test_allvaluesfieldlistfilter(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/', {'year__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.django_book])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'year')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?year__isnull=True')
request = self.request_factory.get('/', {'year': '2002'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'year')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?year=2002')
def test_relatedfieldlistfilter_foreignkey(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/', {'author__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.gipsy_book])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'Verbose Author')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?author__isnull=True')
request = self.request_factory.get('/', {'author__id__exact': self.alfred.pk})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'Verbose Author')
# order of choices depends on User model, which has no order
choice = select_by(filterspec.choices(changelist), "display", "alfred")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?author__id__exact=%d' % self.alfred.pk)
def test_relatedfieldlistfilter_manytomany(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/', {'contributors__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.django_book, self.bio_book, self.djangonaut_book])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][2]
self.assertEqual(force_text(filterspec.title), 'Verbose Contributors')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?contributors__isnull=True')
request = self.request_factory.get('/', {'contributors__id__exact': self.bob.pk})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][2]
self.assertEqual(force_text(filterspec.title), 'Verbose Contributors')
choice = select_by(filterspec.choices(changelist), "display", "bob")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?contributors__id__exact=%d' % self.bob.pk)
def test_relatedfieldlistfilter_reverse_relationships(self):
modeladmin = CustomUserAdmin(User, site)
# FK relationship -----
request = self.request_factory.get('/', {'books_authored__isnull': 'True'})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.lisa])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'book')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?books_authored__isnull=True')
request = self.request_factory.get('/', {'books_authored__id__exact': self.bio_book.pk})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'book')
choice = select_by(filterspec.choices(changelist), "display", self.bio_book.title)
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?books_authored__id__exact=%d' % self.bio_book.pk)
# M2M relationship -----
request = self.request_factory.get('/', {'books_contributed__isnull': 'True'})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.alfred])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'book')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?books_contributed__isnull=True')
request = self.request_factory.get('/', {'books_contributed__id__exact': self.django_book.pk})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'book')
choice = select_by(filterspec.choices(changelist), "display", self.django_book.title)
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?books_contributed__id__exact=%d' % self.django_book.pk)
def test_booleanfieldlistfilter(self):
modeladmin = BookAdmin(Book, site)
self.verify_booleanfieldlistfilter(modeladmin)
def test_booleanfieldlistfilter_tuple(self):
modeladmin = BookAdminWithTupleBooleanFilter(Book, site)
self.verify_booleanfieldlistfilter(modeladmin)
def verify_booleanfieldlistfilter(self, modeladmin):
request = self.request_factory.get('/')
changelist = self.get_changelist(request, Book, modeladmin)
request = self.request_factory.get('/', {'is_best_seller__exact': 0})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][3]
self.assertEqual(force_text(filterspec.title), 'is best seller')
choice = select_by(filterspec.choices(changelist), "display", "No")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?is_best_seller__exact=0')
request = self.request_factory.get('/', {'is_best_seller__exact': 1})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.gipsy_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][3]
self.assertEqual(force_text(filterspec.title), 'is best seller')
choice = select_by(filterspec.choices(changelist), "display", "Yes")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?is_best_seller__exact=1')
request = self.request_factory.get('/', {'is_best_seller__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.django_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][3]
self.assertEqual(force_text(filterspec.title), 'is best seller')
choice = select_by(filterspec.choices(changelist), "display", "Unknown")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?is_best_seller__isnull=True')
def test_simplelistfilter(self):
modeladmin = DecadeFilterBookAdmin(Book, site)
# Make sure that the first option is 'All' ---------------------------
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), list(Book.objects.all().order_by('-id')))
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[0]['display'], 'All')
self.assertEqual(choices[0]['selected'], True)
self.assertEqual(choices[0]['query_string'], '?')
# Look for books in the 1980s ----------------------------------------
request = self.request_factory.get('/', {'publication-decade': 'the 80s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[1]['display'], 'the 1980\'s')
self.assertEqual(choices[1]['selected'], True)
self.assertEqual(choices[1]['query_string'], '?publication-decade=the+80s')
# Look for books in the 1990s ----------------------------------------
request = self.request_factory.get('/', {'publication-decade': 'the 90s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['display'], 'the 1990\'s')
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?publication-decade=the+90s')
# Look for books in the 2000s ----------------------------------------
request = self.request_factory.get('/', {'publication-decade': 'the 00s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.gipsy_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[3]['display'], 'the 2000\'s')
self.assertEqual(choices[3]['selected'], True)
self.assertEqual(choices[3]['query_string'], '?publication-decade=the+00s')
# Combine multiple filters -------------------------------------------
request = self.request_factory.get('/', {'publication-decade': 'the 00s', 'author__id__exact': self.alfred.pk})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.djangonaut_book])
# Make sure the correct choices are selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[3]['display'], 'the 2000\'s')
self.assertEqual(choices[3]['selected'], True)
self.assertEqual(choices[3]['query_string'], '?publication-decade=the+00s&author__id__exact=%s' % self.alfred.pk)
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'Verbose Author')
choice = select_by(filterspec.choices(changelist), "display", "alfred")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?publication-decade=the+00s&author__id__exact=%s' % self.alfred.pk)
def test_listfilter_without_title(self):
"""
Any filter must define a title.
"""
modeladmin = DecadeFilterBookAdminWithoutTitle(Book, site)
request = self.request_factory.get('/', {})
six.assertRaisesRegex(self, ImproperlyConfigured,
"The list filter 'DecadeListFilterWithoutTitle' does not specify a 'title'.",
self.get_changelist, request, Book, modeladmin)
def test_simplelistfilter_without_parameter(self):
"""
Any SimpleListFilter must define a parameter_name.
"""
modeladmin = DecadeFilterBookAdminWithoutParameter(Book, site)
request = self.request_factory.get('/', {})
six.assertRaisesRegex(self, ImproperlyConfigured,
"The list filter 'DecadeListFilterWithoutParameter' does not specify a 'parameter_name'.",
self.get_changelist, request, Book, modeladmin)
def test_simplelistfilter_with_none_returning_lookups(self):
"""
        A SimpleListFilter's lookups method can return None, which disables
        the filter completely.
"""
modeladmin = DecadeFilterBookAdminWithNoneReturningLookups(Book, site)
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Book, modeladmin)
filterspec = changelist.get_filters(request)[0]
self.assertEqual(len(filterspec), 0)
def test_filter_with_failing_queryset(self):
"""
Ensure that when a filter's queryset method fails, it fails loudly and
the corresponding exception doesn't get swallowed.
Refs #17828.
"""
modeladmin = DecadeFilterBookAdminWithFailingQueryset(Book, site)
request = self.request_factory.get('/', {})
self.assertRaises(ZeroDivisionError, self.get_changelist, request, Book, modeladmin)
def test_simplelistfilter_with_queryset_based_lookups(self):
modeladmin = DecadeFilterBookAdminWithQuerysetBasedLookups(Book, site)
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Book, modeladmin)
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(len(choices), 3)
self.assertEqual(choices[0]['display'], 'All')
self.assertEqual(choices[0]['selected'], True)
self.assertEqual(choices[0]['query_string'], '?')
self.assertEqual(choices[1]['display'], 'the 1990\'s')
self.assertEqual(choices[1]['selected'], False)
self.assertEqual(choices[1]['query_string'], '?publication-decade=the+90s')
self.assertEqual(choices[2]['display'], 'the 2000\'s')
self.assertEqual(choices[2]['selected'], False)
self.assertEqual(choices[2]['query_string'], '?publication-decade=the+00s')
def test_two_characters_long_field(self):
"""
        Ensure that list_filter works with two-character-long field names.
Refs #16080.
"""
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/', {'no': '207'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.bio_book])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_text(filterspec.title), 'number')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?no=207')
def test_parameter_ends_with__in__or__isnull(self):
"""
Ensure that a SimpleListFilter's parameter name is not mistaken for a
model field if it ends with '__isnull' or '__in'.
Refs #17091.
"""
# When it ends with '__in' -----------------------------------------
modeladmin = DecadeFilterBookAdminParameterEndsWith__In(Book, site)
request = self.request_factory.get('/', {'decade__in': 'the 90s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['display'], 'the 1990\'s')
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?decade__in=the+90s')
# When it ends with '__isnull' ---------------------------------------
modeladmin = DecadeFilterBookAdminParameterEndsWith__Isnull(Book, site)
request = self.request_factory.get('/', {'decade__isnull': 'the 90s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['display'], 'the 1990\'s')
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?decade__isnull=the+90s')
def test_fk_with_to_field(self):
"""
Ensure that a filter on a FK respects the FK's to_field attribute.
Refs #17972.
"""
modeladmin = EmployeeAdmin(Employee, site)
dev = Department.objects.create(code='DEV', description='Development')
design = Department.objects.create(code='DSN', description='Design')
john = Employee.objects.create(name='John Blue', department=dev)
jack = Employee.objects.create(name='Jack Red', department=design)
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Employee, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [jack, john])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_text(filterspec.title), 'department')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[0]['display'], 'All')
self.assertEqual(choices[0]['selected'], True)
self.assertEqual(choices[0]['query_string'], '?')
self.assertEqual(choices[1]['display'], 'Development')
self.assertEqual(choices[1]['selected'], False)
self.assertEqual(choices[1]['query_string'], '?department__code__exact=DEV')
self.assertEqual(choices[2]['display'], 'Design')
self.assertEqual(choices[2]['selected'], False)
self.assertEqual(choices[2]['query_string'], '?department__code__exact=DSN')
# Filter by Department=='Development' --------------------------------
request = self.request_factory.get('/', {'department__code__exact': 'DEV'})
changelist = self.get_changelist(request, Employee, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [john])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_text(filterspec.title), 'department')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[0]['display'], 'All')
self.assertEqual(choices[0]['selected'], False)
self.assertEqual(choices[0]['query_string'], '?')
self.assertEqual(choices[1]['display'], 'Development')
self.assertEqual(choices[1]['selected'], True)
self.assertEqual(choices[1]['query_string'], '?department__code__exact=DEV')
self.assertEqual(choices[2]['display'], 'Design')
self.assertEqual(choices[2]['selected'], False)
self.assertEqual(choices[2]['query_string'], '?department__code__exact=DSN')
| bsd-3-clause |
vpramo/contrail-controller | src/vnsw/opencontrail-vrouter-netns/opencontrail_vrouter_netns/linux/utils.py | 15 | 4675 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Locaweb.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Juliano Martinez, Locaweb.
import fcntl
import os
import shlex
import signal
import socket
import struct
import tempfile
import sys
if sys.version_info[:2] == (2, 6):
import subprocess
else:
from eventlet.green import subprocess
from eventlet import greenthread
def _subprocess_setup():
# Python installs a SIGPIPE handler by default. This is usually not what
# non-Python subprocesses expect.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def subprocess_popen(args, stdin=None, stdout=None, stderr=None, shell=False,
env=None):
return subprocess.Popen(args, shell=shell, stdin=stdin, stdout=stdout,
stderr=stderr, preexec_fn=_subprocess_setup,
close_fds=True, env=env)
def create_process(cmd, root_helper=None, addl_env=None):
"""Create a process object for the given command.
The return value will be a tuple of the process object and the
list of command arguments used to create it.
"""
if root_helper:
cmd = shlex.split(root_helper) + cmd
cmd = map(str, cmd)
env = os.environ.copy()
if addl_env:
env.update(addl_env)
obj = subprocess_popen(cmd, shell=False,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
return obj, cmd
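# Illustrative usage (a sketch, not part of the original module): running a
# command through a root helper would look roughly like
#
#     obj, cmd = create_process(['ip', 'link', 'show'], root_helper='sudo')
#     stdout, stderr = obj.communicate()
#
# where 'sudo' stands in for whatever privilege wrapper (e.g. rootwrap) the
# deployment actually configures.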
def execute(cmd, root_helper=None, process_input=None, addl_env=None,
check_exit_code=True, return_stderr=False):
try:
obj, cmd = create_process(cmd, root_helper=root_helper,
addl_env=addl_env)
_stdout, _stderr = (process_input and
obj.communicate(process_input) or
obj.communicate())
obj.stdin.close()
m = ("\nCommand: %(cmd)s\nExit code: %(code)s\nStdout: %(stdout)r\n"
"Stderr: %(stderr)r") % {'cmd': cmd, 'code': obj.returncode,
'stdout': _stdout, 'stderr': _stderr}
if obj.returncode:
if check_exit_code:
raise RuntimeError(m)
finally:
if sys.version_info[:2] == (2, 6):
pass
else:
# NOTE(termie): this appears to be necessary to let the subprocess
# call clean something up in between calls, without
# it two execute calls in a row hangs the second one
greenthread.sleep(0)
return return_stderr and (_stdout, _stderr) or _stdout
def get_interface_mac(interface):
DEVICE_NAME_LEN = 15
MAC_START = 18
MAC_END = 24
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
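    # 0x8927 is the SIOCGIFHWADDR ioctl: it fills the packed ifreq buffer
    # with the interface's hardware (MAC) address at byte offsets 18-23,
    # which the MAC_START/MAC_END slice below extracts.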
info = fcntl.ioctl(s.fileno(), 0x8927,
struct.pack('256s', interface[:DEVICE_NAME_LEN]))
return ''.join(['%02x:' % ord(char)
for char in info[MAC_START:MAC_END]])[:-1]
def replace_file(file_name, data):
"""Replaces the contents of file_name with data in a safe manner.
First write to a temp file and then rename. Since POSIX renames are
atomic, the file is unlikely to be corrupted by competing writes.
We create the tempfile on the same device to ensure that it can be renamed.
"""
base_dir = os.path.dirname(os.path.abspath(file_name))
tmp_file = tempfile.NamedTemporaryFile('w+', dir=base_dir, delete=False)
tmp_file.write(data)
tmp_file.close()
os.chmod(tmp_file.name, 0o644)
os.rename(tmp_file.name, file_name)
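# Usage sketch (not part of the original module); the path is hypothetical:
#
#     replace_file('/etc/contrail/ports/port-1234.json', json_data)
#
# Creating the temp file in the destination directory keeps os.rename() on a
# single filesystem, so readers never observe a half-written file.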
def find_child_pids(pid):
"""Retrieve a list of the pids of child processes of the given pid."""
try:
raw_pids = execute(['ps', '--ppid', pid, '-o', 'pid='])
except RuntimeError as e:
no_children_found = 'Exit code: 1' in str(e)
        if no_children_found:
            return []
        # Anything else is unexpected; re-raise for the caller to handle.
        raise
return [x.strip() for x in raw_pids.split('\n') if x.strip()]
| apache-2.0 |
lscheinkman/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/units.py | 70 | 4810 | """
The classes here provide support for using custom classes with
matplotlib, eg those that do not expose the array interface but know
how to convert themselves to arrays. It also supports classes with
units and units conversion. Use cases include converters for custom
objects, eg a list of datetime objects, as well as for objects that
are unit aware. We don't assume any particular units implementation;
rather, a units implementation must provide a ConversionInterface and
then register with the Registry converter dictionary. For example,
here is a complete implementation which supports plotting with native
datetime objects:
import matplotlib.units as units
import matplotlib.dates as dates
import matplotlib.ticker as ticker
import datetime
class DateConverter(units.ConversionInterface):
def convert(value, unit):
'convert value to a scalar or array'
return dates.date2num(value)
convert = staticmethod(convert)
def axisinfo(unit):
'return major and minor tick locators and formatters'
if unit!='date': return None
majloc = dates.AutoDateLocator()
majfmt = dates.AutoDateFormatter(majloc)
return AxisInfo(majloc=majloc,
majfmt=majfmt,
label='date')
axisinfo = staticmethod(axisinfo)
def default_units(x):
'return the default unit for x or None'
return 'date'
default_units = staticmethod(default_units)
# finally we register our object type with a converter
units.registry[datetime.date] = DateConverter()
"""
import numpy as np
from matplotlib.cbook import iterable, is_numlike
class AxisInfo:
'information to support default axis labeling and tick labeling'
def __init__(self, majloc=None, minloc=None,
majfmt=None, minfmt=None, label=None):
"""
majloc and minloc: TickLocators for the major and minor ticks
majfmt and minfmt: TickFormatters for the major and minor ticks
label: the default axis label
If any of the above are None, the axis will simply use the default
"""
self.majloc = majloc
self.minloc = minloc
self.majfmt = majfmt
self.minfmt = minfmt
self.label = label
class ConversionInterface:
"""
The minimal interface for a converter to take custom instances (or
sequences) and convert them to values mpl can use
"""
def axisinfo(unit):
'return an units.AxisInfo instance for unit'
return None
axisinfo = staticmethod(axisinfo)
def default_units(x):
'return the default unit for x or None'
return None
default_units = staticmethod(default_units)
def convert(obj, unit):
"""
convert obj using unit. If obj is a sequence, return the
        converted sequence. The output must be a sequence of scalars
that can be used by the numpy array layer
"""
return obj
convert = staticmethod(convert)
def is_numlike(x):
"""
The matplotlib datalim, autoscaling, locators etc work with
scalars which are the units converted to floats given the
current unit. The converter may be passed these floats, or
arrays of them, even when units are set. Derived conversion
interfaces may opt to pass plain-ol unitless numbers through
the conversion interface and this is a helper function for
them.
"""
if iterable(x):
for thisx in x:
return is_numlike(thisx)
else:
return is_numlike(x)
is_numlike = staticmethod(is_numlike)
class Registry(dict):
"""
register types with conversion interface
"""
def __init__(self):
dict.__init__(self)
self._cached = {}
def get_converter(self, x):
'get the converter interface instance for x, or None'
if not len(self): return None # nothing registered
#DISABLED idx = id(x)
#DISABLED cached = self._cached.get(idx)
#DISABLED if cached is not None: return cached
converter = None
classx = getattr(x, '__class__', None)
if classx is not None:
converter = self.get(classx)
if converter is None and iterable(x):
# if this is anything but an object array, we'll assume
# there are no custom units
if isinstance(x, np.ndarray) and x.dtype != np.object:
return None
for thisx in x:
converter = self.get_converter( thisx )
return converter
#DISABLED self._cached[idx] = converter
return converter
registry = Registry()
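# Illustrative lookup (a sketch, not part of the original module): once a
# converter such as the DateConverter from the module docstring has been
# registered, it resolves for scalars and sequences alike:
#
#     import datetime
#     registry.get_converter(datetime.date(2009, 1, 1))   # -> DateConverter
#     registry.get_converter([datetime.date(2009, 1, 1)]) # -> DateConverter
#
# The second call works because get_converter() recurses into iterables when
# the sequence class itself has no registered converter.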
| agpl-3.0 |
gmatteo/pymatgen | pymatgen/analysis/thermochemistry.py | 5 | 3877 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
A module to perform experimental thermochemical data analysis.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "Jun 10, 2012"
from pymatgen.core.composition import Composition
STANDARD_TEMP = 298.0
class ThermoData:
"""
    An object container for experimental thermochemical data.
"""
def __init__(
self,
data_type,
cpdname,
phaseinfo,
formula,
value,
ref="",
method="",
temp_range=(298, 298),
uncertainty=None,
):
"""
Args:
data_type: The thermochemical data type. Should be one of the
following: fH - Formation enthalpy, S - Entropy,
A, B, C, D, E, F, G, H - variables for use in the various
quations for generating formation enthaplies or Cp at
various temperatures.
cpdname (str): A name for the compound. For example, hematite for
Fe2O3.
phaseinfo (str): Denoting the phase. For example, "solid", "liquid",
"gas" or "tetragonal".
formula (str): A proper string formula, e.g., Fe2O3
value (float): The value of the data.
ref (str): A reference, if any, for the data.
method (str): The method by which the data was determined,
if available.
temp_range ([float, float]): Temperature range of validity for the
data in Kelvin. Defaults to 298 K only.
uncertainty (float):
An uncertainty for the data, if available.
"""
self.type = data_type
self.formula = formula
self.composition = Composition(self.formula)
self.reduced_formula = self.composition.reduced_formula
self.compound_name = cpdname
self.phaseinfo = phaseinfo
self.value = value
self.temp_range = temp_range
self.method = method
self.ref = ref
self.uncertainty = uncertainty
@classmethod
def from_dict(cls, d):
"""
Args:
d (dict): Dict representation
Returns:
ThermoData
"""
return ThermoData(
d["type"],
d["compound_name"],
d["phaseinfo"],
d["formula"],
d["value"],
d["ref"],
d["method"],
d["temp_range"],
d.get("uncertainty", None),
)
def as_dict(self):
"""
Returns: MSONable dict
"""
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"type": self.type,
"formula": self.formula,
"compound_name": self.compound_name,
"phaseinfo": self.phaseinfo,
"value": self.value,
"temp_range": self.temp_range,
"method": self.method,
"ref": self.ref,
"uncertainty": self.uncertainty,
}
def __repr__(self):
props = [
"formula",
"compound_name",
"phaseinfo",
"type",
"temp_range",
"value",
"method",
"ref",
"uncertainty",
]
output = ["{} : {}".format(k, getattr(self, k)) for k in props]
return "\n".join(output)
def __str__(self):
return "{}_{}_{} = {}, Valid T : {}, Ref = {}".format(
self.type,
self.formula,
self.phaseinfo,
self.value,
self.temp_range,
self.ref,
)
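# Minimal usage sketch (not part of the original module); the enthalpy value
# below is illustrative only, not vetted reference data:
#
#     td = ThermoData("fH", "hematite", "solid", "Fe2O3", -825.5,
#                     ref="example", temp_range=(298, 298))
#     round_tripped = ThermoData.from_dict(td.as_dict())
#     assert round_tripped.value == td.value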
| mit |
YongseopKim/crosswalk-test-suite | webapi/tct-selectorslevel2-w3c-tests/inst.xpk.py | 357 | 6759 | #!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = ""
PKG_SRC_DIR = ""
def doCMD(cmd):
# Do not need handle timeout in this short script, let tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
        if output_line == '' and cmd_return_code is not None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
def updateCMD(cmd=None):
if "pkgcmd" in cmd:
cmd = "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
return cmd
def getUSERID():
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell id -u %s" % (
PARAMETERS.device, PARAMETERS.user)
else:
cmd = "ssh %s \"id -u %s\"" % (
            PARAMETERS.device, PARAMETERS.user)
return doCMD(cmd)
def getPKGID(pkg_name=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
else:
cmd = "ssh %s \"%s\"" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
(return_code, output) = doCMD(cmd)
if return_code != 0:
return None
test_pkg_id = None
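    # Each matching line of `pkgcmd -l` output is assumed (for illustration
    # only) to resemble:
    #   pkg_type [xpk]  pkgid [tct-foo-bar]  name [foo] ... [tct-foo-bar]
    # so the token following the literal "pkgid" carries the id we need.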
for line in output:
if line.find("[" + pkg_name + "]") != -1:
pkgidIndex = line.split().index("pkgid")
test_pkg_id = line.split()[pkgidIndex+1].strip("[]")
break
return test_pkg_id
def doRemoteCMD(cmd=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd))
else:
cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd))
return doCMD(cmd)
def doRemoteCopy(src=None, dest=None):
if PARAMETERS.mode == "SDB":
cmd_prefix = "sdb -s %s push" % PARAMETERS.device
cmd = "%s %s %s" % (cmd_prefix, src, dest)
else:
cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
(return_code, output) = doCMD(cmd)
doRemoteCMD("sync")
if return_code != 0:
return True
else:
return False
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
if root.endswith("mediasrc"):
continue
for file in files:
if file.endswith(".xpk"):
pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
if not pkg_id:
action_status = False
continue
(return_code, output) = doRemoteCMD(
"pkgcmd -u -t xpk -q -n %s" % pkg_id)
for line in output:
if "Failure" in line:
action_status = False
break
(return_code, output) = doRemoteCMD(
"rm -rf %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
return action_status
def instPKGs():
action_status = True
(return_code, output) = doRemoteCMD(
"mkdir -p %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
for root, dirs, files in os.walk(SCRIPT_DIR):
if root.endswith("mediasrc"):
continue
for file in files:
if file.endswith(".xpk"):
if not doRemoteCopy(os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
action_status = False
(return_code, output) = doRemoteCMD(
"pkgcmd -i -t xpk -q -p %s/%s" % (SRC_DIR, file))
doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
for line in output:
if "Failure" in line:
action_status = False
break
# Do some special copy/delete... steps
'''
(return_code, output) = doRemoteCMD(
"mkdir -p %s/tests" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
if not doRemoteCopy("specname/tests", "%s/tests" % PKG_SRC_DIR):
action_status = False
'''
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-m", dest="mode", action="store", help="Specify mode")
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
opts_parser.add_option(
"-a", dest="user", action="store", help="User name")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception, e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.user:
PARAMETERS.user = "app"
global SRC_DIR, PKG_SRC_DIR
SRC_DIR = "/home/%s/content" % PARAMETERS.user
PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
if not PARAMETERS.mode:
PARAMETERS.mode = "SDB"
if PARAMETERS.mode == "SDB":
if not PARAMETERS.device:
(return_code, output) = doCMD("sdb devices")
for line in output:
if str.find(line, "\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
else:
PARAMETERS.mode = "SSH"
if not PARAMETERS.device:
print "No device provided"
sys.exit(1)
user_info = getUSERID()
re_code = user_info[0]
    if re_code == 0:
global XW_ENV
userid = user_info[1][0]
XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket"%str(userid)
else:
print "[Error] cmd commands error : %s"%str(user_info[1])
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
| bsd-3-clause |
Akshay0724/scikit-learn | sklearn/model_selection/_split.py | 12 | 63090 | """
The :mod:`sklearn.model_selection._split` module includes classes and
functions to split the data based on a preset strategy.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# Raghav RV <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from collections import Iterable
from math import ceil, floor
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.misc import comb
from ..utils import indexable, check_random_state, safe_indexing
from ..utils.validation import _num_samples, column_or_1d
from ..utils.validation import check_array
from ..utils.multiclass import type_of_target
from ..externals.six import with_metaclass
from ..externals.six.moves import zip
from ..utils.fixes import bincount
from ..utils.fixes import signature
from ..utils.random import choice
from ..base import _pprint
__all__ = ['BaseCrossValidator',
'KFold',
'GroupKFold',
'LeaveOneGroupOut',
'LeaveOneOut',
'LeavePGroupsOut',
'LeavePOut',
'ShuffleSplit',
'GroupShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'train_test_split',
'check_cv']
class BaseCrossValidator(with_metaclass(ABCMeta)):
"""Base class for all cross-validators
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
"""
def __init__(self):
# We need this for the build_repr to work properly in py2.7
# see #6304
pass
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, of length n_samples
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
indices = np.arange(_num_samples(X))
for test_index in self._iter_test_masks(X, y, groups):
train_index = indices[np.logical_not(test_index)]
test_index = indices[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self, X=None, y=None, groups=None):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices(X, y, groups)
"""
for test_index in self._iter_test_indices(X, y, groups):
test_mask = np.zeros(_num_samples(X), dtype=np.bool)
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self, X=None, y=None, groups=None):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
@abstractmethod
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator"""
def __repr__(self):
return _build_repr(self)
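# A minimal custom cross-validator sketch (not part of scikit-learn): a
# subclass only needs _iter_test_indices (or _iter_test_masks) plus
# get_n_splits, and BaseCrossValidator.split() derives the train/test pairs.
#
#     class EvenOddSplit(BaseCrossValidator):
#         """Two splits: even-indexed samples, then odd-indexed ones."""
#         def _iter_test_indices(self, X, y=None, groups=None):
#             n = _num_samples(X)
#             yield np.arange(0, n, 2)
#             yield np.arange(1, n, 2)
#         def get_n_splits(self, X=None, y=None, groups=None):
#             return 2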
class LeaveOneOut(BaseCrossValidator):
"""Leave-One-Out cross-validator
Provides train/test indices to split data in train/test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut()`` is equivalent to ``KFold(n_splits=n)`` and
``LeavePOut(p=1)`` where ``n`` is the number of samples.
Due to the high number of test sets (which is the same as the
number of samples) this cross-validation method can be very costly.
For large datasets one should favor :class:`KFold`, :class:`ShuffleSplit`
or :class:`StratifiedKFold`.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import LeaveOneOut
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = LeaveOneOut()
>>> loo.get_n_splits(X)
2
>>> print(loo)
LeaveOneOut()
>>> for train_index, test_index in loo.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneGroupOut
For splitting the data according to explicit, domain-specific
stratification of the dataset.
GroupKFold: K-fold iterator variant with non-overlapping groups.
"""
def _iter_test_indices(self, X, y=None, groups=None):
return range(_num_samples(X))
def get_n_splits(self, X, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if X is None:
raise ValueError("The X parameter should not be None")
return _num_samples(X)
class LeavePOut(BaseCrossValidator):
"""Leave-P-Out cross-validator
Provides train/test indices to split data in train/test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(p)`` is NOT equivalent to
``KFold(n_splits=n_samples // p)`` which creates non-overlapping test sets.
    Due to the high number of iterations, which grows combinatorially with
    the number of samples, this cross-validation method can be very costly. For
large datasets one should favor :class:`KFold`, :class:`StratifiedKFold`
or :class:`ShuffleSplit`.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
p : int
Size of the test sets.
Examples
--------
>>> from sklearn.model_selection import LeavePOut
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = LeavePOut(2)
>>> lpo.get_n_splits(X)
6
>>> print(lpo)
LeavePOut(p=2)
>>> for train_index, test_index in lpo.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, p):
self.p = p
def _iter_test_indices(self, X, y=None, groups=None):
for combination in combinations(range(_num_samples(X)), self.p):
yield np.array(combination)
def get_n_splits(self, X, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
"""
if X is None:
raise ValueError("The X parameter should not be None")
return int(comb(_num_samples(X), self.p, exact=True))
class _BaseKFold(with_metaclass(ABCMeta, BaseCrossValidator)):
"""Base class for KFold, GroupKFold, and StratifiedKFold"""
@abstractmethod
def __init__(self, n_splits, shuffle, random_state):
if not isinstance(n_splits, numbers.Integral):
raise ValueError('The number of folds must be of Integral type. '
'%s of type %s was passed.'
% (n_splits, type(n_splits)))
n_splits = int(n_splits)
if n_splits <= 1:
raise ValueError(
"k-fold cross-validation requires at least one"
" train/test split by setting n_splits=2 or more,"
" got n_splits={0}.".format(n_splits))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.n_splits = n_splits
self.shuffle = shuffle
self.random_state = random_state
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
if self.n_splits > n_samples:
raise ValueError(
("Cannot have number of splits n_splits={0} greater"
" than the number of samples: {1}.").format(self.n_splits,
n_samples))
for train, test in super(_BaseKFold, self).split(X, y, groups):
yield train, test
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_splits
class KFold(_BaseKFold):
"""K-Folds cross-validator
Provides train/test indices to split data in train/test sets. Split
dataset into k consecutive folds (without shuffling by default).
Each fold is then used once as a validation while the k - 1 remaining
folds form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.model_selection import KFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = KFold(n_splits=2)
>>> kf.get_n_splits(X)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
KFold(n_splits=2, random_state=None, shuffle=False)
>>> for train_index, test_index in kf.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first ``n_samples % n_splits`` folds have size
``n_samples // n_splits + 1``, other folds have size
``n_samples // n_splits``, where ``n_samples`` is the number of samples.
See also
--------
StratifiedKFold
Takes group information into account to avoid building folds with
imbalanced class distributions (for binary or multiclass
classification tasks).
GroupKFold: K-fold iterator variant with non-overlapping groups.
"""
def __init__(self, n_splits=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n_splits, shuffle, random_state)
def _iter_test_indices(self, X, y=None, groups=None):
n_samples = _num_samples(X)
indices = np.arange(n_samples)
if self.shuffle:
check_random_state(self.random_state).shuffle(indices)
n_splits = self.n_splits
fold_sizes = (n_samples // n_splits) * np.ones(n_splits, dtype=np.int)
fold_sizes[:n_samples % n_splits] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield indices[start:stop]
current = stop
class GroupKFold(_BaseKFold):
"""K-fold iterator variant with non-overlapping groups.
The same group will not appear in two different folds (the number of
distinct groups has to be at least equal to the number of folds).
The folds are approximately balanced in the sense that the number of
distinct groups is approximately the same in each fold.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
Examples
--------
>>> from sklearn.model_selection import GroupKFold
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> groups = np.array([0, 0, 2, 2])
>>> group_kfold = GroupKFold(n_splits=2)
>>> group_kfold.get_n_splits(X, y, groups)
2
>>> print(group_kfold)
GroupKFold(n_splits=2)
>>> for train_index, test_index in group_kfold.split(X, y, groups):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
...
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [3 4]
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [3 4] [1 2]
See also
--------
LeaveOneGroupOut
For splitting the data according to explicit domain-specific
stratification of the dataset.
"""
def __init__(self, n_splits=3):
super(GroupKFold, self).__init__(n_splits, shuffle=False,
random_state=None)
def _iter_test_indices(self, X, y, groups):
if groups is None:
raise ValueError("The groups parameter should not be None")
groups = check_array(groups, ensure_2d=False, dtype=None)
unique_groups, groups = np.unique(groups, return_inverse=True)
n_groups = len(unique_groups)
if self.n_splits > n_groups:
raise ValueError("Cannot have number of splits n_splits=%d greater"
" than the number of groups: %d."
% (self.n_splits, n_groups))
# Weight groups by their number of occurrences
n_samples_per_group = np.bincount(groups)
# Distribute the most frequent groups first
indices = np.argsort(n_samples_per_group)[::-1]
n_samples_per_group = n_samples_per_group[indices]
# Total weight of each fold
n_samples_per_fold = np.zeros(self.n_splits)
# Mapping from group index to fold index
group_to_fold = np.zeros(len(unique_groups))
# Distribute samples by adding the largest weight to the lightest fold
for group_index, weight in enumerate(n_samples_per_group):
lightest_fold = np.argmin(n_samples_per_fold)
n_samples_per_fold[lightest_fold] += weight
group_to_fold[indices[group_index]] = lightest_fold
indices = group_to_fold[groups]
for f in range(self.n_splits):
yield np.where(indices == f)[0]
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross-validator
Provides train/test indices to split data in train/test sets.
This cross-validation object is a variation of KFold that returns
stratified folds. The folds are made by preserving the percentage of
samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.model_selection import StratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = StratifiedKFold(n_splits=2)
>>> skf.get_n_splits(X, y)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
StratifiedKFold(n_splits=2, random_state=None, shuffle=False)
>>> for train_index, test_index in skf.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
All the folds have size ``trunc(n_samples / n_splits)``, the last one has
the complementary.
"""
def __init__(self, n_splits=3, shuffle=False, random_state=None):
super(StratifiedKFold, self).__init__(n_splits, shuffle, random_state)
def _make_test_folds(self, X, y=None, groups=None):
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
y = np.asarray(y)
n_samples = y.shape[0]
unique_y, y_inversed = np.unique(y, return_inverse=True)
y_counts = bincount(y_inversed)
min_groups = np.min(y_counts)
if np.all(self.n_splits > y_counts):
raise ValueError("All the n_groups for individual classes"
" are less than n_splits=%d."
% (self.n_splits))
if self.n_splits > min_groups:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of groups for any class cannot"
" be less than n_splits=%d."
% (min_groups, self.n_splits)), Warning)
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each class so as to respect the balance of
# classes
# NOTE: Passing the data corresponding to ith class say X[y==class_i]
# will break when the data is not 100% stratifiable for all classes.
        # So we pass np.zeros(max(c, n_splits)) as data to the KFold
per_cls_cvs = [
KFold(self.n_splits, shuffle=self.shuffle,
random_state=rng).split(np.zeros(max(count, self.n_splits)))
for count in y_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):
for cls, (_, test_split) in zip(unique_y, per_cls_splits):
cls_test_folds = test_folds[y == cls]
# the test split can be too big because we used
# KFold(...).split(X[:max(c, n_splits)]) when data is not 100%
# stratifiable for all the classes
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(cls_test_folds)]
cls_test_folds[test_split] = test_fold_indices
test_folds[y == cls] = cls_test_folds
return test_folds
def _iter_test_masks(self, X, y=None, groups=None):
test_folds = self._make_test_folds(X, y)
for i in range(self.n_splits):
yield test_folds == i
def split(self, X, y, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
y = check_array(y, ensure_2d=False, dtype=None)
return super(StratifiedKFold, self).split(X, y, groups)
class TimeSeriesSplit(_BaseKFold):
"""Time Series cross-validator
Provides train/test indices to split time series data samples
that are observed at fixed time intervals, in train/test sets.
In each split, test indices must be higher than before, and thus shuffling
in cross validator is inappropriate.
This cross-validation object is a variation of :class:`KFold`.
In the kth split, it returns first k folds as train set and the
(k+1)th fold as test set.
Note that unlike standard cross-validation methods, successive
training sets are supersets of those that come before them.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
Number of splits. Must be at least 1.
Examples
--------
>>> from sklearn.model_selection import TimeSeriesSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> tscv = TimeSeriesSplit(n_splits=3)
>>> print(tscv) # doctest: +NORMALIZE_WHITESPACE
TimeSeriesSplit(n_splits=3)
>>> for train_index, test_index in tscv.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [0] TEST: [1]
TRAIN: [0 1] TEST: [2]
TRAIN: [0 1 2] TEST: [3]
Notes
-----
The training set has size ``i * n_samples // (n_splits + 1)
+ n_samples % (n_splits + 1)`` in the ``i``th split,
with a test set of size ``n_samples // (n_splits + 1)``,
where ``n_samples`` is the number of samples.
"""
def __init__(self, n_splits=3):
super(TimeSeriesSplit, self).__init__(n_splits,
shuffle=False,
random_state=None)
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Always ignored, exists for compatibility.
groups : array-like, with shape (n_samples,), optional
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
n_splits = self.n_splits
n_folds = n_splits + 1
if n_folds > n_samples:
raise ValueError(
("Cannot have number of folds = {0} greater"
" than the number of samples: {1}.").format(n_folds,
n_samples))
indices = np.arange(n_samples)
test_size = (n_samples // n_folds)
test_starts = range(test_size + n_samples % n_folds,
n_samples, test_size)
for test_start in test_starts:
yield (indices[:test_start],
indices[test_start:test_start + test_size])
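# A quick numeric check of the size formula from the Notes section of
# TimeSeriesSplit, with arbitrary demo values n_samples=10, n_splits=3
# (helper name hypothetical): n_folds = 4, test_size = 10 // 4 = 2, and
# the remainder 10 % 4 = 2 is absorbed by the first training set.
def _demo_time_series_sizes(n_samples=10, n_splits=3):
    n_folds = n_splits + 1
    test_size = n_samples // n_folds
    # (train size, test size) for splits i = 1..n_splits
    return [(i * test_size + n_samples % n_folds, test_size)
            for i in range(1, n_splits + 1)]
# _demo_time_series_sizes() -> [(4, 2), (6, 2), (8, 2)]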
class LeaveOneGroupOut(BaseCrossValidator):
"""Leave One Group Out cross-validator
Provides train/test indices to split data according to a third-party
provided group. This group information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import LeaveOneGroupOut
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> groups = np.array([1, 1, 2, 2])
>>> logo = LeaveOneGroupOut()
>>> logo.get_n_splits(X, y, groups)
2
>>> print(logo)
LeaveOneGroupOut()
>>> for train_index, test_index in logo.split(X, y, groups):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def _iter_test_masks(self, X, y, groups):
if groups is None:
raise ValueError("The groups parameter should not be None")
# We make a copy of groups to avoid side-effects during iteration
groups = check_array(groups, copy=True, ensure_2d=False, dtype=None)
unique_groups = np.unique(groups)
if len(unique_groups) <= 1:
raise ValueError(
"The groups parameter contains fewer than 2 unique groups "
"(%s). LeaveOneGroupOut expects at least 2." % unique_groups)
for i in unique_groups:
yield groups == i
def get_n_splits(self, X, y, groups):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if groups is None:
raise ValueError("The groups parameter should not be None")
return len(np.unique(groups))
class LeavePGroupsOut(BaseCrossValidator):
"""Leave P Group(s) Out cross-validator
Provides train/test indices to split data according to a third-party
provided group. This group information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePGroupsOut and LeaveOneGroupOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the groups while the latter uses samples
all assigned to the same group.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_groups : int
Number of groups (``p``) to leave out in the test split.
Examples
--------
>>> from sklearn.model_selection import LeavePGroupsOut
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> groups = np.array([1, 2, 3])
>>> lpgo = LeavePGroupsOut(n_groups=2)
>>> lpgo.get_n_splits(X, y, groups)
3
>>> print(lpgo)
LeavePGroupsOut(n_groups=2)
>>> for train_index, test_index in lpgo.split(X, y, groups):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
See also
--------
GroupKFold: K-fold iterator variant with non-overlapping groups.
"""
def __init__(self, n_groups):
self.n_groups = n_groups
def _iter_test_masks(self, X, y, groups):
if groups is None:
raise ValueError("The groups parameter should not be None")
groups = check_array(groups, copy=True, ensure_2d=False, dtype=None)
unique_groups = np.unique(groups)
if self.n_groups >= len(unique_groups):
raise ValueError(
"The groups parameter contains fewer than (or equal to) "
"n_groups (%d) unique groups (%s). LeavePGroupsOut "
"expects that at least n_groups + 1 (%d) unique groups be "
"present" % (self.n_groups, unique_groups, self.n_groups + 1))
combi = combinations(range(len(unique_groups)), self.n_groups)
for indices in combi:
test_index = np.zeros(_num_samples(X), dtype=np.bool)
for l in unique_groups[np.array(indices)]:
test_index[groups == l] = True
yield test_index
def get_n_splits(self, X, y, groups):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
``np.zeros(n_samples)`` may be used as a placeholder.
y : object
Always ignored, exists for compatibility.
``np.zeros(n_samples)`` may be used as a placeholder.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if groups is None:
raise ValueError("The groups parameter should not be None")
groups = check_array(groups, ensure_2d=False, dtype=None)
X, y, groups = indexable(X, y, groups)
return int(comb(len(np.unique(groups)), self.n_groups, exact=True))
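# A small sanity check of the split count above, assuming
# itertools.combinations is in scope as used in _iter_test_masks
# (helper name hypothetical): the iterator walks every size-p subset of
# the unique groups, so get_n_splits must equal comb(n_unique_groups, p).
def _demo_lpgo_split_count(n_unique_groups, p):
    return sum(1 for _ in combinations(range(n_unique_groups), p))
# _demo_lpgo_split_count(4, 2) == 6 == int(comb(4, 2, exact=True))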
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n_splits=10, test_size=0.1, train_size=None,
random_state=None):
_validate_shuffle_split_init(test_size, train_size)
self.n_splits = n_splits
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
for train, test in self._iter_indices(X, y, groups):
yield train, test
@abstractmethod
def _iter_indices(self, X, y=None, groups=None):
"""Generate (train, test) indices"""
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_splits
def __repr__(self):
return _build_repr(self)
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validator
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float, int, or None, default 0.1
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.model_selection import ShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> rs = ShuffleSplit(n_splits=3, test_size=.25, random_state=0)
>>> rs.get_n_splits(X)
3
>>> print(rs)
ShuffleSplit(n_splits=3, random_state=0, test_size=0.25, train_size=None)
>>> for train_index, test_index in rs.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... # doctest: +ELLIPSIS
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = ShuffleSplit(n_splits=3, train_size=0.5, test_size=.25,
... random_state=0)
>>> for train_index, test_index in rs.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... # doctest: +ELLIPSIS
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self, X, y=None, groups=None):
n_samples = _num_samples(X)
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
rng = check_random_state(self.random_state)
for i in range(self.n_splits):
# random partition
permutation = rng.permutation(n_samples)
ind_test = permutation[:n_test]
ind_train = permutation[n_test:(n_test + n_train)]
yield ind_train, ind_test
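# A minimal sketch of one random partition as generated by
# _iter_indices above, assuming numpy as np (helper name hypothetical):
# shuffle all indices once, take the first n_test as the test set and
# the next n_train as the training set.
def _demo_shuffle_partition(n_samples, n_test, n_train, seed=0):
    rng = np.random.RandomState(seed)
    permutation = rng.permutation(n_samples)
    return permutation[n_test:n_test + n_train], permutation[:n_test]
# train, test = _demo_shuffle_partition(5, n_test=2, n_train=3)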
class GroupShuffleSplit(ShuffleSplit):
'''Shuffle-Group(s)-Out cross-validation iterator
Provides randomized train/test indices to split data according to a
third-party provided group. This group information can be used to encode
arbitrary domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePGroupsOut and GroupShuffleSplit is that
the former generates splits using all subsets of size ``p`` unique groups,
whereas GroupShuffleSplit generates a user-determined number of random
test splits, each with a user-determined fraction of unique groups.
For example, a less computationally intensive alternative to
``LeavePGroupsOut(p=10)`` would be
``GroupShuffleSplit(test_size=10, n_splits=100)``.
Note: The parameters ``test_size`` and ``train_size`` refer to groups,
not to samples as in ShuffleSplit.
Parameters
----------
n_splits : int (default 5)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.2), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the groups to include in the test split. If
int, represents the absolute number of test groups. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the groups to include in the train split. If
int, represents the absolute number of train groups. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
'''
def __init__(self, n_splits=5, test_size=0.2, train_size=None,
random_state=None):
super(GroupShuffleSplit, self).__init__(
n_splits=n_splits,
test_size=test_size,
train_size=train_size,
random_state=random_state)
def _iter_indices(self, X, y, groups):
if groups is None:
raise ValueError("The groups parameter should not be None")
groups = check_array(groups, ensure_2d=False, dtype=None)
classes, group_indices = np.unique(groups, return_inverse=True)
for group_train, group_test in super(
GroupShuffleSplit, self)._iter_indices(X=classes):
# these are the indices of classes in the partition
# invert them into data indices
train = np.flatnonzero(np.in1d(group_indices, group_train))
test = np.flatnonzero(np.in1d(group_indices, group_test))
yield train, test
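# A minimal sketch of the group-to-sample expansion above, assuming
# numpy as np (helper name hypothetical): group_indices maps each
# sample to its group id, and np.in1d selects every sample whose group
# fell on the chosen side of the partition.
def _demo_expand_groups(groups, chosen_group_ids):
    _, group_indices = np.unique(np.asarray(groups), return_inverse=True)
    return np.flatnonzero(np.in1d(group_indices, chosen_group_ids))
# _demo_expand_groups([1, 1, 2, 2], [0]) -> array([0, 1])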
def _approximate_mode(class_counts, n_draws, rng):
"""Computes approximate mode of multivariate hypergeometric.
This is an approximation to the mode of the multivariate
hypergeometric given by class_counts and n_draws.
It shouldn't be off by more than one.
It is the most likely outcome of drawing n_draws
samples from the population given by class_counts.
Parameters
----------
class_counts : ndarray of int
Population per class.
n_draws : int
Number of draws (samples to draw) from the overall population.
rng : random state
Used to break ties.
Returns
-------
sampled_classes : ndarray of int
Number of samples drawn from each class.
np.sum(sampled_classes) == n_draws
Examples
--------
>>> from sklearn.model_selection._split import _approximate_mode
>>> _approximate_mode(class_counts=np.array([4, 2]), n_draws=3, rng=0)
array([2, 1])
>>> _approximate_mode(class_counts=np.array([5, 2]), n_draws=4, rng=0)
array([3, 1])
>>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]),
... n_draws=2, rng=0)
array([0, 1, 1, 0])
>>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]),
... n_draws=2, rng=42)
array([1, 1, 0, 0])
"""
# this computes a rough approximation to the mode of the
# multivariate hypergeometric given by class_counts and n_draws
continuous = n_draws * class_counts / class_counts.sum()
# flooring means we don't overshoot n_draws, but may undershoot
floored = np.floor(continuous)
# we add samples according to how much "left over" probability
# they had, until we arrive at n_draws
need_to_add = int(n_draws - floored.sum())
if need_to_add > 0:
remainder = continuous - floored
values = np.sort(np.unique(remainder))[::-1]
# add according to remainder, but break ties
# randomly to avoid biases
for value in values:
inds, = np.where(remainder == value)
# if we need_to_add less than what's in inds
# we draw randomly from them.
# if we need to add more, we add them all and
# go to the next value
add_now = min(len(inds), need_to_add)
inds = choice(inds, size=add_now, replace=False, random_state=rng)
floored[inds] += 1
need_to_add -= add_now
if need_to_add == 0:
break
return floored.astype(np.int)
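# A hand check of the floor-then-top-up scheme above (helper name
# hypothetical, numpy assumed as np): the deterministic first stage is
# proportional allocation rounded down; the random top-up only runs
# when the floored counts fall short of n_draws.
def _demo_approximate_mode_floor(class_counts, n_draws):
    class_counts = np.asarray(class_counts, dtype=float)
    floored = np.floor(n_draws * class_counts / class_counts.sum())
    # second element is how many samples the random top-up must add
    return floored, n_draws - floored.sum()
# _demo_approximate_mode_floor([4, 2], 3) -> (array([2., 1.]), 0.0)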
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross-validator
Provides train/test indices to split data in train/test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.model_selection import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(n_splits=3, test_size=0.5, random_state=0)
>>> sss.get_n_splits(X, y)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(n_splits=3, random_state=0, ...)
>>> for train_index, test_index in sss.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, n_splits=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
n_splits, test_size, train_size, random_state)
def _iter_indices(self, X, y, groups=None):
n_samples = _num_samples(X)
y = check_array(y, ensure_2d=False, dtype=None)
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
classes, y_indices = np.unique(y, return_inverse=True)
n_classes = classes.shape[0]
class_counts = bincount(y_indices)
if np.min(class_counts) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of members in any class cannot"
" be less than 2.")
if n_train < n_classes:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(n_train, n_classes))
if n_test < n_classes:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(n_test, n_classes))
rng = check_random_state(self.random_state)
for _ in range(self.n_splits):
# if there are ties in the class-counts, we want
# to make sure to break them anew in each iteration
n_i = _approximate_mode(class_counts, n_train, rng)
class_counts_remaining = class_counts - n_i
t_i = _approximate_mode(class_counts_remaining, n_test, rng)
train = []
test = []
for i, class_i in enumerate(classes):
permutation = rng.permutation(class_counts[i])
perm_indices_class_i = np.where((y == class_i))[0][permutation]
train.extend(perm_indices_class_i[:n_i[i]])
test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def split(self, X, y, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
y = check_array(y, ensure_2d=False, dtype=None)
return super(StratifiedShuffleSplit, self).split(X, y, groups)
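# A minimal sketch of the per-class bookkeeping in _iter_indices above
# (helper name hypothetical, numpy assumed as np): n_i holds the
# training allocation per class and t_i the test allocation drawn from
# what remains, so every class contributes proportionally to both sides.
def _demo_stratified_allocations(class_counts, n_train, n_test, seed=0):
    rng = np.random.RandomState(seed)
    n_i = _approximate_mode(class_counts, n_train, rng)
    t_i = _approximate_mode(class_counts - n_i, n_test, rng)
    return n_i, t_i
# _demo_stratified_allocations(np.array([2, 2]), 2, 2)
# -> (array([1, 1]), array([1, 1]))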
def _validate_shuffle_split_init(test_size, train_size):
"""Validation helper to check the test_size and train_size at init
NOTE This does not take into account the number of samples which is known
only at split
"""
if test_size is None and train_size is None:
raise ValueError('test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind != 'i':
# int values are checked during split based on the input
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif (np.asarray(test_size).dtype.kind == 'f' and
(train_size + test_size) > 1.):
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind != 'i':
# int values are checked during split based on the input
raise ValueError("Invalid value for train_size: %r" % train_size)
def _validate_shuffle_split(n_samples, test_size, train_size):
"""
Validation helper to check if the test/test sizes are meaningful wrt to the
size of the data (n_samples)
"""
if (test_size is not None and np.asarray(test_size).dtype.kind == 'i' and
test_size >= n_samples):
raise ValueError('test_size=%d should be smaller than the number of '
'samples %d' % (test_size, n_samples))
if (train_size is not None and np.asarray(train_size).dtype.kind == 'i' and
train_size >= n_samples):
raise ValueError("train_size=%d should be smaller than the number of"
" samples %d" % (train_size, n_samples))
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n_samples)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n_samples - n_test
elif np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n_samples)
else:
n_train = float(train_size)
if test_size is None:
n_test = n_samples - n_train
if n_train + n_test > n_samples:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n_samples))
return int(n_train), int(n_test)
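# A quick numeric trace of _validate_shuffle_split, assuming
# n_samples = 10: ceil(0.25 * 10) gives n_test = 3, and train_size=None
# fills in the complement n_train = 7.
assert _validate_shuffle_split(10, 0.25, None) == (7, 3)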
class PredefinedSplit(BaseCrossValidator):
"""Predefined split cross-validator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> test_fold = [0, 1, -1, 1]
>>> ps = PredefinedSplit(test_fold)
>>> ps.get_n_splits()
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
PredefinedSplit(test_fold=array([ 0, 1, -1, 1]))
>>> for train_index, test_index in ps.split():
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def split(self, X=None, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
ind = np.arange(len(self.test_fold))
for test_index in self._iter_test_masks():
train_index = ind[np.logical_not(test_index)]
test_index = ind[test_index]
yield train_index, test_index
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets."""
for f in self.unique_folds:
test_index = np.where(self.test_fold == f)[0]
test_mask = np.zeros(len(self.test_fold), dtype=np.bool)
test_mask[test_index] = True
yield test_mask
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.unique_folds)
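# A minimal usage sketch of the test_fold convention above: entries of
# -1 never appear in any test set, so those samples are train-only.
# With test_fold = [0, 1, -1, 1] the unique folds are {0, 1} and sample
# 2 appears in every training split, matching the docstring output:
# ps = PredefinedSplit([0, 1, -1, 1])
# list(ps.split())
# -> [(array([1, 2, 3]), array([0])), (array([0, 2]), array([1, 3]))]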
class _CVIterableWrapper(BaseCrossValidator):
"""Wrapper class for old style cv objects and iterables."""
def __init__(self, cv):
self.cv = list(cv)
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.cv)
def split(self, X=None, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
for train, test in self.cv:
yield train, test
def check_cv(cv=3, y=None, classifier=False):
"""Input checker utility for building a cross-validator
Parameters
----------
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds,
- an object to be used as a cross-validation generator,
- an iterable yielding train/test splits.
For integer/None inputs, if classifier is True and ``y`` is either
binary or multiclass, :class:`StratifiedKFold` is used. In all other
cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
y : array-like, optional
The target variable for supervised learning problems.
classifier : boolean, optional, default False
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv : a cross-validator instance.
The return value is a cross-validator which generates the train/test
splits via the ``split`` method.
"""
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if (classifier and (y is not None) and
(type_of_target(y) in ('binary', 'multiclass'))):
return StratifiedKFold(cv)
else:
return KFold(cv)
if not hasattr(cv, 'split') or isinstance(cv, str):
if not isinstance(cv, Iterable) or isinstance(cv, str):
raise ValueError("Expected cv as an integer, cross-validation "
"object (from sklearn.model_selection) "
"or an iterable. Got %s." % cv)
return _CVIterableWrapper(cv)
return cv # New style cv objects are passed without any modification
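# A minimal sketch of the dispatch rule above (outputs abbreviated):
# check_cv(5)                                  -> KFold(n_splits=5, ...)
# check_cv(5, y=[0, 1, 0, 1], classifier=True) -> StratifiedKFold(...)
# check_cv(some_object_with_split)             -> passed through as-is
# check_cv([(train_idx, test_idx), ...])       -> _CVIterableWrapper(...)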
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(ShuffleSplit().split(X, y))`` and application to input data
into a single call for splitting (and optionally subsampling) data
in a one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse
matrices or pandas dataframes.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the class labels.
Returns
-------
splitting : list, length=2 * len(arrays)
List containing train-test split of inputs.
.. versionadded:: 0.16
If the input is sparse, the output will be a
``scipy.sparse.csr_matrix``. Else, output type is the same as the
input type.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
stratify = options.pop('stratify', None)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
CVClass = StratifiedShuffleSplit
else:
CVClass = ShuffleSplit
cv = CVClass(test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(cv.split(X=arrays[0], y=stratify))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a problem with nosetests
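# A minimal usage sketch, assuming numpy as np: passing the labels via
# ``stratify`` routes the call through StratifiedShuffleSplit, so the
# class ratio of y is preserved in both halves rather than left to
# chance (values illustrative):
# X = np.arange(8).reshape(4, 2)
# y = [0, 0, 1, 1]
# X_train, X_test, y_train, y_test = train_test_split(
#     X, y, test_size=0.5, stratify=y, random_state=0)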
def _build_repr(self):
# XXX This is copied from BaseEstimator's get_params
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
# Ignore varargs, kw and default values and pop self
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
if init is object.__init__:
args = []
else:
args = sorted([p.name for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD])
class_name = self.__class__.__name__
params = dict()
for key in args:
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
params[key] = value
return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name)))
| bsd-3-clause |
kmiller96/Shipping-Containers-Software | lib/core.py | 1 | 8600 | # AUTHOR: Kale Miller
# DESCRIPTION: The 'main brain' of the program is held in here.
# 50726f6772616d6d696e6720697320627265616b696e67206f66206f6e652062696720696d706f737369626c65207461736b20696e746f20736576
# 6572616c207665727920736d616c6c20706f737369626c65207461736b732e
# DEVELOPMENT LOG:
# 07/12/16: Initialized file. Moved IDGenerator class into the script. Added holding bay class.
# 12/12/16: Tweaked the IDGenerator class to help remove dependancy.
# 13/12/16: Fleshed out the NewHoldingBay class.
# 15/12/16: Added methods to add auxilary labels. Added method to generate information label. Small bug fixes.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~IMPORTS/GLOBALS~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import os, time
import numpy as np
from lib import containers
CONTAINER_CLASSES = [
containers.BasicContainer,
containers.HeavyContainer,
containers.RefrigeratedContainer,
containers.LiquidContainer,
containers.ExplosivesContainer,
containers.ToxicContainer,
containers.ChemicalContainer
]
CONTAINER_TYPES = ['basic', 'heavy', 'refrigerated', 'liquid', 'explosive', 'toxic', 'chemical']
SERIAL_CODES = ['B', 'H', 'R', 'L', 'E', 'T', 'C']
TAG_APPLICATION_TIME = 0.2
PRINTALL_TIME = 1
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~.:.~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~MAIN~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def processshipfile(filename, path):
"""Processes the csv file that the ship supplies."""
def _deletenewline(string):
"""Deletes the \n symbol from a string if it exists."""
try:
truncatedstring = string[:string.index('\n')]
except ValueError:
truncatedstring = string
finally:
return truncatedstring
try:
home = os.getcwd()
os.chdir(path)
except OSError: # covers WindowsError on Windows and works on all platforms
raise NameError, "The path specified does not exist."
rawfile = open(filename, 'r')
arylines = rawfile.readlines()
basematrix = map(lambda x: _deletenewline(x).split(','), arylines)
numpyarray = np.array(basematrix)
return numpyarray
class IDGenerator:
"""Controls the assignment of id tags on the containers."""
# TODO: Change the __init__ such that it works by reading a collection of tuples instead of two lists.
def __init__(self):
"""Initialise the id generator."""
self._COUNTERS = [0] * len(CONTAINER_TYPES)
return
def _findindex(self, container):
"""Determines the index in the lists the class should use."""
return CONTAINER_TYPES.index(container)
def _serialcode(self, index):
"""Fetches the serial code for a supplied index."""
return SERIAL_CODES[index]
def _counter(self, index):
"""Fetches the counter for a specific serial type and increments it by one."""
self._COUNTERS[index] += 1
return self._COUNTERS[index]
def newid(self, containertype):
"""Generates a new id."""
ii = self._findindex(containertype)
idtag = self._serialcode(ii) + str(self._counter(ii)).zfill(5)
return idtag
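# A minimal usage sketch of the generator above (values follow from the
# SERIAL_CODES/CONTAINER_TYPES tables, but are otherwise hypothetical):
# gen = IDGenerator()
# gen.newid('basic') -> 'B00001'
# gen.newid('basic') -> 'B00002'
# gen.newid('toxic') -> 'T00001'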
class NewHoldingBay:
"""Creates a new holding bay for the containers. Thus it contains all of the information about the containers
along with the methods controlling unloading and loading them."""
def __init__(self):
self._path = os.getcwd()
self.idgenerator = IDGenerator()
self.containerlist = list()
self._iOnship = 0
self._iLoaded = 0
self._iHolding = 0
return None
def _createcontainer(self, containerstr, parameters):
"""Creates a new container class based off the first column of the CSV."""
# TODO: Fix this method up to catch more and print useful error messages.
if not isinstance(containerstr, str):
raise TypeError, "The parameter passed must be a string."
elif len(containerstr) == 1:
try:
ii = SERIAL_CODES.index(containerstr)
except ValueError:
raise Exception("Bad input.") # TODO: Fix this area up.
elif len(containerstr) != 1:
try:
ii = CONTAINER_TYPES.index(containerstr)
except ValueError:
raise Exception("Bad input.")
idtag = self.idgenerator.newid(CONTAINER_TYPES[ii])
return CONTAINER_CLASSES[ii](idtag, *parameters)
def defineship(self, file):
"""Pass in the CSV file of the ship in order to unload it."""
shipdata = processshipfile(file, self._path)
shipdata = shipdata[1::] # Throw out the headers.
for line in shipdata:
newcontainer = self._createcontainer(line[0], (line[1], line[3]))
self.containerlist.append(newcontainer)
self._iOnship += 1
def printcontainer(self, serial):
"""Prints the information about a specific container."""
for container in self.containerlist:
if container.id() == serial:
container.information()
return None
else:
continue
raise NameError, "Unable to find container with serial code %s" % serial
return -1
def printallinformation(self):
"""Prints the information of all the containers."""
for container in self.containerlist:
container.information()
time.sleep(PRINTALL_TIME)
return None
def unloadall(self, debug=False):
"""Unloads all of the containers from the ship."""
for container in self.containerlist:
container.unload(debug=debug)
self._iHolding += 1
self._iOnship -= 1
return None
def loadall(self, debug=False):
"""Loads all of the containers into trucks and trains."""
# TODO: Proper loading locations.
ii = 1
for container in self.containerlist:
container.load('Truck ' + str(ii).zfill(3), debug=debug)
self._iHolding -= 1
self._iLoaded += 1
ii += 1
return None
def printauditedload(self):
"""Prints information about the holding bay at this time."""
iContainercount = [0] * len(CONTAINER_TYPES)
for container in self.containerlist:
try:
ii = CONTAINER_TYPES.index(container._type)
iContainercount[ii] += 1
except ValueError:
raise NameError, "One (or more) containers don't have a valid type."
# Print the appropriate information.
print "----------------------------------------------------------------------"
print "TOTAL CONTAINERS: %i" % len(self.containerlist); time.sleep(0.3)
print "CONTAINERS CURRENTLY STILL ON SHIP: %i" % self._iOnship; time.sleep(0.3)
print "CONTAINERS LOADED ON TRUCKS AND TRAINS: %i" % self._iLoaded; time.sleep(0.3)
print "CONTAINERS BEING HELD IN THE HOLDING BAY: %i" % self._iHolding; time.sleep(0.3)
print ""
print "THE NUMBER OF CONTAINERS FOR EACH TYPE:"; time.sleep(0.3)
for ii in xrange(len(CONTAINER_TYPES)):
if iContainercount[ii] == 0: continue
print "\t%s: %i" % (CONTAINER_TYPES[ii], iContainercount[ii]); time.sleep(0.3)
print "----------------------------------------------------------------------"
return None
def addidtags(self, debug=False):
"""Applys appropriate serial numbers to all of the containers."""
for container in self.containerlist:
print "Applying id tag to container %s" % container.id()
if not debug: time.sleep(TAG_APPLICATION_TIME)
container.addidtag()
return None
def applyauxilarylabels(self):
"""Applys the labels that should go on containers about their contents and handling."""
for container in self.containerlist:
print "Adding labels to container %s" % container.id()
container.addauxilarylabels()
return None
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~.:.~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
| mit |
ashhher3/invenio | modules/bibrank/lib/bibrank_downloads_similarity.py | 19 | 4328 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = \
"$Id$"
from invenio.config import \
CFG_ACCESS_CONTROL_LEVEL_SITE, \
CFG_CERN_SITE
from invenio.dbquery import run_sql
from invenio.bibrank_downloads_indexer import database_tuples_to_single_list
from invenio.search_engine_utils import get_fieldvalues
def record_exists(recID):
"""Return 1 if record RECID exists.
Return 0 if it doesn't exist.
Return -1 if it exists but is marked as deleted.
Copy from search_engine"""
out = 0
query = "SELECT id FROM bibrec WHERE id='%s'" % recID
res = run_sql(query, None, 1)
if res:
# record exists; now check whether it isn't marked as deleted:
dbcollids = get_fieldvalues(recID, "980__%")
if ("DELETED" in dbcollids) or (CFG_CERN_SITE and "DUMMY" in dbcollids):
out = -1 # exists, but marked as deleted
else:
out = 1 # exists fine
return out
### INTERFACE
def register_page_view_event(recid, uid, client_ip_address):
"""Register Detailed record page view event for record RECID
consulted by user UID from machine CLIENT_HOST_IP.
To be called by the search engine.
"""
if CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
# do not register access if we are in read-only access control
# site mode:
return []
return run_sql("INSERT DELAYED INTO rnkPAGEVIEWS " \
" (id_bibrec,id_user,client_host,view_time) " \
" VALUES (%s,%s,INET_ATON(%s),NOW())", \
(recid, uid, client_ip_address))
def calculate_reading_similarity_list(recid, type="pageviews"):
"""Calculate reading similarity data to use in reading similarity
boxes (``people who downloaded/viewed this file/page have also
downloaded/viewed''). Return list of (recid1, score1),
(recid2,score2), ... for all recidN that were consulted by the
same people who have also consulted RECID. The reading
similarity TYPE can be either `pageviews' or `downloads',
depending whether we want to obtain page view similarity or
download similarity.
"""
if CFG_CERN_SITE:
return [] # CERN hack 2009-11-23 to ease the load
if type == "downloads":
tablename = "rnkDOWNLOADS"
else: # default
tablename = "rnkPAGEVIEWS"
# firstly compute the set of client hosts who consulted recid:
client_host_list = run_sql("SELECT DISTINCT(client_host)" + \
" FROM " + tablename + \
" WHERE id_bibrec=%s " + \
" AND client_host IS NOT NULL",
(recid,))
# secondly look up all recids that were consulted by these client hosts,
# and order them by the number of different client hosts reading them:
res = []
if client_host_list != ():
client_host_list = str(database_tuples_to_single_list(client_host_list))
client_host_list = client_host_list.replace("L", "")
client_host_list = client_host_list.replace("[", "")
client_host_list = client_host_list.replace("]", "")
res = run_sql("SELECT id_bibrec,COUNT(DISTINCT(client_host)) AS c" \
" FROM " + tablename + \
" WHERE client_host IN (" + client_host_list + ")" + \
" AND id_bibrec != %s" \
" GROUP BY id_bibrec ORDER BY c DESC LIMIT 10",
(recid,))
return res
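# A small trace of the string munging above (host values hypothetical):
# str(database_tuples_to_single_list(((167772161L,), (167772162L,))))
# gives '[167772161L, 167772162L]'; the replace calls strip the 'L'
# suffixes and the brackets, leaving '167772161, 167772162' for the
# SQL IN (...) clause.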
| gpl-2.0 |
zkota/pyblio-1.2 | pybrc.py | 2 | 1564 | # Site configuration
from Pyblio import Autoload, Config, version
from Pyblio.TextUI import *
# ==================================================
import string, os
# define autoloaded formats
Autoload.preregister ('format', 'BibTeX', 'Pyblio.Format.BibTeX', '.*\.bib')
Autoload.preregister ('format', 'Ovid', 'Pyblio.Format.Ovid', '.*\.ovid')
Autoload.preregister ('format', 'Medline', 'Pyblio.Format.Medline', '.*\.med')
Autoload.preregister ('format', 'Refer', 'Pyblio.Format.Refer', '.*\.refer')
Autoload.preregister ('format', 'ISIFile', 'Pyblio.Format.isifile', '.*\.isi')
# define styles and outputs
Autoload.preregister ('style', 'Generic', 'Pyblio.Style.Generic')
Autoload.preregister ('style', 'apa4e', 'Pyblio.Style.apa4e')
Autoload.preregister ('style', 'abbrv', 'Pyblio.Style.abbrv')
Autoload.preregister ('output', 'Text', 'Pyblio.Output.text')
Autoload.preregister ('output', 'Raw', 'Pyblio.Output.raw')
Autoload.preregister ('output', 'HTML', 'Pyblio.Output.html')
Autoload.preregister ('output', 'LaTeX', 'Pyblio.Output.LaTeX')
Autoload.preregister ('output', 'Textnum', 'Pyblio.Output.textnum')
Autoload.preregister ('output', 'Textau', 'Pyblio.Output.textau')
# define key formats
Autoload.preregister ('key', 'Default', 'Pyblio.Utils')
# Parse the configuration directory
rootconfig = os.path.join ('Pyblio', 'ConfDir')
if not os.path.isdir (rootconfig):
rootconfig = os.path.join (version.pybdir, 'Pyblio', 'ConfDir')
if os.path.isdir (rootconfig):
Config.parse_directory (rootconfig)
| gpl-2.0 |
bryceguo/robotframework-selenium2library | demo/package.py | 4 | 1378 | #!/usr/bin/env python
import os, sys
from time import localtime
from zipfile import ZipFile, ZIP_DEFLATED
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(THIS_DIR, "..", "src", "Selenium2Library"))
import metadata
FILES = {
'': ['rundemo.py'],
'login_tests': ['valid_login.txt', 'invalid_login.txt', 'resource.txt'],
'demoapp': ['server.py'],
'demoapp/html': ['index.html', 'welcome.html', 'error.html', 'demo.css']
}
def main():
cwd = os.getcwd()
try:
os.chdir(THIS_DIR)
name = 'robotframework-selenium2library-%s-demo' % metadata.VERSION
zipname = '%s.zip' % name
if os.path.exists(zipname):
os.remove(zipname)
zipfile = ZipFile(zipname, 'w', ZIP_DEFLATED)
for dirname in FILES:
for filename in FILES[dirname]:
path = os.path.join('.', dirname.replace('/', os.sep), filename)
print 'Adding: ', os.path.normpath(path)
zipfile.write(path, os.path.join(name, path))
zipfile.close()
target_path = os.path.join('..', 'dist', zipname)
if os.path.exists(target_path):
os.remove(target_path)
os.rename(zipname, target_path)
print 'Created: ', os.path.abspath(target_path)
finally:
os.chdir(cwd)
if __name__ == '__main__':
main()
| apache-2.0 |
dhruvagarwal/django | tests/template_tests/filter_tests/test_pluralize.py | 430 | 1200 | from decimal import Decimal
from django.template.defaultfilters import pluralize
from django.test import SimpleTestCase
class FunctionTests(SimpleTestCase):
def test_integers(self):
self.assertEqual(pluralize(1), '')
self.assertEqual(pluralize(0), 's')
self.assertEqual(pluralize(2), 's')
def test_floats(self):
self.assertEqual(pluralize(0.5), 's')
self.assertEqual(pluralize(1.5), 's')
def test_decimals(self):
self.assertEqual(pluralize(Decimal(1)), '')
self.assertEqual(pluralize(Decimal(0)), 's')
self.assertEqual(pluralize(Decimal(2)), 's')
def test_lists(self):
self.assertEqual(pluralize([1]), '')
self.assertEqual(pluralize([]), 's')
self.assertEqual(pluralize([1, 2, 3]), 's')
def test_suffixes(self):
self.assertEqual(pluralize(1, 'es'), '')
self.assertEqual(pluralize(0, 'es'), 'es')
self.assertEqual(pluralize(2, 'es'), 'es')
self.assertEqual(pluralize(1, 'y,ies'), 'y')
self.assertEqual(pluralize(0, 'y,ies'), 'ies')
self.assertEqual(pluralize(2, 'y,ies'), 'ies')
self.assertEqual(pluralize(0, 'y,ies,error'), '')
| bsd-3-clause |
mapseed/api | src/sa_api_v2/management/commands/updateDatasetDevCreds.py | 3 | 3486 | from __future__ import print_function
from django.core.management.base import BaseCommand
import os
import re
# for manually testing with `./manage.py shell` commandline:
# from ... import models as sa_models
# from ... import forms
from sa_api_v2 import models as sa_models
import logging
# display our logs to console with StreamHandler:
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(console_handler)
# The name of our file containing our dev key values,
# located in our project root.
DATASET_ENV_FILE = '.dataset-env'
# The suffix of our dev key variables in our .env file:
DEV_KEY_SUFFIX = '_DEV_KEY'
def parse_env(env_dict):
"""
Parses variables from a .env file located in the project root
directory and loads them into the dictionary.
"""
try:
file_path = os.path.join(
os.path.dirname(__file__), '..', '..', '..', DATASET_ENV_FILE)
with open(file_path) as f:
content = f.read()
except IOError:
content = ''
for line in content.splitlines():
m1 = re.match(r'\A([A-Za-z_0-9]+){}=(.*)\Z'.format(DEV_KEY_SUFFIX),
line)
if m1:
key, val = m1.group(1), m1.group(2)
m2 = re.match(r"\A'(.*)'\Z", val)
if m2:
val = m2.group(1)
m3 = re.match(r'\A"(.*)"\Z', val)
if m3:
val = re.sub(r'\\(.)', r'\1', m3.group(1))
logger.info('parsing key from environment file: {}={}'
.format(key, val))
env_dict.setdefault('{}{}'.format(key, DEV_KEY_SUFFIX), val)
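# A minimal sketch of one accepted line, assuming the _DEV_KEY suffix
# declared above (dataset name and value hypothetical):
# MYDATASET_DEV_KEY="abc\"123" -> {'MYDATASET_DEV_KEY': 'abc"123'}
# Quoting is optional; single or double quotes are stripped, and
# backslash escapes inside double quotes are unescaped.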
class Command(BaseCommand):
help = """
For our dev api, update the api key value of all our datasets to
the constants defined in our .env file
This command is idempotent.
"""
def handle(self, *args, **options):
logger.info('parsing environment variables...')
api_key_values = {}
parse_env(api_key_values)
logger.info('environment variables: {}'.format(api_key_values))
logger.info('starting dataset key migration...')
datasets = sa_models.DataSet.objects.all()
logger.info('fetching matching datasets...')
logger.info('')
for dataset in datasets:
api_key_value = api_key_values.get('{}{}'.format(
dataset.display_name.upper(),
DEV_KEY_SUFFIX), None)
# handle case when we have a dataset but no dev key:
if api_key_value is None:
logger.error('No matching key found for dataset: {}'
.format(dataset.display_name))
logger.error('perhaps we should create a key for it?\n')
continue
# handle case when we have a dataset with no key:
if dataset.keys is None or len(dataset.keys.all()) < 1:
logger.error('Skipping dataset because it has no api key: {}\n'
.format(dataset.display_name))
continue
logger.info('setting key for dataset name: {} to value: {}'
.format(dataset.display_name, api_key_value))
# save the new value to our dataset's key:
key = dataset.keys.all()[0]
key.key = api_key_value
key.save()
dataset.key = key
dataset.save()
| gpl-3.0 |
hargup/sympy | sympy/polys/domains/tests/test_polynomialring.py | 99 | 3314 | """Tests for the PolynomialRing classes. """
from sympy.polys.domains import QQ, ZZ
from sympy.polys.polyerrors import ExactQuotientFailed, CoercionFailed, NotReversible
from sympy.abc import x, y
from sympy.utilities.pytest import raises
def test_build_order():
R = QQ.old_poly_ring(x, y, order=(("lex", x), ("ilex", y)))
assert R.order((1, 5)) == ((1,), (-5,))
def test_globalring():
Qxy = QQ.old_frac_field(x, y)
R = QQ.old_poly_ring(x, y)
X = R.convert(x)
Y = R.convert(y)
assert x in R
assert 1/x not in R
assert 1/(1 + x) not in R
assert Y in R
assert X.ring == R
assert X * (Y**2 + 1) == R.convert(x * (y**2 + 1))
assert X * y == X * Y == R.convert(x * y) == x * Y
assert X + y == X + Y == R.convert(x + y) == x + Y
assert X - y == X - Y == R.convert(x - y) == x - Y
assert X + 1 == R.convert(x + 1)
raises(ExactQuotientFailed, lambda: X/Y)
raises(ExactQuotientFailed, lambda: x/Y)
raises(ExactQuotientFailed, lambda: X/y)
assert X**2 / X == X
assert R.from_GlobalPolynomialRing(ZZ.old_poly_ring(x, y).convert(x), ZZ.old_poly_ring(x, y)) == X
assert R.from_FractionField(Qxy.convert(x), Qxy) == X
assert R.from_FractionField(Qxy.convert(x)/y, Qxy) is None
assert R._sdm_to_vector(R._vector_to_sdm([X, Y], R.order), 2) == [X, Y]
def test_localring():
Qxy = QQ.old_frac_field(x, y)
R = QQ.old_poly_ring(x, y, order="ilex")
X = R.convert(x)
Y = R.convert(y)
assert x in R
assert 1/x not in R
assert 1/(1 + x) in R
assert Y in R
assert X.ring == R
assert X*(Y**2 + 1)/(1 + X) == R.convert(x*(y**2 + 1)/(1 + x))
assert X*y == X*Y
raises(ExactQuotientFailed, lambda: X/Y)
raises(ExactQuotientFailed, lambda: x/Y)
raises(ExactQuotientFailed, lambda: X/y)
assert X + y == X + Y == R.convert(x + y) == x + Y
assert X - y == X - Y == R.convert(x - y) == x - Y
assert X + 1 == R.convert(x + 1)
assert X**2 / X == X
assert R.from_GlobalPolynomialRing(ZZ.old_poly_ring(x, y).convert(x), ZZ.old_poly_ring(x, y)) == X
assert R.from_FractionField(Qxy.convert(x), Qxy) == X
raises(CoercionFailed, lambda: R.from_FractionField(Qxy.convert(x)/y, Qxy))
raises(ExactQuotientFailed, lambda: X/Y)
raises(NotReversible, lambda: X.invert())
assert R._sdm_to_vector(
R._vector_to_sdm([X/(X + 1), Y/(1 + X*Y)], R.order), 2) == \
[X*(1 + X*Y), Y*(1 + X)]
def test_conversion():
L = QQ.old_poly_ring(x, y, order="ilex")
G = QQ.old_poly_ring(x, y)
assert L.convert(x) == L.convert(G.convert(x), G)
assert G.convert(x) == G.convert(L.convert(x), L)
raises(CoercionFailed, lambda: G.convert(L.convert(1/(1 + x)), L))
def test_units():
R = QQ.old_poly_ring(x)
assert R.is_unit(R.convert(1))
assert R.is_unit(R.convert(2))
assert not R.is_unit(R.convert(x))
assert not R.is_unit(R.convert(1 + x))
R = QQ.old_poly_ring(x, order='ilex')
assert R.is_unit(R.convert(1))
assert R.is_unit(R.convert(2))
assert not R.is_unit(R.convert(x))
assert R.is_unit(R.convert(1 + x))
R = ZZ.old_poly_ring(x)
assert R.is_unit(R.convert(1))
assert not R.is_unit(R.convert(2))
assert not R.is_unit(R.convert(x))
assert not R.is_unit(R.convert(1 + x))
| bsd-3-clause |
britcey/ansible | test/runner/lib/target.py | 28 | 16080 | """Test target identification, iteration and inclusion/exclusion."""
from __future__ import absolute_import, print_function
import os
import re
import errno
import itertools
import abc
from lib.util import ApplicationError
MODULE_EXTENSIONS = '.py', '.ps1'
def find_target_completion(target_func, prefix):
"""
:type target_func: () -> collections.Iterable[CompletionTarget]
:type prefix: unicode
:rtype: list[str]
"""
try:
targets = target_func()
prefix = prefix.encode()
short = os.environ.get('COMP_TYPE') == '63' # double tab completion from bash
matches = walk_completion_targets(targets, prefix, short)
return matches
except Exception as ex: # pylint: disable=locally-disabled, broad-except
return [str(ex)]
def walk_completion_targets(targets, prefix, short=False):
"""
:type targets: collections.Iterable[CompletionTarget]
:type prefix: str
:type short: bool
:rtype: tuple[str]
"""
aliases = set(alias for target in targets for alias in target.aliases)
if prefix.endswith('/') and prefix in aliases:
aliases.remove(prefix)
matches = [alias for alias in aliases if alias.startswith(prefix) and '/' not in alias[len(prefix):-1]]
if short:
offset = len(os.path.dirname(prefix))
if offset:
offset += 1
relative_matches = [match[offset:] for match in matches if len(match) > offset]
if len(relative_matches) > 1:
matches = relative_matches
return tuple(sorted(matches))
def walk_internal_targets(targets, includes=None, excludes=None, requires=None):
"""
:type targets: collections.Iterable[T <= CompletionTarget]
:type includes: list[str]
:type excludes: list[str]
:type requires: list[str]
:rtype: tuple[T <= CompletionTarget]
"""
targets = tuple(targets)
include_targets = sorted(filter_targets(targets, includes, errors=True, directories=False), key=lambda t: t.name)
if requires:
require_targets = set(filter_targets(targets, requires, errors=True, directories=False))
include_targets = [target for target in include_targets if target in require_targets]
if excludes:
list(filter_targets(targets, excludes, errors=True, include=False, directories=False))
internal_targets = set(filter_targets(include_targets, excludes, errors=False, include=False, directories=False))
return tuple(sorted(internal_targets, key=lambda t: t.name))
def walk_external_targets(targets, includes=None, excludes=None, requires=None):
"""
:type targets: collections.Iterable[CompletionTarget]
:type includes: list[str]
:type excludes: list[str]
:type requires: list[str]
:rtype: tuple[CompletionTarget], tuple[CompletionTarget]
"""
targets = tuple(targets)
if requires:
include_targets = list(filter_targets(targets, includes, errors=True, directories=False))
require_targets = set(filter_targets(targets, requires, errors=True, directories=False))
includes = [target.name for target in include_targets if target in require_targets]
if includes:
include_targets = sorted(filter_targets(targets, includes, errors=True), key=lambda t: t.name)
else:
include_targets = []
else:
include_targets = sorted(filter_targets(targets, includes, errors=True), key=lambda t: t.name)
if excludes:
exclude_targets = sorted(filter_targets(targets, excludes, errors=True), key=lambda t: t.name)
else:
exclude_targets = []
previous = None
include = []
for target in include_targets:
if isinstance(previous, DirectoryTarget) and isinstance(target, DirectoryTarget) \
and previous.name == target.name:
previous.modules = tuple(set(previous.modules) | set(target.modules))
else:
include.append(target)
previous = target
previous = None
exclude = []
for target in exclude_targets:
if isinstance(previous, DirectoryTarget) and isinstance(target, DirectoryTarget) \
and previous.name == target.name:
previous.modules = tuple(set(previous.modules) | set(target.modules))
else:
exclude.append(target)
previous = target
return tuple(include), tuple(exclude)
def filter_targets(targets, patterns, include=True, directories=True, errors=True):
"""
:type targets: collections.Iterable[CompletionTarget]
:type patterns: list[str]
:type include: bool
:type directories: bool
:type errors: bool
:rtype: collections.Iterable[CompletionTarget]
"""
unmatched = set(patterns or ())
compiled_patterns = dict((p, re.compile('^%s$' % p)) for p in patterns) if patterns else None
for target in targets:
matched_directories = set()
match = False
if patterns:
for alias in target.aliases:
for pattern in patterns:
if compiled_patterns[pattern].match(alias):
match = True
try:
unmatched.remove(pattern)
except KeyError:
pass
if alias.endswith('/'):
if target.base_path and len(target.base_path) > len(alias):
matched_directories.add(target.base_path)
else:
matched_directories.add(alias)
elif include:
match = True
if not target.base_path:
matched_directories.add('.')
for alias in target.aliases:
if alias.endswith('/'):
if target.base_path and len(target.base_path) > len(alias):
matched_directories.add(target.base_path)
else:
matched_directories.add(alias)
if match != include:
continue
if directories and matched_directories:
yield DirectoryTarget(sorted(matched_directories, key=len)[0], target.modules)
else:
yield target
if errors:
if unmatched:
raise TargetPatternsNotMatched(unmatched)
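# Behaviour sketch (editorial addition; the pattern and alias values are
# hypothetical): patterns are compiled as anchored regexes ('^pattern$'),
# so 'net.*' matches the alias 'network' while a plain 'net' does not:
#
#   >>> [t.name for t in filter_targets(targets, ['net.*'], directories=False)]
#   ['network']
#
# Passing include=False excludes the matched targets instead, and with
# errors=True any pattern that matches nothing raises
# TargetPatternsNotMatched.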
def walk_module_targets():
"""
:rtype: collections.Iterable[TestTarget]
"""
path = 'lib/ansible/modules'
for target in walk_test_targets(path, path + '/', extensions=MODULE_EXTENSIONS):
if not target.module:
continue
yield target
def walk_units_targets():
"""
:rtype: collections.Iterable[TestTarget]
"""
return walk_test_targets(path='test/units', module_path='test/units/modules/', extensions=('.py',), prefix='test_')
def walk_compile_targets():
"""
:rtype: collections.Iterable[TestTarget]
"""
return walk_test_targets(module_path='lib/ansible/modules/', extensions=('.py',))
def walk_sanity_targets():
"""
:rtype: collections.Iterable[TestTarget]
"""
return walk_test_targets(module_path='lib/ansible/modules/')
def walk_posix_integration_targets():
"""
:rtype: collections.Iterable[IntegrationTarget]
"""
for target in walk_integration_targets():
if 'posix/' in target.aliases:
yield target
def walk_network_integration_targets():
"""
:rtype: collections.Iterable[IntegrationTarget]
"""
for target in walk_integration_targets():
if 'network/' in target.aliases:
yield target
def walk_windows_integration_targets():
"""
:rtype: collections.Iterable[IntegrationTarget]
"""
for target in walk_integration_targets():
if 'windows/' in target.aliases:
yield target
def walk_integration_targets():
"""
:rtype: collections.Iterable[IntegrationTarget]
"""
path = 'test/integration/targets'
modules = frozenset(t.module for t in walk_module_targets())
paths = sorted(os.path.join(path, p) for p in os.listdir(path))
prefixes = load_integration_prefixes()
for path in paths:
yield IntegrationTarget(path, modules, prefixes)
def load_integration_prefixes():
"""
:rtype: dict[str, str]
"""
path = 'test/integration'
names = sorted(f for f in os.listdir(path) if os.path.splitext(f)[0] == 'target-prefixes')
prefixes = {}
for name in names:
prefix = os.path.splitext(name)[1][1:]
with open(os.path.join(path, name), 'r') as prefix_fd:
prefixes.update(dict((k, prefix) for k in prefix_fd.read().splitlines()))
return prefixes
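# On-disk layout sketch (assumed from the parsing above; not shipped with
# this file): a file 'test/integration/target-prefixes.network' containing
#
#   ios
#   junos
#
# yields {'ios': 'network', 'junos': 'network'} -- the file extension names
# the group and each line names a target-name prefix.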
def walk_test_targets(path=None, module_path=None, extensions=None, prefix=None):
"""
:type path: str | None
:type module_path: str | None
:type extensions: tuple[str] | None
:type prefix: str | None
:rtype: collections.Iterable[TestTarget]
"""
for root, _, file_names in os.walk(path or '.', topdown=False):
if root.endswith('/__pycache__'):
continue
if '/.tox/' in root:
continue
if path is None:
root = root[2:]
if root.startswith('.'):
continue
for file_name in file_names:
name, ext = os.path.splitext(os.path.basename(file_name))
if name.startswith('.'):
continue
if extensions and ext not in extensions:
continue
if prefix and not name.startswith(prefix):
continue
yield TestTarget(os.path.join(root, file_name), module_path, prefix, path)
class CompletionTarget(object):
"""Command-line argument completion target base class."""
__metaclass__ = abc.ABCMeta
def __init__(self):
self.name = None
self.path = None
self.base_path = None
self.modules = tuple()
self.aliases = tuple()
def __eq__(self, other):
if isinstance(other, CompletionTarget):
return self.__repr__() == other.__repr__()
return False
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
return self.name.__lt__(other.name)
def __gt__(self, other):
return self.name.__gt__(other.name)
def __hash__(self):
return hash(self.__repr__())
def __repr__(self):
if self.modules:
return '%s (%s)' % (self.name, ', '.join(self.modules))
return self.name
class DirectoryTarget(CompletionTarget):
"""Directory target."""
def __init__(self, path, modules):
"""
:type path: str
:type modules: tuple[str]
"""
super(DirectoryTarget, self).__init__()
self.name = path
self.path = path
self.modules = modules
class TestTarget(CompletionTarget):
"""Generic test target."""
def __init__(self, path, module_path, module_prefix, base_path):
"""
:type path: str
:type module_path: str | None
:type module_prefix: str | None
:type base_path: str
"""
super(TestTarget, self).__init__()
self.name = path
self.path = path
self.base_path = base_path + '/' if base_path else None
name, ext = os.path.splitext(os.path.basename(self.path))
if module_path and path.startswith(module_path) and name != '__init__' and ext in MODULE_EXTENSIONS:
self.module = name[len(module_prefix or ''):].lstrip('_')
self.modules = self.module,
else:
self.module = None
self.modules = tuple()
aliases = [self.path, self.module]
parts = self.path.split('/')
for i in range(1, len(parts)):
alias = '%s/' % '/'.join(parts[:i])
aliases.append(alias)
aliases = [a for a in aliases if a]
self.aliases = tuple(sorted(aliases))
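# Worked example (editorial addition; the path is hypothetical):
# TestTarget('lib/ansible/modules/system/ping.py', 'lib/ansible/modules/',
# None, None) detects the module name 'ping' and builds aliases for the
# file itself, the module name, and every parent directory:
#
#   ('lib/', 'lib/ansible/', 'lib/ansible/modules/',
#    'lib/ansible/modules/system/', 'lib/ansible/modules/system/ping.py',
#    'ping')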
class IntegrationTarget(CompletionTarget):
"""Integration test target."""
non_posix = frozenset((
'network',
'windows',
))
categories = frozenset(non_posix | frozenset((
'posix',
'module',
'needs',
'skip',
)))
def __init__(self, path, modules, prefixes):
"""
:type path: str
:type modules: frozenset[str]
:type prefixes: dict[str, str]
"""
super(IntegrationTarget, self).__init__()
self.name = os.path.basename(path)
self.path = path
# script_path and type
contents = sorted(os.listdir(path))
runme_files = tuple(c for c in contents if os.path.splitext(c)[0] == 'runme')
test_files = tuple(c for c in contents if os.path.splitext(c)[0] == 'test')
self.script_path = None
if runme_files:
self.type = 'script'
self.script_path = os.path.join(path, runme_files[0])
elif test_files:
self.type = 'special'
elif os.path.isdir(os.path.join(path, 'tasks')):
self.type = 'role'
else:
self.type = 'unknown'
# static_aliases
try:
with open(os.path.join(path, 'aliases'), 'r') as aliases_file:
static_aliases = tuple(aliases_file.read().splitlines())
except IOError as ex:
if ex.errno != errno.ENOENT:
raise
static_aliases = tuple()
# modules
if self.name in modules:
module = self.name
elif self.name.startswith('win_') and self.name[4:] in modules:
module = self.name[4:]
else:
module = None
self.modules = tuple(sorted(a for a in static_aliases + tuple([module]) if a in modules))
# groups
groups = [self.type]
groups += [a for a in static_aliases if a not in modules]
groups += ['module/%s' % m for m in self.modules]
if not self.modules:
groups.append('non_module')
if 'destructive' not in groups:
groups.append('non_destructive')
if '_' in self.name:
prefix = self.name[:self.name.find('_')]
else:
prefix = None
if prefix in prefixes:
group = prefixes[prefix]
if group != prefix:
group = '%s/%s' % (group, prefix)
groups.append(group)
if self.name.startswith('win_'):
groups.append('windows')
if self.name.startswith('connection_'):
groups.append('connection')
if self.name.startswith('setup_') or self.name.startswith('prepare_'):
groups.append('hidden')
if self.type not in ('script', 'role'):
groups.append('hidden')
for group in itertools.islice(groups, 0, len(groups)):
if '/' in group:
parts = group.split('/')
for i in range(1, len(parts)):
groups.append('/'.join(parts[:i]))
if not any(g in self.non_posix for g in groups):
groups.append('posix')
# aliases
aliases = [self.name] + \
['%s/' % g for g in groups] + \
['%s/%s' % (g, self.name) for g in groups if g not in self.categories]
if 'hidden/' in aliases:
aliases = ['hidden/'] + ['hidden/%s' % a for a in aliases if not a.startswith('hidden/')]
self.aliases = tuple(sorted(set(aliases)))
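# Worked example (editorial addition; assumes no 'aliases' file and no
# prefix mapping): a role-type target named 'connection_ssh' that matches
# no module ends up in the groups
# ['role', 'non_module', 'non_destructive', 'connection', 'posix'], and its
# aliases contain the bare name, one '<group>/' entry per group, plus
# '<group>/connection_ssh' for each group that is not a reserved category
# such as 'posix'.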
class TargetPatternsNotMatched(ApplicationError):
"""One or more targets were not matched when a match was required."""
def __init__(self, patterns):
"""
:type patterns: set[str]
"""
self.patterns = sorted(patterns)
if len(patterns) > 1:
message = 'Target patterns not matched:\n%s' % '\n'.join(self.patterns)
else:
message = 'Target pattern not matched: %s' % self.patterns[0]
super(TargetPatternsNotMatched, self).__init__(message)
| gpl-3.0 |
frost-nzcr4/djangocms-installer | docs/conf.py | 3 | 8439 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.append(parent)
import djangocms_installer
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx',
'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath',
'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
#templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django CMS Installer'
copyright = u'2013, Iacopo Spalletti'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = djangocms_installer.__version__
# The full version, including alpha/beta/rc tags.
release = djangocms_installer.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'djangocms_installerdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'djangocms_installer.tex', u'django CMS Installer Documentation',
u'Iacopo Spalletti', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'djangocms_installer', u'django CMS Installer Documentation',
[u'Iacopo Spalletti'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'djangocms_installer', u'django CMS Installer Documentation',
u'Iacopo Spalletti', 'djangocms_installer', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| bsd-3-clause |
amerlyq/airy | vim/res/ycm_extra_conf.py | 1 | 5213 | # SEE: CACHE/bundle/YouCompleteMe/cpp/ycm/.ycm_extra_conf.py
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall', '-Wextra', '-Werror', '-Wc++98-compat',
'-Wno-long-long', '-Wno-variadic-macros', '-fexceptions',
'-DNDEBUG',
# You 100% do NOT need -DUSE_CLANG_COMPLETER in your flags; only the YCM
# source code needs it.
#'-DUSE_CLANG_COMPLETER',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++11',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x', 'c++',
'-isystem', '../BoostParts',
# This path will only work on OS X, but extra paths that don't exist are not harmful
'-isystem', '/System/Library/Frameworks/Python.framework/Headers',
'-isystem', '../llvm/include',
'-isystem', '../llvm/tools/clang/include',
'-I', '.',
'-I', './ClangCompleter',
'-isystem', './tests/gmock/gtest',
'-isystem', './tests/gmock/gtest/include',
'-isystem', './tests/gmock',
'-isystem', './tests/gmock/include',
'-isystem', '/usr/include',
'-isystem', '/usr/local/include',
'-isystem', '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/../lib/c++/v1',
'-isystem', '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = os.path.abspath( os.path.expanduser( '~/aura/pdrm/gerrit/build' ) )  # expanduser needed: abspath alone does not expand '~'
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
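# Illustrative behaviour (editorial addition; the working directory is
# hypothetical): with working_directory='/home/user/proj', the pair
# ['-I', '.'] becomes ['-I', '/home/user/proj/.'], the single token
# '-I./tests' becomes '-I/home/user/proj/./tests', and flags that are
# already absolute pass through unchanged.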
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
#try:
# final_flags.remove( '-stdlib=libc++' )
#except ValueError:
# pass
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return { 'flags': final_flags, 'do_cache': True }
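# Usage sketch (editorial addition, mirroring how YouCompleteMe calls this
# hook; the file path is hypothetical):
#
#   >>> FlagsForFile('/home/user/proj/src/foo.cpp')
#   {'flags': [...], 'do_cache': True}
#
# Without a compilation database, the static 'flags' list above is returned
# with its relative include paths resolved against this script's directory.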
| mit |
rdo-management/ironic | ironic/tests/drivers/test_virtualbox.py | 4 | 18456 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for VirtualBox Driver Modules."""
import mock
from oslo_config import cfg
from pyremotevbox import exception as pyremotevbox_exc
from pyremotevbox import vbox as pyremotevbox_vbox
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common import states
from ironic.conductor import task_manager
from ironic.drivers.modules import virtualbox
from ironic.tests.conductor import utils as mgr_utils
from ironic.tests.db import base as db_base
from ironic.tests.objects import utils as obj_utils
INFO_DICT = {
'virtualbox_vmname': 'baremetal1',
'virtualbox_host': '10.0.2.2',
'virtualbox_username': 'username',
'virtualbox_password': 'password',
'virtualbox_port': 12345,
}
CONF = cfg.CONF
class VirtualBoxMethodsTestCase(db_base.DbTestCase):
def setUp(self):
super(VirtualBoxMethodsTestCase, self).setUp()
driver_info = INFO_DICT.copy()
mgr_utils.mock_the_extension_manager(driver="fake_vbox")
self.node = obj_utils.create_test_node(self.context,
driver='fake_vbox',
driver_info=driver_info)
def test__parse_driver_info(self):
info = virtualbox._parse_driver_info(self.node)
self.assertEqual('baremetal1', info['vmname'])
self.assertEqual('10.0.2.2', info['host'])
self.assertEqual('username', info['username'])
self.assertEqual('password', info['password'])
self.assertEqual(12345, info['port'])
def test__parse_driver_info_missing_vmname(self):
del self.node.driver_info['virtualbox_vmname']
self.assertRaises(exception.MissingParameterValue,
virtualbox._parse_driver_info, self.node)
def test__parse_driver_info_missing_host(self):
del self.node.driver_info['virtualbox_host']
self.assertRaises(exception.MissingParameterValue,
virtualbox._parse_driver_info, self.node)
def test__parse_driver_info_invalid_port(self):
self.node.driver_info['virtualbox_port'] = 'invalid-port'
self.assertRaises(exception.InvalidParameterValue,
virtualbox._parse_driver_info, self.node)
def test__parse_driver_info_missing_port(self):
del self.node.driver_info['virtualbox_port']
info = virtualbox._parse_driver_info(self.node)
self.assertEqual(18083, info['port'])
@mock.patch.object(pyremotevbox_vbox, 'VirtualBoxHost')
def test__run_virtualbox_method(self, host_mock):
host_object_mock = mock.MagicMock()
func_mock = mock.MagicMock()
vm_object_mock = mock.MagicMock(foo=func_mock)
host_mock.return_value = host_object_mock
host_object_mock.find_vm.return_value = vm_object_mock
func_mock.return_value = 'return-value'
return_value = virtualbox._run_virtualbox_method(self.node,
'some-ironic-method', 'foo', 'args', kwarg='kwarg')
host_mock.assert_called_once_with(vmname='baremetal1',
host='10.0.2.2',
username='username',
password='password',
port=12345)
host_object_mock.find_vm.assert_called_once_with('baremetal1')
func_mock.assert_called_once_with('args', kwarg='kwarg')
self.assertEqual('return-value', return_value)
@mock.patch.object(pyremotevbox_vbox, 'VirtualBoxHost')
def test__run_virtualbox_method_get_host_fails(self, host_mock):
host_mock.side_effect = pyremotevbox_exc.PyRemoteVBoxException
self.assertRaises(exception.VirtualBoxOperationFailed,
virtualbox._run_virtualbox_method,
self.node, 'some-ironic-method', 'foo',
'args', kwarg='kwarg')
@mock.patch.object(pyremotevbox_vbox, 'VirtualBoxHost')
def test__run_virtualbox_method_find_vm_fails(self, host_mock):
host_object_mock = mock.MagicMock()
host_mock.return_value = host_object_mock
exc = pyremotevbox_exc.PyRemoteVBoxException
host_object_mock.find_vm.side_effect = exc
self.assertRaises(exception.VirtualBoxOperationFailed,
virtualbox._run_virtualbox_method,
self.node, 'some-ironic-method', 'foo', 'args',
kwarg='kwarg')
host_mock.assert_called_once_with(vmname='baremetal1',
host='10.0.2.2',
username='username',
password='password',
port=12345)
host_object_mock.find_vm.assert_called_once_with('baremetal1')
@mock.patch.object(pyremotevbox_vbox, 'VirtualBoxHost')
def test__run_virtualbox_method_func_fails(self, host_mock):
host_object_mock = mock.MagicMock()
host_mock.return_value = host_object_mock
func_mock = mock.MagicMock()
vm_object_mock = mock.MagicMock(foo=func_mock)
host_object_mock.find_vm.return_value = vm_object_mock
func_mock.side_effect = pyremotevbox_exc.PyRemoteVBoxException
self.assertRaises(exception.VirtualBoxOperationFailed,
virtualbox._run_virtualbox_method,
self.node, 'some-ironic-method', 'foo',
'args', kwarg='kwarg')
host_mock.assert_called_once_with(vmname='baremetal1',
host='10.0.2.2',
username='username',
password='password',
port=12345)
host_object_mock.find_vm.assert_called_once_with('baremetal1')
func_mock.assert_called_once_with('args', kwarg='kwarg')
@mock.patch.object(pyremotevbox_vbox, 'VirtualBoxHost')
def test__run_virtualbox_method_invalid_method(self, host_mock):
host_object_mock = mock.MagicMock()
host_mock.return_value = host_object_mock
vm_object_mock = mock.MagicMock()
host_object_mock.find_vm.return_value = vm_object_mock
del vm_object_mock.foo
self.assertRaises(exception.InvalidParameterValue,
virtualbox._run_virtualbox_method,
self.node, 'some-ironic-method', 'foo',
'args', kwarg='kwarg')
host_mock.assert_called_once_with(vmname='baremetal1',
host='10.0.2.2',
username='username',
password='password',
port=12345)
host_object_mock.find_vm.assert_called_once_with('baremetal1')
@mock.patch.object(pyremotevbox_vbox, 'VirtualBoxHost')
def test__run_virtualbox_method_vm_wrong_power_state(self, host_mock):
host_object_mock = mock.MagicMock()
host_mock.return_value = host_object_mock
func_mock = mock.MagicMock()
vm_object_mock = mock.MagicMock(foo=func_mock)
host_object_mock.find_vm.return_value = vm_object_mock
func_mock.side_effect = pyremotevbox_exc.VmInWrongPowerState
# _run_virtualbox_method() doesn't catch VmInWrongPowerState and
# lets caller handle it.
self.assertRaises(pyremotevbox_exc.VmInWrongPowerState,
virtualbox._run_virtualbox_method,
self.node, 'some-ironic-method', 'foo',
'args', kwarg='kwarg')
host_mock.assert_called_once_with(vmname='baremetal1',
host='10.0.2.2',
username='username',
password='password',
port=12345)
host_object_mock.find_vm.assert_called_once_with('baremetal1')
func_mock.assert_called_once_with('args', kwarg='kwarg')
class VirtualBoxPowerTestCase(db_base.DbTestCase):
def setUp(self):
super(VirtualBoxPowerTestCase, self).setUp()
driver_info = INFO_DICT.copy()
mgr_utils.mock_the_extension_manager(driver="fake_vbox")
self.node = obj_utils.create_test_node(self.context,
driver='fake_vbox',
driver_info=driver_info)
def test_get_properties(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
properties = task.driver.power.get_properties()
self.assertIn('virtualbox_vmname', properties)
self.assertIn('virtualbox_host', properties)
@mock.patch.object(virtualbox, '_parse_driver_info')
def test_validate(self, parse_info_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.power.validate(task)
parse_info_mock.assert_called_once_with(task.node)
@mock.patch.object(virtualbox, '_run_virtualbox_method')
def test_get_power_state(self, run_method_mock):
run_method_mock.return_value = 'PoweredOff'
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
power_state = task.driver.power.get_power_state(task)
run_method_mock.assert_called_once_with(task.node,
'get_power_state',
'get_power_status')
self.assertEqual(states.POWER_OFF, power_state)
@mock.patch.object(virtualbox, '_run_virtualbox_method')
def test_get_power_state_invalid_state(self, run_method_mock):
run_method_mock.return_value = 'invalid-state'
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
power_state = task.driver.power.get_power_state(task)
run_method_mock.assert_called_once_with(task.node,
'get_power_state',
'get_power_status')
self.assertEqual(states.ERROR, power_state)
@mock.patch.object(virtualbox, '_run_virtualbox_method')
def test_set_power_state_off(self, run_method_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.power.set_power_state(task, states.POWER_OFF)
run_method_mock.assert_called_once_with(task.node,
'set_power_state',
'stop')
@mock.patch.object(virtualbox, '_run_virtualbox_method')
def test_set_power_state_on(self, run_method_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.power.set_power_state(task, states.POWER_ON)
run_method_mock.assert_called_once_with(task.node,
'set_power_state',
'start')
@mock.patch.object(virtualbox, '_run_virtualbox_method')
def test_set_power_state_reboot(self, run_method_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.power.set_power_state(task, states.REBOOT)
run_method_mock.assert_any_call(task.node,
'reboot',
'stop')
run_method_mock.assert_any_call(task.node,
'reboot',
'start')
def test_set_power_state_invalid_state(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.power.set_power_state,
task, 'invalid-state')
@mock.patch.object(virtualbox, '_run_virtualbox_method')
def test_reboot(self, run_method_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.power.reboot(task)
run_method_mock.assert_any_call(task.node,
'reboot',
'stop')
run_method_mock.assert_any_call(task.node,
'reboot',
'start')
class VirtualBoxManagementTestCase(db_base.DbTestCase):
def setUp(self):
super(VirtualBoxManagementTestCase, self).setUp()
driver_info = INFO_DICT.copy()
mgr_utils.mock_the_extension_manager(driver="fake_vbox")
self.node = obj_utils.create_test_node(self.context,
driver='fake_vbox',
driver_info=driver_info)
def test_get_properties(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
properties = task.driver.management.get_properties()
self.assertIn('virtualbox_vmname', properties)
self.assertIn('virtualbox_host', properties)
@mock.patch.object(virtualbox, '_parse_driver_info')
def test_validate(self, parse_info_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.management.validate(task)
parse_info_mock.assert_called_once_with(task.node)
def test_get_supported_boot_devices(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
devices = task.driver.management.get_supported_boot_devices()
self.assertIn(boot_devices.PXE, devices)
self.assertIn(boot_devices.DISK, devices)
self.assertIn(boot_devices.CDROM, devices)
@mock.patch.object(virtualbox, '_run_virtualbox_method')
def test_get_boot_device_ok(self, run_method_mock):
run_method_mock.return_value = 'Network'
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
ret_val = task.driver.management.get_boot_device(task)
run_method_mock.assert_called_once_with(task.node,
'get_boot_device',
'get_boot_device')
self.assertEqual(boot_devices.PXE, ret_val['boot_device'])
self.assertTrue(ret_val['persistent'])
@mock.patch.object(virtualbox, '_run_virtualbox_method')
def test_get_boot_device_invalid(self, run_method_mock):
run_method_mock.return_value = 'invalid-boot-device'
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
ret_val = task.driver.management.get_boot_device(task)
self.assertIsNone(ret_val['boot_device'])
self.assertIsNone(ret_val['persistent'])
@mock.patch.object(virtualbox, '_run_virtualbox_method')
def test_set_boot_device_ok(self, run_method_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.management.set_boot_device(task, boot_devices.PXE)
run_method_mock.assert_called_once_with(task.node,
'set_boot_device',
'set_boot_device',
'Network')
@mock.patch.object(virtualbox, 'LOG')
@mock.patch.object(virtualbox, '_run_virtualbox_method')
def test_set_boot_device_wrong_power_state(self, run_method_mock,
log_mock):
run_method_mock.side_effect = pyremotevbox_exc.VmInWrongPowerState
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.management.set_boot_device(task, boot_devices.PXE)
log_mock.error.assert_called_once_with(mock.ANY, mock.ANY)
@mock.patch.object(virtualbox, '_run_virtualbox_method')
def test_set_boot_device_invalid(self, run_method_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.management.set_boot_device,
task, 'invalid-boot-device')
def test_get_sensors_data(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(NotImplementedError,
task.driver.management.get_sensors_data,
task)
| apache-2.0 |
mjirayu/sit_academy | common/test/acceptance/tests/studio/test_studio_rerun.py | 122 | 4166 | """
Acceptance tests for Studio related to course reruns.
"""
import random
from bok_choy.promise import EmptyPromise
from nose.tools import assert_in
from ...pages.studio.index import DashboardPage
from ...pages.studio.course_rerun import CourseRerunPage
from ...pages.studio.overview import CourseOutlinePage
from ...pages.lms.courseware import CoursewarePage
from ...fixtures.course import XBlockFixtureDesc
from base_studio_test import StudioCourseTest
class CourseRerunTest(StudioCourseTest):
"""
Feature: Courses can be rerun
"""
__test__ = True
SECTION_NAME = 'Rerun Section'
    SUBSECTION_NAME = 'Rerun Subsection'
UNIT_NAME = 'Rerun Unit'
COMPONENT_NAME = 'Rerun Component'
COMPONENT_CONTENT = 'Test Content'
def setUp(self):
"""
Login as global staff because that's the only way to rerun a course.
"""
super(CourseRerunTest, self).setUp(is_staff=True)
self.dashboard_page = DashboardPage(self.browser)
def populate_course_fixture(self, course_fixture):
"""
Create a sample course with one section, one subsection, one unit, and one component.
"""
course_fixture.add_children(
XBlockFixtureDesc('chapter', self.SECTION_NAME).add_children(
                XBlockFixtureDesc('sequential', self.SUBSECTION_NAME).add_children(
XBlockFixtureDesc('vertical', self.UNIT_NAME).add_children(
XBlockFixtureDesc('html', self.COMPONENT_NAME, self.COMPONENT_CONTENT)
)
)
)
)
def test_course_rerun(self):
"""
Scenario: Courses can be rerun
        Given I have a course with a section, subsection, vertical, and html component with content 'Test Content'
When I visit the course rerun page
And I type 'test_rerun' in the course run field
And I click Create Rerun
And I visit the course listing page
And I wait for all courses to finish processing
And I click on the course with run 'test_rerun'
Then I see a rerun notification on the course outline page
And when I click 'Dismiss' on the notification
Then I do not see a rerun notification
And when I expand the subsection and click on the unit
And I click 'View Live Version'
Then I see one html component with the content 'Test Content'
"""
course_info = (self.course_info['org'], self.course_info['number'], self.course_info['run'])
self.dashboard_page.visit()
self.dashboard_page.create_rerun(self.course_info['display_name'])
rerun_page = CourseRerunPage(self.browser, *course_info)
rerun_page.wait_for_page()
course_run = 'test_rerun_' + str(random.randrange(1000000, 9999999))
rerun_page.course_run = course_run
rerun_page.create_rerun()
def finished_processing():
self.dashboard_page.visit()
return not self.dashboard_page.has_processing_courses
EmptyPromise(finished_processing, "Rerun finished processing", try_interval=5, timeout=60).fulfill()
assert_in(course_run, self.dashboard_page.course_runs)
self.dashboard_page.click_course_run(course_run)
outline_page = CourseOutlinePage(self.browser, *course_info)
outline_page.wait_for_page()
self.assertTrue(outline_page.has_rerun_notification)
outline_page.dismiss_rerun_notification()
EmptyPromise(lambda: not outline_page.has_rerun_notification, "Rerun notification dismissed").fulfill()
        subsection = outline_page.section(self.SECTION_NAME).subsection(self.SUBSECTION_NAME)
subsection.expand_subsection()
unit_page = subsection.unit(self.UNIT_NAME).go_to()
unit_page.view_published_version()
courseware = CoursewarePage(self.browser, self.course_id)
courseware.wait_for_page()
self.assertEqual(courseware.num_xblock_components, 1)
self.assertEqual(courseware.xblock_component_html_content(), self.COMPONENT_CONTENT)
| agpl-3.0 |
vnsofthe/odoo-dev | openerp/addons/base/tests/test_res_lang.py | 384 | 2104 | import unittest2
import openerp.tests.common as common
class test_res_lang(common.TransactionCase):
def test_00_intersperse(self):
from openerp.addons.base.res.res_lang import intersperse
assert intersperse("", []) == ("", 0)
assert intersperse("0", []) == ("0", 0)
assert intersperse("012", []) == ("012", 0)
assert intersperse("1", []) == ("1", 0)
assert intersperse("12", []) == ("12", 0)
assert intersperse("123", []) == ("123", 0)
assert intersperse("1234", []) == ("1234", 0)
assert intersperse("123456789", []) == ("123456789", 0)
assert intersperse("&ab%#@1", []) == ("&ab%#@1", 0)
assert intersperse("0", []) == ("0", 0)
assert intersperse("0", [1]) == ("0", 0)
assert intersperse("0", [2]) == ("0", 0)
assert intersperse("0", [200]) == ("0", 0)
assert intersperse("12345678", [1], '.') == ('1234567.8', 1)
assert intersperse("12345678", [1], '.') == ('1234567.8', 1)
assert intersperse("12345678", [2], '.') == ('123456.78', 1)
assert intersperse("12345678", [2,1], '.') == ('12345.6.78', 2)
assert intersperse("12345678", [2,0], '.') == ('12.34.56.78', 3)
assert intersperse("12345678", [-1,2], '.') == ('12345678', 0)
assert intersperse("12345678", [2,-1], '.') == ('123456.78', 1)
assert intersperse("12345678", [2,0,1], '.') == ('12.34.56.78', 3)
assert intersperse("12345678", [2,0,0], '.') == ('12.34.56.78', 3)
assert intersperse("12345678", [2,0,-1], '.') == ('12.34.56.78', 3)
assert intersperse("12345678", [3,3,3,3], '.') == ('12.345.678', 2)
assert intersperse("abc1234567xy", [2], '.') == ('abc1234567.xy', 1)
assert intersperse("abc1234567xy8", [2], '.') == ('abc1234567x.y8', 1) # ... w.r.t. here.
assert intersperse("abc12", [3], '.') == ('abc12', 0)
assert intersperse("abc12", [2], '.') == ('abc12', 0)
assert intersperse("abc12", [1], '.') == ('abc1.2', 1)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Russell-IO/ansible | lib/ansible/modules/network/slxos/slxos_command.py | 25 | 7951 | #!/usr/bin/python
#
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: slxos_command
version_added: "2.6"
author: "Lindsay Hill (@LindsayHill)"
short_description: Run commands on remote devices running Extreme Networks SLX-OS
description:
- Sends arbitrary commands to an SLX node and returns the results
read from the device. This module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
- This module does not support running commands in configuration mode.
Please use M(slxos_config) to configure SLX-OS devices.
notes:
- Tested against SLX-OS 17s.1.02
- If a command sent to the device requires answering a prompt, it is possible
to pass a dict containing I(command), I(answer) and I(prompt). See examples.
options:
commands:
description:
- List of commands to send to the remote SLX-OS device over the
configured provider. The resulting output from the command
is returned. If the I(wait_for) argument is provided, the
module is not returned until the condition is satisfied or
the number of retries has expired.
required: true
wait_for:
description:
- List of conditions to evaluate against the output of the
command. The task will wait for each condition to be true
before moving forward. If the conditional is not true
within the configured number of retries, the task fails.
See examples.
default: null
match:
description:
- The I(match) argument is used in conjunction with the
I(wait_for) argument to specify the match policy. Valid
values are C(all) or C(any). If the value is set to C(all)
then all conditionals in the wait_for must be satisfied. If
the value is set to C(any) then only one of the values must be
satisfied.
required: false
default: all
choices: ['any', 'all']
retries:
description:
- Specifies the number of retries a command should by tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the
I(wait_for) conditions.
required: false
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditions, the interval indicates how long to wait before
trying the command again.
required: false
default: 1
"""
EXAMPLES = """
tasks:
- name: run show version on remote devices
slxos_command:
commands: show version
- name: run show version and check to see if output contains SLX
slxos_command:
commands: show version
wait_for: result[0] contains SLX
- name: run multiple commands on remote nodes
slxos_command:
commands:
- show version
- show interfaces
- name: run multiple commands and evaluate the output
slxos_command:
commands:
- show version
- show interface status
wait_for:
- result[0] contains SLX
- result[1] contains Eth
- name: run command that requires answering a prompt
slxos_command:
commands:
- command: 'clear sessions'
prompt: 'This operation will logout all the user sessions. Do you want to continue (yes/no)?:'
answer: y
"""
RETURN = """
stdout:
description: The set of responses from the commands
returned: always apart from low level errors (such as action plugin)
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always apart from low level errors (such as action plugin)
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: The list of conditionals that have failed
returned: failed
type: list
sample: ['...', '...']
"""
import re
import time
from ansible.module_utils.network.slxos.slxos import run_commands
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import ComplexList
from ansible.module_utils.network.common.parsing import Conditional
from ansible.module_utils.six import string_types
__metaclass__ = type
def to_lines(stdout):
for item in stdout:
if isinstance(item, string_types):
item = str(item).split('\n')
yield item
def parse_commands(module, warnings):
command = ComplexList(dict(
command=dict(key=True),
prompt=dict(),
answer=dict()
), module)
commands = command(module.params['commands'])
for item in list(commands):
configure_type = re.match(r'conf(?:\w*)(?:\s+(\w+))?', item['command'])
if module.check_mode:
if configure_type and configure_type.group(1) not in ('confirm', 'replace', 'revert', 'network'):
module.fail_json(
msg='slxos_command does not support running config mode '
'commands. Please use slxos_config instead'
)
if not item['command'].startswith('show'):
warnings.append(
'only show commands are supported when using check mode, not '
'executing `%s`' % item['command']
)
commands.remove(item)
return commands
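# Behaviour sketch (editorial addition, not in the upstream module): in
# check mode a bare 'configure' command fails the task outright, while a
# non-'show' command such as 'clear sessions' is dropped from the list with
# a warning, so only 'show ...' commands are ever sent under --check.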
def main():
"""main entry point for module execution
"""
argument_spec = dict(
commands=dict(type='list', required=True),
wait_for=dict(type='list'),
match=dict(default='all', choices=['all', 'any']),
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int')
)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
result = {'changed': False}
warnings = list()
commands = parse_commands(module, warnings)
result['warnings'] = warnings
wait_for = module.params['wait_for'] or list()
conditionals = [Conditional(c) for c in wait_for]
retries = module.params['retries']
interval = module.params['interval']
match = module.params['match']
while retries > 0:
responses = run_commands(module, commands)
for item in list(conditionals):
if item(responses):
if match == 'any':
conditionals = list()
break
conditionals.remove(item)
if not conditionals:
break
time.sleep(interval)
retries -= 1
if conditionals:
failed_conditions = [item.raw for item in conditionals]
msg = 'One or more conditional statements have not been satisfied'
module.fail_json(msg=msg, failed_conditions=failed_conditions)
result.update({
'changed': False,
'stdout': responses,
'stdout_lines': list(to_lines(responses))
})
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
niteshch207/universe | scripts/generate-config-reference.py | 3 | 3017 | #!/usr/bin/env python3
"""This script builds a Markdown file containing configuration references for
all packages (and all package versions) contained in the Mesosphere DC/OS
Universe repository. It outputs a single file, 'config-reference.md' in the
current working directory.
Usage: ./generate-config-reference.py [/path/to/universe/repo/packages]
"""
import json
import os
import sys
def find_config_files(path):
config_files = []
for root, dirs, files in os.walk(path):
for f in files:
if f == 'config.json':
config_files.append(os.path.join(root, f))
return config_files
def main(path):
files = find_config_files(path)
outfile = open(os.path.join(os.getcwd(), 'config-reference.md'), 'w')
outfile.write("# DC/OS Universe Package Configuration Reference\n\n")
for f in files:
with open(f, 'r') as config:
package_name = f.split('/')[-3]
package_version = f.split('/')[-2]
outfile.write("## {} version {}\n\n".format(package_name, package_version))
props = json.loads(config.read())['properties']
for key, value in props.items():
if key == "properties":
outfile.write("*Errors encountered when processing config properties. Not all properties may be listed here. Please verify the structure of this package and package version.*\n\n")
continue
outfile.write("### {} configuration properties\n\n".format(key))
outfile.write("| Property | Type | Description | Default Value |\n")
outfile.write("|----------|------|-------------|---------------|\n")
for _, prop in value.items():
if type(prop) is not dict:
continue
for key, details in prop.items():
prop = key
try:
typ = details['type']
except KeyError:
typ = "*No type provided.*"
try:
desc = details['description']
except KeyError:
desc = "*No description provided.*"
try:
default = "`{}`".format(details['default'])
if default == "``":
default = "*Empty string.*"
except KeyError:
default = "*No default.*"
outfile.write("| {prop} | {typ} | {desc} | {default} |\n".format(
prop=prop, desc=desc, typ=typ, default=default))
outfile.write("\n")
outfile.close()
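# Sample of the emitted Markdown (editorial addition; the package name and
# values are illustrative, not read from a real config.json):
#
#   ## cassandra version 1.0.20-3.0.10
#
#   ### service configuration properties
#
#   | Property | Type | Description | Default Value |
#   |----------|------|-------------|---------------|
#   | name | string | The service name. | `cassandra` |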
if __name__ == '__main__':
if len(sys.argv) == 2:
path = sys.argv[1]
else:
path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../repo/packages')
main(path)
| apache-2.0 |
kamcpp/tensorflow | tensorflow/contrib/opt/__init__.py | 11 | 1119 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""opt: A module containing optimization routines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.opt.python.training.external_optimizer import *
from tensorflow.contrib.opt.python.training.moving_average_optimizer import *
from tensorflow.contrib.opt.python.training.variable_clipping_optimizer import *
| apache-2.0 |
LarsFronius/ansible | lib/ansible/modules/cloud/ovirt/ovirt_external_providers_facts.py | 7 | 5916 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_external_providers_facts
short_description: Retrieve facts about one or more oVirt/RHV external providers
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
- "Retrieve facts about one or more oVirt/RHV external providers."
notes:
- "This module creates a new top-level C(ovirt_external_providers) fact, which
contains a list of external_providers."
options:
type:
description:
- "Type of the external provider."
choices: ['os_image', 'os_network', 'os_volume', 'foreman']
required: true
name:
description:
- "Name of the external provider, can be used as glob expression."
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all image external providers named C<glance>:
- ovirt_external_providers_facts:
type: os_image
name: glance
- debug:
var: ovirt_external_providers
'''
RETURN = '''
external_host_providers:
description: "List of dictionaries of all the external_host_provider attributes. External provider attributes can be found on your oVirt/RHV instance
at following url: https://ovirt.example.com/ovirt-engine/api/model#types/external_host_provider."
returned: "On success and if parameter 'type: foreman' is used."
type: list
openstack_image_providers:
description: "List of dictionaries of all the openstack_image_provider attributes. External provider attributes can be found on your oVirt/RHV instance
at following url: https://ovirt.example.com/ovirt-engine/api/model#types/openstack_image_provider."
returned: "On success and if parameter 'type: os_image' is used."
type: list
openstack_volume_providers:
description: "List of dictionaries of all the openstack_volume_provider attributes. External provider attributes can be found on your oVirt/RHV instance
at following url: https://ovirt.example.com/ovirt-engine/api/model#types/openstack_volume_provider."
returned: "On success and if parameter 'type: os_volume' is used."
type: list
openstack_network_providers:
description: "List of dictionaries of all the openstack_network_provider attributes. External provider attributes can be found on your oVirt/RHV instance
at following url: https://ovirt.example.com/ovirt-engine/api/model#types/openstack_network_provider."
returned: "On success and if parameter 'type: os_network' is used."
type: list
'''
import fnmatch
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
)
def _external_provider_service(provider_type, system_service):
if provider_type == 'os_image':
return system_service.openstack_image_providers_service()
elif provider_type == 'os_network':
return system_service.openstack_network_providers_service()
elif provider_type == 'os_volume':
return system_service.openstack_volume_providers_service()
elif provider_type == 'foreman':
return system_service.external_host_providers_service()
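# Editorial note (not in the upstream module): each supported 'type' maps
# to the matching oVirt SDK collection service, e.g.
# _external_provider_service('os_image', connection.system_service()) is
# equivalent to connection.system_service().openstack_image_providers_service();
# an unrecognised type falls through and the function returns None.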
def main():
argument_spec = ovirt_facts_full_argument_spec(
name=dict(default=None, required=False),
type=dict(
default=None,
required=True,
choices=[
'os_image', 'os_network', 'os_volume', 'foreman',
],
aliases=['provider'],
),
)
module = AnsibleModule(argument_spec)
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
external_providers_service = _external_provider_service(
provider_type=module.params.pop('type'),
system_service=connection.system_service(),
)
if module.params['name']:
external_providers = [
e for e in external_providers_service.list()
if fnmatch.fnmatch(e.name, module.params['name'])
]
else:
external_providers = external_providers_service.list()
module.exit_json(
changed=False,
ansible_facts=dict(
ovirt_external_providers=[
get_dict_of_struct(
struct=c,
connection=connection,
fetch_nested=module.params.get('fetch_nested'),
attributes=module.params.get('nested_attributes'),
) for c in external_providers
],
),
)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == '__main__':
main()
| gpl-3.0 |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/scipy/optimize/nnls.py | 116 | 1423 | from __future__ import division, print_function, absolute_import
from . import _nnls
from numpy import asarray_chkfinite, zeros, double
__all__ = ['nnls']
def nnls(A, b):
"""
Solve ``argmin_x || Ax - b ||_2`` for ``x>=0``. This is a wrapper
for a FORTRAN non-negative least squares solver.
Parameters
----------
A : ndarray
Matrix ``A`` as shown above.
b : ndarray
Right-hand side vector.
Returns
-------
x : ndarray
Solution vector.
rnorm : float
The residual, ``|| Ax-b ||_2``.
Notes
-----
The FORTRAN code was published in the book below. The algorithm
is an active set method. It solves the KKT (Karush-Kuhn-Tucker)
conditions for the non-negative least squares problem.
References
----------
Lawson C., Hanson R.J., (1987) Solving Least Squares Problems, SIAM
"""
A, b = map(asarray_chkfinite, (A, b))
if len(A.shape) != 2:
raise ValueError("expected matrix")
if len(b.shape) != 1:
raise ValueError("expected vector")
m, n = A.shape
if m != b.shape[0]:
raise ValueError("incompatible dimensions")
w = zeros((n,), dtype=double)
zz = zeros((m,), dtype=double)
index = zeros((n,), dtype=int)
x, rnorm, mode = _nnls.nnls(A, m, n, b, w, zz, index)
if mode != 1:
raise RuntimeError("too many iterations")
return x, rnorm
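# A minimal usage sketch (illustrative addition, not part of the original
# module): solve a small overdetermined system under the x >= 0 constraint.
#
#   import numpy as np
#   from scipy.optimize import nnls
#   A = np.array([[1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
#   b = np.array([2.0, 1.0, 1.0])
#   x, rnorm = nnls(A, b)  # every entry of x is non-negative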
| mit |
nugget/home-assistant | tests/components/frontend/test_storage.py | 10 | 4742 | """The tests for frontend storage."""
import pytest
from homeassistant.setup import async_setup_component
from homeassistant.components.frontend import storage
@pytest.fixture(autouse=True)
def setup_frontend(hass):
"""Fixture to setup the frontend."""
hass.loop.run_until_complete(async_setup_component(hass, 'frontend', {}))
async def test_get_user_data_empty(hass, hass_ws_client, hass_storage):
"""Test get_user_data command."""
client = await hass_ws_client(hass)
await client.send_json({
'id': 5,
'type': 'frontend/get_user_data',
'key': 'non-existing-key',
})
res = await client.receive_json()
assert res['success'], res
assert res['result']['value'] is None
async def test_get_user_data(hass, hass_ws_client, hass_admin_user,
hass_storage):
"""Test get_user_data command."""
storage_key = storage.STORAGE_KEY_USER_DATA.format(hass_admin_user.id)
hass_storage[storage_key] = {
'key': storage_key,
'version': 1,
'data': {
'test-key': 'test-value',
'test-complex': [{'foo': 'bar'}]
}
}
client = await hass_ws_client(hass)
# Get a simple string key
await client.send_json({
'id': 6,
'type': 'frontend/get_user_data',
'key': 'test-key',
})
res = await client.receive_json()
assert res['success'], res
assert res['result']['value'] == 'test-value'
# Get a more complex key
await client.send_json({
'id': 7,
'type': 'frontend/get_user_data',
'key': 'test-complex',
})
res = await client.receive_json()
assert res['success'], res
assert res['result']['value'][0]['foo'] == 'bar'
# Get all data (no key)
await client.send_json({
'id': 8,
'type': 'frontend/get_user_data',
})
res = await client.receive_json()
assert res['success'], res
assert res['result']['value']['test-key'] == 'test-value'
assert res['result']['value']['test-complex'][0]['foo'] == 'bar'
async def test_set_user_data_empty(hass, hass_ws_client, hass_storage):
"""Test set_user_data command."""
client = await hass_ws_client(hass)
# test creating
await client.send_json({
'id': 6,
'type': 'frontend/get_user_data',
'key': 'test-key',
})
res = await client.receive_json()
assert res['success'], res
assert res['result']['value'] is None
await client.send_json({
'id': 7,
'type': 'frontend/set_user_data',
'key': 'test-key',
'value': 'test-value'
})
res = await client.receive_json()
assert res['success'], res
await client.send_json({
'id': 8,
'type': 'frontend/get_user_data',
'key': 'test-key',
})
res = await client.receive_json()
assert res['success'], res
assert res['result']['value'] == 'test-value'
async def test_set_user_data(hass, hass_ws_client, hass_storage,
hass_admin_user):
"""Test set_user_data command with initial data."""
storage_key = storage.STORAGE_KEY_USER_DATA.format(hass_admin_user.id)
hass_storage[storage_key] = {
'version': 1,
'data': {
'test-key': 'test-value',
'test-complex': 'string',
}
}
client = await hass_ws_client(hass)
# test creating
await client.send_json({
'id': 5,
'type': 'frontend/set_user_data',
'key': 'test-non-existent-key',
'value': 'test-value-new'
})
res = await client.receive_json()
assert res['success'], res
await client.send_json({
'id': 6,
'type': 'frontend/get_user_data',
'key': 'test-non-existent-key',
})
res = await client.receive_json()
assert res['success'], res
assert res['result']['value'] == 'test-value-new'
# test updating with complex data
await client.send_json({
'id': 7,
'type': 'frontend/set_user_data',
'key': 'test-complex',
'value': [{'foo': 'bar'}]
})
res = await client.receive_json()
assert res['success'], res
await client.send_json({
'id': 8,
'type': 'frontend/get_user_data',
'key': 'test-complex',
})
res = await client.receive_json()
assert res['success'], res
assert res['result']['value'][0]['foo'] == 'bar'
# ensure other existing key was not modified
await client.send_json({
'id': 9,
'type': 'frontend/get_user_data',
'key': 'test-key',
})
res = await client.receive_json()
assert res['success'], res
assert res['result']['value'] == 'test-value'
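# A condensed sketch (an addition mirroring the message shapes exercised by
# the tests above) of one set/get round-trip; the key and value are
# hypothetical.
#
#   await client.send_json({'id': 10, 'type': 'frontend/set_user_data',
#                           'key': 'sidebar', 'value': {'collapsed': True}})
#   assert (await client.receive_json())['success']
#   await client.send_json({'id': 11, 'type': 'frontend/get_user_data',
#                           'key': 'sidebar'})
#   res = await client.receive_json()
#   assert res['result']['value'] == {'collapsed': True}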
| apache-2.0 |
mdanielwork/intellij-community | python/testData/MockSdk2.7/python_stubs/__builtin__.py | 19 | 174731 | # encoding: utf-8
# module __builtin__
# from (built-in)
# by generator 1.145
from __future__ import print_function
"""
Built-in functions, exceptions, and other objects.
Noteworthy: None is the `nil' object; Ellipsis represents `...' in slices.
"""
# imports
from exceptions import (ArithmeticError, AssertionError, AttributeError,
BaseException, BufferError, BytesWarning, DeprecationWarning, EOFError,
EnvironmentError, Exception, FloatingPointError, FutureWarning,
GeneratorExit, IOError, ImportError, ImportWarning, IndentationError,
IndexError, KeyError, KeyboardInterrupt, LookupError, MemoryError,
NameError, NotImplementedError, OSError, OverflowError,
PendingDeprecationWarning, ReferenceError, RuntimeError, RuntimeWarning,
StandardError, StopIteration, SyntaxError, SyntaxWarning, SystemError,
SystemExit, TabError, TypeError, UnboundLocalError, UnicodeDecodeError,
UnicodeEncodeError, UnicodeError, UnicodeTranslateError, UnicodeWarning,
UserWarning, ValueError, Warning, ZeroDivisionError)
# Variables with simple values
False = False
None = object() # real value of type <type 'NoneType'> replaced
True = True
__debug__ = True
# functions
def abs(number): # real signature unknown; restored from __doc__
"""
abs(number) -> number
Return the absolute value of the argument.
"""
return 0
def all(iterable): # real signature unknown; restored from __doc__
"""
all(iterable) -> bool
Return True if bool(x) is True for all values x in the iterable.
If the iterable is empty, return True.
"""
return False
def any(iterable): # real signature unknown; restored from __doc__
"""
any(iterable) -> bool
Return True if bool(x) is True for any x in the iterable.
If the iterable is empty, return False.
"""
return False
def apply(p_object, args=None, kwargs=None): # real signature unknown; restored from __doc__
"""
apply(object[, args[, kwargs]]) -> value
Call a callable object with positional arguments taken from the tuple args,
and keyword arguments taken from the optional dictionary kwargs.
Note that classes are callable, as are instances with a __call__() method.
Deprecated since release 2.3. Instead, use the extended call syntax:
function(*args, **keywords).
"""
pass
def bin(number): # real signature unknown; restored from __doc__
"""
bin(number) -> string
Return the binary representation of an integer or long integer.
"""
return ""
def callable(p_object): # real signature unknown; restored from __doc__
"""
callable(object) -> bool
Return whether the object is callable (i.e., some kind of function).
Note that classes are callable, as are instances with a __call__() method.
"""
return False
def chr(i): # real signature unknown; restored from __doc__
"""
chr(i) -> character
Return a string of one character with ordinal i; 0 <= i < 256.
"""
return ""
def cmp(x, y): # real signature unknown; restored from __doc__
"""
cmp(x, y) -> integer
Return negative if x<y, zero if x==y, positive if x>y.
"""
return 0
def coerce(x, y): # real signature unknown; restored from __doc__
"""
coerce(x, y) -> (x1, y1)
Return a tuple consisting of the two numeric arguments converted to
a common type, using the same rules as used by arithmetic operations.
If coercion is not possible, raise TypeError.
"""
pass
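# Illustrative note (an addition, not part of the generated stub):
#   coerce(3, 4.5) == (3.0, 4.5)  # the int is widened to a float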
def compile(source, filename, mode, flags=None, dont_inherit=None): # real signature unknown; restored from __doc__
"""
compile(source, filename, mode[, flags[, dont_inherit]]) -> code object
Compile the source string (a Python module, statement or expression)
into a code object that can be executed by the exec statement or eval().
The filename will be used for run-time error messages.
The mode must be 'exec' to compile a module, 'single' to compile a
single (interactive) statement, or 'eval' to compile an expression.
The flags argument, if present, controls which future statements influence
the compilation of the code.
The dont_inherit argument, if non-zero, stops the compilation inheriting
the effects of any future statements in effect in the code calling
compile; if absent or zero these statements do influence the compilation,
in addition to any features explicitly specified.
"""
pass
def copyright(*args, **kwargs): # real signature unknown
"""
interactive prompt objects for printing the license text, a list of
contributors and the copyright notice.
"""
pass
def credits(*args, **kwargs): # real signature unknown
"""
interactive prompt objects for printing the license text, a list of
contributors and the copyright notice.
"""
pass
def delattr(p_object, name): # real signature unknown; restored from __doc__
"""
delattr(object, name)
Delete a named attribute on an object; delattr(x, 'y') is equivalent to
``del x.y''.
"""
pass
def dir(p_object=None): # real signature unknown; restored from __doc__
"""
dir([object]) -> list of strings
If called without an argument, return the names in the current scope.
Else, return an alphabetized list of names comprising (some of) the attributes
of the given object, and of attributes reachable from it.
If the object supplies a method named __dir__, it will be used; otherwise
the default dir() logic is used and returns:
for a module object: the module's attributes.
for a class object: its attributes, and recursively the attributes
of its bases.
for any other object: its attributes, its class's attributes, and
recursively the attributes of its class's base classes.
"""
return []
def divmod(x, y): # known case of __builtin__.divmod
"""
divmod(x, y) -> (quotient, remainder)
Return the tuple ((x-x%y)/y, x%y). Invariant: div*y + mod == x.
"""
return (0, 0)
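# Illustrative note (an addition, not part of the generated stub):
#   divmod(7, 3) == (2, 1), and 2*3 + 1 == 7 restores the invariant above.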
def eval(source, globals=None, locals=None): # real signature unknown; restored from __doc__
"""
eval(source[, globals[, locals]]) -> value
Evaluate the source in the context of globals and locals.
The source may be a string representing a Python expression
or a code object as returned by compile().
The globals must be a dictionary and locals can be any mapping,
defaulting to the current globals and locals.
If only globals is given, locals defaults to it.
"""
pass
def execfile(filename, globals=None, locals=None): # real signature unknown; restored from __doc__
"""
execfile(filename[, globals[, locals]])
Read and execute a Python script from a file.
The globals and locals are dictionaries, defaulting to the current
globals and locals. If only globals is given, locals defaults to it.
"""
pass
def exit(*args, **kwargs): # real signature unknown
pass
def filter(function_or_none, sequence): # known special case of filter
"""
filter(function or None, sequence) -> list, tuple, or string
Return those items of sequence for which function(item) is true. If
function is None, return the items that are true. If sequence is a tuple
or string, return the same type, else return a list.
"""
pass
def format(value, format_spec=None): # real signature unknown; restored from __doc__
"""
format(value[, format_spec]) -> string
Returns value.__format__(format_spec)
format_spec defaults to ""
"""
return ""
def getattr(object, name, default=None): # known special case of getattr
"""
getattr(object, name[, default]) -> value
Get a named attribute from an object; getattr(x, 'y') is equivalent to x.y.
When a default argument is given, it is returned when the attribute doesn't
exist; without it, an exception is raised in that case.
"""
pass
def globals(): # real signature unknown; restored from __doc__
"""
globals() -> dictionary
Return the dictionary containing the current scope's global variables.
"""
return {}
def hasattr(p_object, name): # real signature unknown; restored from __doc__
"""
hasattr(object, name) -> bool
Return whether the object has an attribute with the given name.
(This is done by calling getattr(object, name) and catching exceptions.)
"""
return False
def hash(p_object): # real signature unknown; restored from __doc__
"""
hash(object) -> integer
Return a hash value for the object. Two objects with the same value have
the same hash value. The reverse is not necessarily true, but likely.
"""
return 0
def help(with_a_twist): # real signature unknown; restored from __doc__
"""
Define the built-in 'help'.
This is a wrapper around pydoc.help (with a twist).
"""
pass
def hex(number): # real signature unknown; restored from __doc__
"""
hex(number) -> string
Return the hexadecimal representation of an integer or long integer.
"""
return ""
def id(p_object): # real signature unknown; restored from __doc__
"""
id(object) -> integer
Return the identity of an object. This is guaranteed to be unique among
simultaneously existing objects. (Hint: it's the object's memory address.)
"""
return 0
def input(prompt=None): # real signature unknown; restored from __doc__
"""
input([prompt]) -> value
Equivalent to eval(raw_input(prompt)).
"""
pass
def intern(string): # real signature unknown; restored from __doc__
"""
intern(string) -> string
``Intern'' the given string. This enters the string in the (global)
table of interned strings whose purpose is to speed up dictionary lookups.
Return the string itself or the previously interned string object with the
same value.
"""
return ""
def isinstance(p_object, class_or_type_or_tuple): # real signature unknown; restored from __doc__
"""
isinstance(object, class-or-type-or-tuple) -> bool
Return whether an object is an instance of a class or of a subclass thereof.
With a type as second argument, return whether that is the object's type.
The form using a tuple, isinstance(x, (A, B, ...)), is a shortcut for
isinstance(x, A) or isinstance(x, B) or ... (etc.).
"""
return False
def issubclass(C, B): # real signature unknown; restored from __doc__
"""
issubclass(C, B) -> bool
Return whether class C is a subclass (i.e., a derived class) of class B.
When using a tuple as the second argument issubclass(X, (A, B, ...)),
is a shortcut for issubclass(X, A) or issubclass(X, B) or ... (etc.).
"""
return False
def iter(source, sentinel=None): # known special case of iter
"""
iter(collection) -> iterator
iter(callable, sentinel) -> iterator
Get an iterator from an object. In the first form, the argument must
supply its own iterator, or be a sequence.
In the second form, the callable is called until it returns the sentinel.
"""
pass
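# Illustrative note (an addition, not part of the generated stub): the
# two-argument form calls the callable until the sentinel is returned, e.g.
# reading an (assumed already open) file object f in fixed-size chunks:
#   for chunk in iter(lambda: f.read(4096), ''):
#       process(chunk)  # process() is a hypothetical consumer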
def len(p_object): # real signature unknown; restored from __doc__
"""
len(object) -> integer
Return the number of items of a sequence or collection.
"""
return 0
def license(*args, **kwargs): # real signature unknown
"""
interactive prompt objects for printing the license text, a list of
contributors and the copyright notice.
"""
pass
def locals(): # real signature unknown; restored from __doc__
"""
locals() -> dictionary
Update and return a dictionary containing the current scope's local variables.
"""
return {}
def map(function, sequence, *sequence_1): # real signature unknown; restored from __doc__
"""
map(function, sequence[, sequence, ...]) -> list
Return a list of the results of applying the function to the items of
the argument sequence(s). If more than one sequence is given, the
function is called with an argument list consisting of the corresponding
item of each sequence, substituting None for missing values when not all
sequences have the same length. If the function is None, return a list of
the items of the sequence (or a list of tuples if more than one sequence).
"""
return []
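# Illustrative note (an addition, not part of the generated stub): with
# sequences of unequal length, missing items are padded with None:
#   map(None, [1, 2, 3], ['a', 'b'])  ->  [(1, 'a'), (2, 'b'), (3, None)]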
def max(*args, **kwargs): # known special case of max
"""
max(iterable[, key=func]) -> value
max(a, b, c, ...[, key=func]) -> value
With a single iterable argument, return its largest item.
With two or more arguments, return the largest argument.
"""
pass
def min(*args, **kwargs): # known special case of min
"""
min(iterable[, key=func]) -> value
min(a, b, c, ...[, key=func]) -> value
With a single iterable argument, return its smallest item.
With two or more arguments, return the smallest argument.
"""
pass
def next(iterator, default=None): # real signature unknown; restored from __doc__
"""
next(iterator[, default])
Return the next item from the iterator. If default is given and the iterator
is exhausted, it is returned instead of raising StopIteration.
"""
pass
def oct(number): # real signature unknown; restored from __doc__
"""
oct(number) -> string
Return the octal representation of an integer or long integer.
"""
return ""
def open(name, mode=None, buffering=None): # real signature unknown; restored from __doc__
"""
open(name[, mode[, buffering]]) -> file object
Open a file using the file() type, returns a file object. This is the
preferred way to open a file. See file.__doc__ for further information.
"""
return file('/dev/null')
def ord(c): # real signature unknown; restored from __doc__
"""
ord(c) -> integer
Return the integer ordinal of a one-character string.
"""
return 0
def pow(x, y, z=None): # real signature unknown; restored from __doc__
"""
pow(x, y[, z]) -> number
With two arguments, equivalent to x**y. With three arguments,
equivalent to (x**y) % z, but may be more efficient (e.g. for longs).
"""
return 0
def print(*args, **kwargs): # known special case of print
"""
print(value, ..., sep=' ', end='\n', file=sys.stdout)
Prints the values to a stream, or to sys.stdout by default.
Optional keyword arguments:
file: a file-like object (stream); defaults to the current sys.stdout.
sep: string inserted between values, default a space.
end: string appended after the last value, default a newline.
"""
pass
def quit(*args, **kwargs): # real signature unknown
pass
def range(start=None, stop=None, step=None): # known special case of range
"""
range(stop) -> list of integers
range(start, stop[, step]) -> list of integers
Return a list containing an arithmetic progression of integers.
range(i, j) returns [i, i+1, i+2, ..., j-1]; start (!) defaults to 0.
When step is given, it specifies the increment (or decrement).
For example, range(4) returns [0, 1, 2, 3]. The end point is omitted!
These are exactly the valid indices for a list of 4 elements.
"""
pass
def raw_input(prompt=None): # real signature unknown; restored from __doc__
"""
raw_input([prompt]) -> string
Read a string from standard input. The trailing newline is stripped.
If the user hits EOF (Unix: Ctl-D, Windows: Ctl-Z+Return), raise EOFError.
On Unix, GNU readline is used if enabled. The prompt string, if given,
is printed without a trailing newline before reading.
"""
return ""
def reduce(function, sequence, initial=None): # real signature unknown; restored from __doc__
"""
reduce(function, sequence[, initial]) -> value
Apply a function of two arguments cumulatively to the items of a sequence,
from left to right, so as to reduce the sequence to a single value.
For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates
((((1+2)+3)+4)+5). If initial is present, it is placed before the items
of the sequence in the calculation, and serves as a default when the
sequence is empty.
"""
pass
def reload(module): # real signature unknown; restored from __doc__
"""
reload(module) -> module
Reload the module. The module must have been successfully imported before.
"""
pass
def repr(p_object): # real signature unknown; restored from __doc__
"""
repr(object) -> string
Return the canonical string representation of the object.
For most object types, eval(repr(object)) == object.
"""
return ""
def round(number, ndigits=None): # real signature unknown; restored from __doc__
"""
round(number[, ndigits]) -> floating point number
Round a number to a given precision in decimal digits (default 0 digits).
This always returns a floating point number. Precision may be negative.
"""
return 0.0
def setattr(p_object, name, value): # real signature unknown; restored from __doc__
"""
setattr(object, name, value)
Set a named attribute on an object; setattr(x, 'y', v) is equivalent to
``x.y = v''.
"""
pass
def sorted(iterable, cmp=None, key=None, reverse=False): # real signature unknown; restored from __doc__
""" sorted(iterable, cmp=None, key=None, reverse=False) --> new sorted list """
pass
def sum(sequence, start=None): # real signature unknown; restored from __doc__
"""
sum(sequence[, start]) -> value
Return the sum of a sequence of numbers (NOT strings) plus the value
of parameter 'start' (which defaults to 0). When the sequence is
empty, return start.
"""
pass
def unichr(i): # real signature unknown; restored from __doc__
"""
unichr(i) -> Unicode character
Return a Unicode string of one character with ordinal i; 0 <= i <= 0x10ffff.
"""
return u""
def vars(p_object=None): # real signature unknown; restored from __doc__
"""
vars([object]) -> dictionary
Without arguments, equivalent to locals().
With an argument, equivalent to object.__dict__.
"""
return {}
def zip(seq1, seq2, *more_seqs): # known special case of zip
"""
zip(seq1 [, seq2 [...]]) -> [(seq1[0], seq2[0] ...), (...)]
Return a list of tuples, where each tuple contains the i-th element
from each of the argument sequences. The returned list is truncated
in length to the length of the shortest argument sequence.
"""
pass
def __import__(name, globals={}, locals={}, fromlist=[], level=-1): # real signature unknown; restored from __doc__
"""
__import__(name, globals={}, locals={}, fromlist=[], level=-1) -> module
Import a module. Because this function is meant for use by the Python
interpreter and not for general use it is better to use
importlib.import_module() to programmatically import a module.
The globals argument is only used to determine the context;
they are not modified. The locals argument is unused. The fromlist
should be a list of names to emulate ``from name import ...'', or an
empty list to emulate ``import name''.
When importing a module from a package, note that __import__('A.B', ...)
returns package A when fromlist is empty, but its submodule B when
fromlist is not empty. Level is used to determine whether to perform
absolute or relative imports. -1 is the original strategy of attempting
both absolute and relative imports, 0 is absolute, a positive number
is the number of parent directories to search relative to the current module.
"""
pass
# classes
class ___Classobj:
'''A mock class representing the old style class base.'''
__module__ = ''
__class__ = None
def __init__(self):
pass
__dict__ = {}
__doc__ = ''
class __generator(object):
'''A mock class representing the generator function type.'''
def __init__(self):
self.gi_code = None
self.gi_frame = None
self.gi_running = 0
def __iter__(self):
'''Defined to support iteration over container.'''
pass
def next(self):
'''Return the next item from the container.'''
pass
def close(self):
'''Raises new GeneratorExit exception inside the generator to terminate the iteration.'''
pass
def send(self, value):
'''Resumes the generator and "sends" a value that becomes the result of the current yield-expression.'''
pass
def throw(self, type, value=None, traceback=None):
'''Used to raise an exception inside the generator.'''
pass
class __function(object):
'''A mock class representing function type.'''
def __init__(self):
self.__name__ = ''
self.__doc__ = ''
self.__dict__ = ''
self.__module__ = ''
self.func_defaults = {}
self.func_globals = {}
self.func_closure = None
self.func_code = None
self.func_name = ''
self.func_doc = ''
self.func_dict = ''
self.__defaults__ = {}
self.__globals__ = {}
self.__closure__ = None
self.__code__ = None
self.__name__ = ''
class __method(object):
'''A mock class representing method type.'''
def __init__(self):
self.im_class = None
self.im_self = None
self.im_func = None
self.__func__ = None
self.__self__ = None
class __namedtuple(tuple):
'''A mock base class for named tuples.'''
__slots__ = ()
_fields = ()
def __new__(cls, *args, **kwargs):
'Create a new instance of the named tuple.'
return tuple.__new__(cls, *args)
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new named tuple object from a sequence or iterable.'
return new(cls, iterable)
def __repr__(self):
return ''
def _asdict(self):
'Return a new dict which maps field names to their values.'
return {}
def _replace(self, **kwargs):
'Return a new named tuple object replacing specified fields with new values.'
return self
def __getnewargs__(self):
return tuple(self)
class object:
""" The most base type """
def __delattr__(self, name): # real signature unknown; restored from __doc__
""" x.__delattr__('name') <==> del x.name """
pass
def __format__(self, *args, **kwargs): # real signature unknown
""" default object formatter """
pass
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __hash__(self): # real signature unknown; restored from __doc__
""" x.__hash__() <==> hash(x) """
pass
def __init__(self): # known special case of object.__init__
""" x.__init__(...) initializes x; see help(type(x)) for signature """
pass
@staticmethod # known case of __new__
def __new__(cls, *more): # known special case of object.__new__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" helper for pickle """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" helper for pickle """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
def __setattr__(self, name, value): # real signature unknown; restored from __doc__
""" x.__setattr__('name', value) <==> x.name = value """
pass
def __sizeof__(self): # real signature unknown; restored from __doc__
"""
__sizeof__() -> int
size of object in memory, in bytes
"""
return 0
def __str__(self): # real signature unknown; restored from __doc__
""" x.__str__() <==> str(x) """
pass
@classmethod # known case
def __subclasshook__(cls, subclass): # known special case of object.__subclasshook__
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
__class__ = None # (!) forward: type, real value is ''
__dict__ = {}
__doc__ = ''
__module__ = ''
class basestring(object):
""" Type basestring cannot be instantiated; it is the base for str and unicode. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
class int(object):
"""
int(x=0) -> int or long
int(x, base=10) -> int or long
Convert a number or string to an integer, or return 0 if no arguments
are given. If x is floating point, the conversion truncates towards zero.
If x is outside the integer range, the function returns a long instead.
If x is not a number or if base is given, then x must be a string or
Unicode object representing an integer literal in the given base. The
literal can be preceded by '+' or '-' and be surrounded by whitespace.
The base defaults to 10. Valid bases are 0 and 2-36. Base 0 means to
interpret the base from the string as an integer literal.
>>> int('0b100', base=0)
4
"""
def bit_length(self): # real signature unknown; restored from __doc__
"""
int.bit_length() -> int
Number of bits necessary to represent self in binary.
>>> bin(37)
'0b100101'
>>> (37).bit_length()
6
"""
return 0
def conjugate(self, *args, **kwargs): # real signature unknown
""" Returns self, the complex conjugate of any int. """
pass
def __abs__(self): # real signature unknown; restored from __doc__
""" x.__abs__() <==> abs(x) """
pass
def __add__(self, y): # real signature unknown; restored from __doc__
""" x.__add__(y) <==> x+y """
pass
def __and__(self, y): # real signature unknown; restored from __doc__
""" x.__and__(y) <==> x&y """
pass
def __cmp__(self, y): # real signature unknown; restored from __doc__
""" x.__cmp__(y) <==> cmp(x,y) """
pass
def __coerce__(self, y): # real signature unknown; restored from __doc__
""" x.__coerce__(y) <==> coerce(x, y) """
pass
def __divmod__(self, y): # real signature unknown; restored from __doc__
""" x.__divmod__(y) <==> divmod(x, y) """
pass
def __div__(self, y): # real signature unknown; restored from __doc__
""" x.__div__(y) <==> x/y """
pass
def __float__(self): # real signature unknown; restored from __doc__
""" x.__float__() <==> float(x) """
pass
def __floordiv__(self, y): # real signature unknown; restored from __doc__
""" x.__floordiv__(y) <==> x//y """
pass
def __format__(self, *args, **kwargs): # real signature unknown
pass
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __getnewargs__(self, *args, **kwargs): # real signature unknown
pass
def __hash__(self): # real signature unknown; restored from __doc__
""" x.__hash__() <==> hash(x) """
pass
def __hex__(self): # real signature unknown; restored from __doc__
""" x.__hex__() <==> hex(x) """
pass
def __index__(self): # real signature unknown; restored from __doc__
""" x[y:z] <==> x[y.__index__():z.__index__()] """
pass
def __init__(self, x, base=10): # known special case of int.__init__
"""
int(x=0) -> int or long
int(x, base=10) -> int or long
Convert a number or string to an integer, or return 0 if no arguments
are given. If x is floating point, the conversion truncates towards zero.
If x is outside the integer range, the function returns a long instead.
If x is not a number or if base is given, then x must be a string or
Unicode object representing an integer literal in the given base. The
literal can be preceded by '+' or '-' and be surrounded by whitespace.
The base defaults to 10. Valid bases are 0 and 2-36. Base 0 means to
interpret the base from the string as an integer literal.
>>> int('0b100', base=0)
4
# (copied from class doc)
"""
pass
def __int__(self): # real signature unknown; restored from __doc__
""" x.__int__() <==> int(x) """
pass
def __invert__(self): # real signature unknown; restored from __doc__
""" x.__invert__() <==> ~x """
pass
def __long__(self): # real signature unknown; restored from __doc__
""" x.__long__() <==> long(x) """
pass
def __lshift__(self, y): # real signature unknown; restored from __doc__
""" x.__lshift__(y) <==> x<<y """
pass
def __mod__(self, y): # real signature unknown; restored from __doc__
""" x.__mod__(y) <==> x%y """
pass
def __mul__(self, y): # real signature unknown; restored from __doc__
""" x.__mul__(y) <==> x*y """
pass
def __neg__(self): # real signature unknown; restored from __doc__
""" x.__neg__() <==> -x """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __nonzero__(self): # real signature unknown; restored from __doc__
""" x.__nonzero__() <==> x != 0 """
pass
def __oct__(self): # real signature unknown; restored from __doc__
""" x.__oct__() <==> oct(x) """
pass
def __or__(self, y): # real signature unknown; restored from __doc__
""" x.__or__(y) <==> x|y """
pass
def __pos__(self): # real signature unknown; restored from __doc__
""" x.__pos__() <==> +x """
pass
def __pow__(self, y, z=None): # real signature unknown; restored from __doc__
""" x.__pow__(y[, z]) <==> pow(x, y[, z]) """
pass
def __radd__(self, y): # real signature unknown; restored from __doc__
""" x.__radd__(y) <==> y+x """
pass
def __rand__(self, y): # real signature unknown; restored from __doc__
""" x.__rand__(y) <==> y&x """
pass
def __rdivmod__(self, y): # real signature unknown; restored from __doc__
""" x.__rdivmod__(y) <==> divmod(y, x) """
pass
def __rdiv__(self, y): # real signature unknown; restored from __doc__
""" x.__rdiv__(y) <==> y/x """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
def __rfloordiv__(self, y): # real signature unknown; restored from __doc__
""" x.__rfloordiv__(y) <==> y//x """
pass
def __rlshift__(self, y): # real signature unknown; restored from __doc__
""" x.__rlshift__(y) <==> y<<x """
pass
def __rmod__(self, y): # real signature unknown; restored from __doc__
""" x.__rmod__(y) <==> y%x """
pass
def __rmul__(self, y): # real signature unknown; restored from __doc__
""" x.__rmul__(y) <==> y*x """
pass
def __ror__(self, y): # real signature unknown; restored from __doc__
""" x.__ror__(y) <==> y|x """
pass
def __rpow__(self, x, z=None): # real signature unknown; restored from __doc__
""" y.__rpow__(x[, z]) <==> pow(x, y[, z]) """
pass
def __rrshift__(self, y): # real signature unknown; restored from __doc__
""" x.__rrshift__(y) <==> y>>x """
pass
def __rshift__(self, y): # real signature unknown; restored from __doc__
""" x.__rshift__(y) <==> x>>y """
pass
def __rsub__(self, y): # real signature unknown; restored from __doc__
""" x.__rsub__(y) <==> y-x """
pass
def __rtruediv__(self, y): # real signature unknown; restored from __doc__
""" x.__rtruediv__(y) <==> y/x """
pass
def __rxor__(self, y): # real signature unknown; restored from __doc__
""" x.__rxor__(y) <==> y^x """
pass
def __str__(self): # real signature unknown; restored from __doc__
""" x.__str__() <==> str(x) """
pass
def __sub__(self, y): # real signature unknown; restored from __doc__
""" x.__sub__(y) <==> x-y """
pass
def __truediv__(self, y): # real signature unknown; restored from __doc__
""" x.__truediv__(y) <==> x/y """
pass
def __trunc__(self, *args, **kwargs): # real signature unknown
""" Truncating an Integral returns itself. """
pass
def __xor__(self, y): # real signature unknown; restored from __doc__
""" x.__xor__(y) <==> x^y """
pass
denominator = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""the denominator of a rational number in lowest terms"""
imag = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""the imaginary part of a complex number"""
numerator = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""the numerator of a rational number in lowest terms"""
real = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""the real part of a complex number"""
class bool(int):
"""
bool(x) -> bool
Returns True when the argument x is true, False otherwise.
The builtins True and False are the only two instances of the class bool.
The class bool is a subclass of the class int, and cannot be subclassed.
"""
def __and__(self, y): # real signature unknown; restored from __doc__
""" x.__and__(y) <==> x&y """
pass
def __init__(self, x): # real signature unknown; restored from __doc__
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __or__(self, y): # real signature unknown; restored from __doc__
""" x.__or__(y) <==> x|y """
pass
def __rand__(self, y): # real signature unknown; restored from __doc__
""" x.__rand__(y) <==> y&x """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
def __ror__(self, y): # real signature unknown; restored from __doc__
""" x.__ror__(y) <==> y|x """
pass
def __rxor__(self, y): # real signature unknown; restored from __doc__
""" x.__rxor__(y) <==> y^x """
pass
def __str__(self): # real signature unknown; restored from __doc__
""" x.__str__() <==> str(x) """
pass
def __xor__(self, y): # real signature unknown; restored from __doc__
""" x.__xor__(y) <==> x^y """
pass
class buffer(object):
"""
buffer(object [, offset[, size]])
Create a new buffer object which references the given object.
The buffer will reference a slice of the target object from the
start of the object (or at the specified offset). The slice will
extend to the end of the target object (or with the specified size).
"""
def __add__(self, y): # real signature unknown; restored from __doc__
""" x.__add__(y) <==> x+y """
pass
def __cmp__(self, y): # real signature unknown; restored from __doc__
""" x.__cmp__(y) <==> cmp(x,y) """
pass
def __delitem__(self, y): # real signature unknown; restored from __doc__
""" x.__delitem__(y) <==> del x[y] """
pass
def __delslice__(self, i, j): # real signature unknown; restored from __doc__
"""
x.__delslice__(i, j) <==> del x[i:j]
Use of negative indices is not supported.
"""
pass
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __getitem__(self, y): # real signature unknown; restored from __doc__
""" x.__getitem__(y) <==> x[y] """
pass
def __getslice__(self, i, j): # real signature unknown; restored from __doc__
"""
x.__getslice__(i, j) <==> x[i:j]
Use of negative indices is not supported.
"""
pass
def __hash__(self): # real signature unknown; restored from __doc__
""" x.__hash__() <==> hash(x) """
pass
def __init__(self, p_object, offset=None, size=None): # real signature unknown; restored from __doc__
pass
def __len__(self): # real signature unknown; restored from __doc__
""" x.__len__() <==> len(x) """
pass
def __mul__(self, n): # real signature unknown; restored from __doc__
""" x.__mul__(n) <==> x*n """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
def __rmul__(self, n): # real signature unknown; restored from __doc__
""" x.__rmul__(n) <==> n*x """
pass
def __setitem__(self, i, y): # real signature unknown; restored from __doc__
""" x.__setitem__(i, y) <==> x[i]=y """
pass
def __setslice__(self, i, j, y): # real signature unknown; restored from __doc__
"""
x.__setslice__(i, j, y) <==> x[i:j]=y
Use of negative indices is not supported.
"""
pass
def __str__(self): # real signature unknown; restored from __doc__
""" x.__str__() <==> str(x) """
pass
class bytearray(object):
"""
bytearray(iterable_of_ints) -> bytearray.
bytearray(string, encoding[, errors]) -> bytearray.
bytearray(bytes_or_bytearray) -> mutable copy of bytes_or_bytearray.
bytearray(memory_view) -> bytearray.
Construct a mutable bytearray object from:
- an iterable yielding integers in range(256)
- a text string encoded using the specified encoding
- a bytes or a bytearray object
- any object implementing the buffer API.
bytearray(int) -> bytearray.
Construct a zero-initialized bytearray of the given length.
"""
def append(self, p_int): # real signature unknown; restored from __doc__
"""
B.append(int) -> None
Append a single item to the end of B.
"""
pass
def capitalize(self): # real signature unknown; restored from __doc__
"""
B.capitalize() -> copy of B
Return a copy of B with only its first character capitalized (ASCII)
and the rest lower-cased.
"""
pass
def center(self, width, fillchar=None): # real signature unknown; restored from __doc__
"""
B.center(width[, fillchar]) -> copy of B
Return B centered in a string of length width. Padding is
done using the specified fill character (default is a space).
"""
pass
def count(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
B.count(sub [,start [,end]]) -> int
Return the number of non-overlapping occurrences of subsection sub in
bytes B[start:end]. Optional arguments start and end are interpreted
as in slice notation.
"""
return 0
def decode(self, encoding=None, errors=None): # real signature unknown; restored from __doc__
"""
B.decode([encoding[, errors]]) -> unicode object.
Decodes B using the codec registered for encoding. encoding defaults
to the default encoding. errors may be given to set a different error
handling scheme. Default is 'strict' meaning that encoding errors raise
a UnicodeDecodeError. Other possible values are 'ignore' and 'replace'
as well as any other name registered with codecs.register_error that is
able to handle UnicodeDecodeErrors.
"""
return u""
def endswith(self, suffix, start=None, end=None): # real signature unknown; restored from __doc__
"""
B.endswith(suffix [,start [,end]]) -> bool
Return True if B ends with the specified suffix, False otherwise.
With optional start, test B beginning at that position.
With optional end, stop comparing B at that position.
suffix can also be a tuple of strings to try.
"""
return False
def expandtabs(self, tabsize=None): # real signature unknown; restored from __doc__
"""
B.expandtabs([tabsize]) -> copy of B
Return a copy of B where all tab characters are expanded using spaces.
If tabsize is not given, a tab size of 8 characters is assumed.
"""
pass
def extend(self, iterable_int): # real signature unknown; restored from __doc__
"""
B.extend(iterable int) -> None
Append all the elements from the iterator or sequence to the
end of B.
"""
pass
def find(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
B.find(sub [,start [,end]]) -> int
Return the lowest index in B where subsection sub is found,
such that sub is contained within B[start,end]. Optional
arguments start and end are interpreted as in slice notation.
Return -1 on failure.
"""
return 0
@classmethod # known case
def fromhex(cls, string): # real signature unknown; restored from __doc__
"""
bytearray.fromhex(string) -> bytearray
Create a bytearray object from a string of hexadecimal numbers.
Spaces between two numbers are accepted.
Example: bytearray.fromhex('B9 01EF') -> bytearray(b'\xb9\x01\xef').
"""
return bytearray
def index(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
B.index(sub [,start [,end]]) -> int
Like B.find() but raise ValueError when the subsection is not found.
"""
return 0
def insert(self, index, p_int): # real signature unknown; restored from __doc__
"""
B.insert(index, int) -> None
Insert a single item into the bytearray before the given index.
"""
pass
def isalnum(self): # real signature unknown; restored from __doc__
"""
B.isalnum() -> bool
Return True if all characters in B are alphanumeric
and there is at least one character in B, False otherwise.
"""
return False
def isalpha(self): # real signature unknown; restored from __doc__
"""
B.isalpha() -> bool
Return True if all characters in B are alphabetic
and there is at least one character in B, False otherwise.
"""
return False
def isdigit(self): # real signature unknown; restored from __doc__
"""
B.isdigit() -> bool
Return True if all characters in B are digits
and there is at least one character in B, False otherwise.
"""
return False
def islower(self): # real signature unknown; restored from __doc__
"""
B.islower() -> bool
Return True if all cased characters in B are lowercase and there is
at least one cased character in B, False otherwise.
"""
return False
def isspace(self): # real signature unknown; restored from __doc__
"""
B.isspace() -> bool
Return True if all characters in B are whitespace
and there is at least one character in B, False otherwise.
"""
return False
def istitle(self): # real signature unknown; restored from __doc__
"""
B.istitle() -> bool
Return True if B is a titlecased string and there is at least one
character in B, i.e. uppercase characters may only follow uncased
characters and lowercase characters only cased ones. Return False
otherwise.
"""
return False
def isupper(self): # real signature unknown; restored from __doc__
"""
B.isupper() -> bool
Return True if all cased characters in B are uppercase and there is
at least one cased character in B, False otherwise.
"""
return False
def join(self, iterable_of_bytes): # real signature unknown; restored from __doc__
"""
B.join(iterable_of_bytes) -> bytes
Concatenates any number of bytearray objects, with B in between each pair.
"""
return ""
def ljust(self, width, fillchar=None): # real signature unknown; restored from __doc__
"""
B.ljust(width[, fillchar]) -> copy of B
Return B left justified in a string of length width. Padding is
done using the specified fill character (default is a space).
"""
pass
def lower(self): # real signature unknown; restored from __doc__
"""
B.lower() -> copy of B
Return a copy of B with all ASCII characters converted to lowercase.
"""
pass
def lstrip(self, bytes=None): # real signature unknown; restored from __doc__
"""
B.lstrip([bytes]) -> bytearray
Strip leading bytes contained in the argument.
If the argument is omitted, strip leading ASCII whitespace.
"""
return bytearray
def partition(self, sep): # real signature unknown; restored from __doc__
"""
B.partition(sep) -> (head, sep, tail)
Searches for the separator sep in B, and returns the part before it,
the separator itself, and the part after it. If the separator is not
found, returns B and two empty bytearray objects.
"""
pass
def pop(self, index=None): # real signature unknown; restored from __doc__
"""
B.pop([index]) -> int
Remove and return a single item from B. If no index
argument is given, will pop the last value.
"""
return 0
def remove(self, p_int): # real signature unknown; restored from __doc__
"""
B.remove(int) -> None
Remove the first occurrence of a value in B.
"""
pass
def replace(self, old, new, count=None): # real signature unknown; restored from __doc__
"""
B.replace(old, new[, count]) -> bytes
Return a copy of B with all occurrences of subsection
old replaced by new. If the optional argument count is
given, only the first count occurrences are replaced.
"""
return ""
def reverse(self): # real signature unknown; restored from __doc__
"""
B.reverse() -> None
Reverse the order of the values in B in place.
"""
pass
def rfind(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
B.rfind(sub [,start [,end]]) -> int
Return the highest index in B where subsection sub is found,
such that sub is contained within B[start,end]. Optional
arguments start and end are interpreted as in slice notation.
Return -1 on failure.
"""
return 0
def rindex(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
B.rindex(sub [,start [,end]]) -> int
Like B.rfind() but raise ValueError when the subsection is not found.
"""
return 0
def rjust(self, width, fillchar=None): # real signature unknown; restored from __doc__
"""
B.rjust(width[, fillchar]) -> copy of B
Return B right justified in a string of length width. Padding is
done using the specified fill character (default is a space)
"""
pass
def rpartition(self, sep): # real signature unknown; restored from __doc__
"""
B.rpartition(sep) -> (head, sep, tail)
Searches for the separator sep in B, starting at the end of B,
and returns the part before it, the separator itself, and the
part after it. If the separator is not found, returns two empty
bytearray objects and B.
"""
pass
def rsplit(self, sep, maxsplit=None): # real signature unknown; restored from __doc__
"""
B.rsplit(sep[, maxsplit]) -> list of bytearray
Return a list of the sections in B, using sep as the delimiter,
starting at the end of B and working to the front.
If sep is not given, B is split on ASCII whitespace characters
(space, tab, return, newline, formfeed, vertical tab).
If maxsplit is given, at most maxsplit splits are done.
"""
return []
def rstrip(self, bytes=None): # real signature unknown; restored from __doc__
"""
B.rstrip([bytes]) -> bytearray
Strip trailing bytes contained in the argument.
If the argument is omitted, strip trailing ASCII whitespace.
"""
return bytearray
def split(self, sep=None, maxsplit=None): # real signature unknown; restored from __doc__
"""
B.split([sep[, maxsplit]]) -> list of bytearray
Return a list of the sections in B, using sep as the delimiter.
If sep is not given, B is split on ASCII whitespace characters
(space, tab, return, newline, formfeed, vertical tab).
If maxsplit is given, at most maxsplit splits are done.
"""
return []
def splitlines(self, keepends=False): # real signature unknown; restored from __doc__
"""
B.splitlines(keepends=False) -> list of lines
Return a list of the lines in B, breaking at line boundaries.
Line breaks are not included in the resulting list unless keepends
is given and true.
"""
return []
def startswith(self, prefix, start=None, end=None): # real signature unknown; restored from __doc__
"""
B.startswith(prefix [,start [,end]]) -> bool
Return True if B starts with the specified prefix, False otherwise.
With optional start, test B beginning at that position.
With optional end, stop comparing B at that position.
prefix can also be a tuple of strings to try.
"""
return False
def strip(self, bytes=None): # real signature unknown; restored from __doc__
"""
B.strip([bytes]) -> bytearray
Strip leading and trailing bytes contained in the argument.
If the argument is omitted, strip ASCII whitespace.
"""
return bytearray
def swapcase(self): # real signature unknown; restored from __doc__
"""
B.swapcase() -> copy of B
Return a copy of B with uppercase ASCII characters converted
to lowercase ASCII and vice versa.
"""
pass
def title(self): # real signature unknown; restored from __doc__
"""
B.title() -> copy of B
Return a titlecased version of B, i.e. ASCII words start with uppercase
characters, all remaining cased characters have lowercase.
"""
pass
def translate(self, table, deletechars=None): # real signature unknown; restored from __doc__
"""
B.translate(table[, deletechars]) -> bytearray
Return a copy of B, where all characters occurring in the
optional argument deletechars are removed, and the remaining
characters have been mapped through the given translation
table, which must be a bytes object of length 256.
"""
return bytearray
def upper(self): # real signature unknown; restored from __doc__
"""
B.upper() -> copy of B
Return a copy of B with all ASCII characters converted to uppercase.
"""
pass
def zfill(self, width): # real signature unknown; restored from __doc__
"""
B.zfill(width) -> copy of B
Pad a numeric string B with zeros on the left, to fill a field
of the specified width. B is never truncated.
"""
pass
def __add__(self, y): # real signature unknown; restored from __doc__
""" x.__add__(y) <==> x+y """
pass
def __alloc__(self): # real signature unknown; restored from __doc__
"""
B.__alloc__() -> int
Returns the number of bytes actually allocated.
"""
return 0
def __contains__(self, y): # real signature unknown; restored from __doc__
""" x.__contains__(y) <==> y in x """
pass
def __delitem__(self, y): # real signature unknown; restored from __doc__
""" x.__delitem__(y) <==> del x[y] """
pass
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __getitem__(self, y): # real signature unknown; restored from __doc__
""" x.__getitem__(y) <==> x[y] """
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __iadd__(self, y): # real signature unknown; restored from __doc__
""" x.__iadd__(y) <==> x+=y """
pass
def __imul__(self, y): # real signature unknown; restored from __doc__
""" x.__imul__(y) <==> x*=y """
pass
def __init__(self, source=None, encoding=None, errors='strict'): # known special case of bytearray.__init__
"""
bytearray(iterable_of_ints) -> bytearray.
bytearray(string, encoding[, errors]) -> bytearray.
bytearray(bytes_or_bytearray) -> mutable copy of bytes_or_bytearray.
bytearray(memory_view) -> bytearray.
Construct a mutable bytearray object from:
- an iterable yielding integers in range(256)
- a text string encoded using the specified encoding
- a bytes or a bytearray object
- any object implementing the buffer API.
bytearray(int) -> bytearray.
Construct a zero-initialized bytearray of the given length.
# (copied from class doc)
"""
pass
def __iter__(self): # real signature unknown; restored from __doc__
""" x.__iter__() <==> iter(x) """
pass
def __len__(self): # real signature unknown; restored from __doc__
""" x.__len__() <==> len(x) """
pass
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
def __mul__(self, n): # real signature unknown; restored from __doc__
""" x.__mul__(n) <==> x*n """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Return state information for pickling. """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
def __rmul__(self, n): # real signature unknown; restored from __doc__
""" x.__rmul__(n) <==> n*x """
pass
def __setitem__(self, i, y): # real signature unknown; restored from __doc__
""" x.__setitem__(i, y) <==> x[i]=y """
pass
def __sizeof__(self): # real signature unknown; restored from __doc__
"""
B.__sizeof__() -> int
Returns the size of B in memory, in bytes
"""
return 0
def __str__(self): # real signature unknown; restored from __doc__
""" x.__str__() <==> str(x) """
pass
class str(basestring):
"""
str(object='') -> string
Return a nice string representation of the object.
If the argument is a string, the return value is the same object.
"""
def capitalize(self): # real signature unknown; restored from __doc__
"""
S.capitalize() -> string
Return a copy of the string S with only its first character
capitalized.
"""
return ""
def center(self, width, fillchar=None): # real signature unknown; restored from __doc__
"""
S.center(width[, fillchar]) -> string
Return S centered in a string of length width. Padding is
done using the specified fill character (default is a space)
"""
return ""
def count(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.count(sub[, start[, end]]) -> int
Return the number of non-overlapping occurrences of substring sub in
string S[start:end]. Optional arguments start and end are interpreted
as in slice notation.
"""
return 0
def decode(self, encoding=None, errors=None): # real signature unknown; restored from __doc__
"""
S.decode([encoding[,errors]]) -> object
Decodes S using the codec registered for encoding. encoding defaults
to the default encoding. errors may be given to set a different error
handling scheme. Default is 'strict' meaning that encoding errors raise
a UnicodeDecodeError. Other possible values are 'ignore' and 'replace'
as well as any other name registered with codecs.register_error that is
able to handle UnicodeDecodeErrors.
"""
return object()
def encode(self, encoding=None, errors=None): # real signature unknown; restored from __doc__
"""
S.encode([encoding[,errors]]) -> object
Encodes S using the codec registered for encoding. encoding defaults
to the default encoding. errors may be given to set a different error
handling scheme. Default is 'strict' meaning that encoding errors raise
a UnicodeEncodeError. Other possible values are 'ignore', 'replace' and
'xmlcharrefreplace' as well as any other name registered with
codecs.register_error that is able to handle UnicodeEncodeErrors.
"""
return object()
def endswith(self, suffix, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.endswith(suffix[, start[, end]]) -> bool
Return True if S ends with the specified suffix, False otherwise.
With optional start, test S beginning at that position.
With optional end, stop comparing S at that position.
suffix can also be a tuple of strings to try.
"""
return False
def expandtabs(self, tabsize=None): # real signature unknown; restored from __doc__
"""
S.expandtabs([tabsize]) -> string
Return a copy of S where all tab characters are expanded using spaces.
If tabsize is not given, a tab size of 8 characters is assumed.
"""
return ""
def find(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.find(sub [,start [,end]]) -> int
Return the lowest index in S where substring sub is found,
such that sub is contained within S[start:end]. Optional
arguments start and end are interpreted as in slice notation.
Return -1 on failure.
"""
return 0
def format(self, *args, **kwargs): # known special case of str.format
"""
S.format(*args, **kwargs) -> string
Return a formatted version of S, using substitutions from args and kwargs.
The substitutions are identified by braces ('{' and '}').
"""
pass
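# Usage sketch (illustrative comment, not part of the generated skeleton):
# positional and keyword fields are both resolved by str.format.
#   >>> '{0} -> {name}'.format(7, name='x')
#   '7 -> x'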
def index(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.index(sub [,start [,end]]) -> int
Like S.find() but raise ValueError when the substring is not found.
"""
return 0
def isalnum(self): # real signature unknown; restored from __doc__
"""
S.isalnum() -> bool
Return True if all characters in S are alphanumeric
and there is at least one character in S, False otherwise.
"""
return False
def isalpha(self): # real signature unknown; restored from __doc__
"""
S.isalpha() -> bool
Return True if all characters in S are alphabetic
and there is at least one character in S, False otherwise.
"""
return False
def isdigit(self): # real signature unknown; restored from __doc__
"""
S.isdigit() -> bool
Return True if all characters in S are digits
and there is at least one character in S, False otherwise.
"""
return False
def islower(self): # real signature unknown; restored from __doc__
"""
S.islower() -> bool
Return True if all cased characters in S are lowercase and there is
at least one cased character in S, False otherwise.
"""
return False
def isspace(self): # real signature unknown; restored from __doc__
"""
S.isspace() -> bool
Return True if all characters in S are whitespace
and there is at least one character in S, False otherwise.
"""
return False
def istitle(self): # real signature unknown; restored from __doc__
"""
S.istitle() -> bool
Return True if S is a titlecased string and there is at least one
character in S, i.e. uppercase characters may only follow uncased
characters and lowercase characters only cased ones. Return False
otherwise.
"""
return False
def isupper(self): # real signature unknown; restored from __doc__
"""
S.isupper() -> bool
Return True if all cased characters in S are uppercase and there is
at least one cased character in S, False otherwise.
"""
return False
def join(self, iterable): # real signature unknown; restored from __doc__
"""
S.join(iterable) -> string
Return a string which is the concatenation of the strings in the
iterable. The separator between elements is S.
"""
return ""
def ljust(self, width, fillchar=None): # real signature unknown; restored from __doc__
"""
S.ljust(width[, fillchar]) -> string
Return S left-justified in a string of length width. Padding is
done using the specified fill character (default is a space).
"""
return ""
def lower(self): # real signature unknown; restored from __doc__
"""
S.lower() -> string
Return a copy of the string S converted to lowercase.
"""
return ""
def lstrip(self, chars=None): # real signature unknown; restored from __doc__
"""
S.lstrip([chars]) -> string or unicode
Return a copy of the string S with leading whitespace removed.
If chars is given and not None, remove characters in chars instead.
If chars is unicode, S will be converted to unicode before stripping
"""
return ""
def partition(self, sep): # real signature unknown; restored from __doc__
"""
S.partition(sep) -> (head, sep, tail)
Search for the separator sep in S, and return the part before it,
the separator itself, and the part after it. If the separator is not
found, return S and two empty strings.
"""
pass
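# Usage sketch (illustrative): partition always returns a 3-tuple, even
# when the separator is absent.
#   >>> 'key=val=extra'.partition('=')
#   ('key', '=', 'val=extra')
#   >>> 'noseparator'.partition('=')
#   ('noseparator', '', '')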
def replace(self, old, new, count=None): # real signature unknown; restored from __doc__
"""
S.replace(old, new[, count]) -> string
Return a copy of string S with all occurrences of substring
old replaced by new. If the optional argument count is
given, only the first count occurrences are replaced.
"""
return ""
def rfind(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.rfind(sub [,start [,end]]) -> int
Return the highest index in S where substring sub is found,
such that sub is contained within S[start:end]. Optional
arguments start and end are interpreted as in slice notation.
Return -1 on failure.
"""
return 0
def rindex(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.rindex(sub [,start [,end]]) -> int
Like S.rfind() but raise ValueError when the substring is not found.
"""
return 0
def rjust(self, width, fillchar=None): # real signature unknown; restored from __doc__
"""
S.rjust(width[, fillchar]) -> string
Return S right-justified in a string of length width. Padding is
done using the specified fill character (default is a space)
"""
return ""
def rpartition(self, sep): # real signature unknown; restored from __doc__
"""
S.rpartition(sep) -> (head, sep, tail)
Search for the separator sep in S, starting at the end of S, and return
the part before it, the separator itself, and the part after it. If the
separator is not found, return two empty strings and S.
"""
pass
def rsplit(self, sep=None, maxsplit=None): # real signature unknown; restored from __doc__
"""
S.rsplit([sep [,maxsplit]]) -> list of strings
Return a list of the words in the string S, using sep as the
delimiter string, starting at the end of the string and working
to the front. If maxsplit is given, at most maxsplit splits are
done. If sep is not specified or is None, any whitespace string
is a separator.
"""
return []
def rstrip(self, chars=None): # real signature unknown; restored from __doc__
"""
S.rstrip([chars]) -> string or unicode
Return a copy of the string S with trailing whitespace removed.
If chars is given and not None, remove characters in chars instead.
If chars is unicode, S will be converted to unicode before stripping
"""
return ""
def split(self, sep=None, maxsplit=None): # real signature unknown; restored from __doc__
"""
S.split([sep [,maxsplit]]) -> list of strings
Return a list of the words in the string S, using sep as the
delimiter string. If maxsplit is given, at most maxsplit
splits are done. If sep is not specified or is None, any
whitespace string is a separator and empty strings are removed
from the result.
"""
return []
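# Usage sketch (illustrative): an explicit sep keeps empty fields, while
# sep=None collapses runs of whitespace.
#   >>> 'a,,b'.split(',')
#   ['a', '', 'b']
#   >>> '  a  b '.split()
#   ['a', 'b']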
def splitlines(self, keepends=False): # real signature unknown; restored from __doc__
"""
S.splitlines(keepends=False) -> list of strings
Return a list of the lines in S, breaking at line boundaries.
Line breaks are not included in the resulting list unless keepends
is given and true.
"""
return []
def startswith(self, prefix, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.startswith(prefix[, start[, end]]) -> bool
Return True if S starts with the specified prefix, False otherwise.
With optional start, test S beginning at that position.
With optional end, stop comparing S at that position.
prefix can also be a tuple of strings to try.
"""
return False
def strip(self, chars=None): # real signature unknown; restored from __doc__
"""
S.strip([chars]) -> string or unicode
Return a copy of the string S with leading and trailing
whitespace removed.
If chars is given and not None, remove characters in chars instead.
If chars is unicode, S will be converted to unicode before stripping
"""
return ""
def swapcase(self): # real signature unknown; restored from __doc__
"""
S.swapcase() -> string
Return a copy of the string S with uppercase characters
converted to lowercase and vice versa.
"""
return ""
def title(self): # real signature unknown; restored from __doc__
"""
S.title() -> string
Return a titlecased version of S, i.e. words start with uppercase
characters, all remaining cased characters have lowercase.
"""
return ""
def translate(self, table, deletechars=None): # real signature unknown; restored from __doc__
"""
S.translate(table [,deletechars]) -> string
Return a copy of the string S, where all characters occurring
in the optional argument deletechars are removed, and the
remaining characters have been mapped through the given
translation table, which must be a string of length 256 or None.
If the table argument is None, no translation is applied and
the operation simply removes the characters in deletechars.
"""
return ""
def upper(self): # real signature unknown; restored from __doc__
"""
S.upper() -> string
Return a copy of the string S converted to uppercase.
"""
return ""
def zfill(self, width): # real signature unknown; restored from __doc__
"""
S.zfill(width) -> string
Pad a numeric string S with zeros on the left, to fill a field
of the specified width. The string S is never truncated.
"""
return ""
def _formatter_field_name_split(self, *args, **kwargs): # real signature unknown
pass
def _formatter_parser(self, *args, **kwargs): # real signature unknown
pass
def __add__(self, y): # real signature unknown; restored from __doc__
""" x.__add__(y) <==> x+y """
pass
def __contains__(self, y): # real signature unknown; restored from __doc__
""" x.__contains__(y) <==> y in x """
pass
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __format__(self, format_spec): # real signature unknown; restored from __doc__
"""
S.__format__(format_spec) -> string
Return a formatted version of S as described by format_spec.
"""
return ""
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __getitem__(self, y): # real signature unknown; restored from __doc__
""" x.__getitem__(y) <==> x[y] """
pass
def __getnewargs__(self, *args, **kwargs): # real signature unknown
pass
def __getslice__(self, i, j): # real signature unknown; restored from __doc__
"""
x.__getslice__(i, j) <==> x[i:j]
Use of negative indices is not supported.
"""
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __hash__(self): # real signature unknown; restored from __doc__
""" x.__hash__() <==> hash(x) """
pass
def __init__(self, string=''): # known special case of str.__init__
"""
str(object='') -> string
Return a nice string representation of the object.
If the argument is a string, the return value is the same object.
# (copied from class doc)
"""
pass
def __len__(self): # real signature unknown; restored from __doc__
""" x.__len__() <==> len(x) """
pass
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
def __mod__(self, y): # real signature unknown; restored from __doc__
""" x.__mod__(y) <==> x%y """
pass
def __mul__(self, n): # real signature unknown; restored from __doc__
""" x.__mul__(n) <==> x*n """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
def __rmod__(self, y): # real signature unknown; restored from __doc__
""" x.__rmod__(y) <==> y%x """
pass
def __rmul__(self, n): # real signature unknown; restored from __doc__
""" x.__rmul__(n) <==> n*x """
pass
def __sizeof__(self): # real signature unknown; restored from __doc__
""" S.__sizeof__() -> size of S in memory, in bytes """
pass
def __str__(self): # real signature unknown; restored from __doc__
""" x.__str__() <==> str(x) """
pass
bytes = str
class classmethod(object):
"""
classmethod(function) -> method
Convert a function to be a class method.
A class method receives the class as implicit first argument,
just like an instance method receives the instance.
To declare a class method, use this idiom:
class C:
def f(cls, arg1, arg2, ...): ...
f = classmethod(f)
It can be called either on the class (e.g. C.f()) or on an instance
(e.g. C().f()). The instance is ignored except for its class.
If a class method is called for a derived class, the derived class
object is passed as the implied first argument.
Class methods are different from C++ or Java static methods.
If you want those, see the staticmethod builtin.
"""
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __get__(self, obj, type=None): # real signature unknown; restored from __doc__
""" descr.__get__(obj[, type]) -> value """
pass
def __init__(self, function): # real signature unknown; restored from __doc__
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
__func__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
class complex(object):
"""
complex(real[, imag]) -> complex number
Create a complex number from a real part and an optional imaginary part.
This is equivalent to (real + imag*1j) where imag defaults to 0.
"""
def conjugate(self): # real signature unknown; restored from __doc__
"""
complex.conjugate() -> complex
Return the complex conjugate of its argument. (3-4j).conjugate() == 3+4j.
"""
return complex
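# Usage sketch (illustrative): conjugation flips the sign of the imaginary
# part; abs() gives the modulus.
#   >>> (3+4j).conjugate()
#   (3-4j)
#   >>> abs(3+4j)
#   5.0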
def __abs__(self): # real signature unknown; restored from __doc__
""" x.__abs__() <==> abs(x) """
pass
def __add__(self, y): # real signature unknown; restored from __doc__
""" x.__add__(y) <==> x+y """
pass
def __coerce__(self, y): # real signature unknown; restored from __doc__
""" x.__coerce__(y) <==> coerce(x, y) """
pass
def __divmod__(self, y): # real signature unknown; restored from __doc__
""" x.__divmod__(y) <==> divmod(x, y) """
pass
def __div__(self, y): # real signature unknown; restored from __doc__
""" x.__div__(y) <==> x/y """
pass
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __float__(self): # real signature unknown; restored from __doc__
""" x.__float__() <==> float(x) """
pass
def __floordiv__(self, y): # real signature unknown; restored from __doc__
""" x.__floordiv__(y) <==> x//y """
pass
def __format__(self, format_spec): # real signature unknown; restored from __doc__
"""
complex.__format__(format_spec) -> str
Convert to a string according to format_spec.
"""
return ""
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __getnewargs__(self, *args, **kwargs): # real signature unknown
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __hash__(self): # real signature unknown; restored from __doc__
""" x.__hash__() <==> hash(x) """
pass
def __init__(self, real, imag=None): # real signature unknown; restored from __doc__
pass
def __int__(self): # real signature unknown; restored from __doc__
""" x.__int__() <==> int(x) """
pass
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __long__(self): # real signature unknown; restored from __doc__
""" x.__long__() <==> long(x) """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
def __mod__(self, y): # real signature unknown; restored from __doc__
""" x.__mod__(y) <==> x%y """
pass
def __mul__(self, y): # real signature unknown; restored from __doc__
""" x.__mul__(y) <==> x*y """
pass
def __neg__(self): # real signature unknown; restored from __doc__
""" x.__neg__() <==> -x """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
def __nonzero__(self): # real signature unknown; restored from __doc__
""" x.__nonzero__() <==> x != 0 """
pass
def __pos__(self): # real signature unknown; restored from __doc__
""" x.__pos__() <==> +x """
pass
def __pow__(self, y, z=None): # real signature unknown; restored from __doc__
""" x.__pow__(y[, z]) <==> pow(x, y[, z]) """
pass
def __radd__(self, y): # real signature unknown; restored from __doc__
""" x.__radd__(y) <==> y+x """
pass
def __rdivmod__(self, y): # real signature unknown; restored from __doc__
""" x.__rdivmod__(y) <==> divmod(y, x) """
pass
def __rdiv__(self, y): # real signature unknown; restored from __doc__
""" x.__rdiv__(y) <==> y/x """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
def __rfloordiv__(self, y): # real signature unknown; restored from __doc__
""" x.__rfloordiv__(y) <==> y//x """
pass
def __rmod__(self, y): # real signature unknown; restored from __doc__
""" x.__rmod__(y) <==> y%x """
pass
def __rmul__(self, y): # real signature unknown; restored from __doc__
""" x.__rmul__(y) <==> y*x """
pass
def __rpow__(self, x, z=None): # real signature unknown; restored from __doc__
""" y.__rpow__(x[, z]) <==> pow(x, y[, z]) """
pass
def __rsub__(self, y): # real signature unknown; restored from __doc__
""" x.__rsub__(y) <==> y-x """
pass
def __rtruediv__(self, y): # real signature unknown; restored from __doc__
""" x.__rtruediv__(y) <==> y/x """
pass
def __str__(self): # real signature unknown; restored from __doc__
""" x.__str__() <==> str(x) """
pass
def __sub__(self, y): # real signature unknown; restored from __doc__
""" x.__sub__(y) <==> x-y """
pass
def __truediv__(self, y): # real signature unknown; restored from __doc__
""" x.__truediv__(y) <==> x/y """
pass
imag = property(lambda self: 0.0)
"""the imaginary part of a complex number
:type: float
"""
real = property(lambda self: 0.0)
"""the real part of a complex number
:type: float
"""
class dict(object):
"""
dict() -> new empty dictionary
dict(mapping) -> new dictionary initialized from a mapping object's
(key, value) pairs
dict(iterable) -> new dictionary initialized as if via:
d = {}
for k, v in iterable:
d[k] = v
dict(**kwargs) -> new dictionary initialized with the name=value pairs
in the keyword argument list. For example: dict(one=1, two=2)
"""
def clear(self): # real signature unknown; restored from __doc__
""" D.clear() -> None. Remove all items from D. """
pass
def copy(self): # real signature unknown; restored from __doc__
""" D.copy() -> a shallow copy of D """
pass
@staticmethod # known case
def fromkeys(S, v=None): # real signature unknown; restored from __doc__
"""
dict.fromkeys(S[,v]) -> New dict with keys from S and values equal to v.
v defaults to None.
"""
pass
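# Usage sketch (illustrative): fromkeys gives every key the same value
# object, so avoid a mutable default shared across keys.
#   >>> dict.fromkeys('ab', 0)
#   {'a': 0, 'b': 0}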
def get(self, k, d=None): # real signature unknown; restored from __doc__
""" D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None. """
pass
def has_key(self, k): # real signature unknown; restored from __doc__
""" D.has_key(k) -> True if D has a key k, else False """
return False
def items(self): # real signature unknown; restored from __doc__
""" D.items() -> list of D's (key, value) pairs, as 2-tuples """
return []
def iteritems(self): # real signature unknown; restored from __doc__
""" D.iteritems() -> an iterator over the (key, value) items of D """
pass
def iterkeys(self): # real signature unknown; restored from __doc__
""" D.iterkeys() -> an iterator over the keys of D """
pass
def itervalues(self): # real signature unknown; restored from __doc__
""" D.itervalues() -> an iterator over the values of D """
pass
def keys(self): # real signature unknown; restored from __doc__
""" D.keys() -> list of D's keys """
return []
def pop(self, k, d=None): # real signature unknown; restored from __doc__
"""
D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised
"""
pass
def popitem(self): # real signature unknown; restored from __doc__
"""
D.popitem() -> (k, v), remove and return some (key, value) pair as a
2-tuple; but raise KeyError if D is empty.
"""
pass
def setdefault(self, k, d=None): # real signature unknown; restored from __doc__
""" D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D """
pass
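# Usage sketch (illustrative): the common grouping idiom, relying on
# setdefault returning the stored value.
#   >>> groups = {}
#   >>> groups.setdefault('evens', []).append(2)
#   >>> groups
#   {'evens': [2]}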
def update(self, E=None, **F): # known special case of dict.update
"""
D.update([E, ]**F) -> None. Update D from dict/iterable E and F.
If E present and has a .keys() method, does: for k in E: D[k] = E[k]
If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v
In either case, this is followed by: for k in F: D[k] = F[k]
"""
pass
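# Usage sketch (illustrative): a mapping argument and keyword arguments
# can be combined in one call.
#   >>> d = {'a': 1}
#   >>> d.update({'b': 2}, c=3)
#   >>> sorted(d.items())
#   [('a', 1), ('b', 2), ('c', 3)]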
def values(self): # real signature unknown; restored from __doc__
""" D.values() -> list of D's values """
return []
def viewitems(self): # real signature unknown; restored from __doc__
""" D.viewitems() -> a set-like object providing a view on D's items """
pass
def viewkeys(self): # real signature unknown; restored from __doc__
""" D.viewkeys() -> a set-like object providing a view on D's keys """
pass
def viewvalues(self): # real signature unknown; restored from __doc__
""" D.viewvalues() -> an object providing a view on D's values """
pass
def __cmp__(self, y): # real signature unknown; restored from __doc__
""" x.__cmp__(y) <==> cmp(x,y) """
pass
def __contains__(self, k): # real signature unknown; restored from __doc__
""" D.__contains__(k) -> True if D has a key k, else False """
return False
def __delitem__(self, y): # real signature unknown; restored from __doc__
""" x.__delitem__(y) <==> del x[y] """
pass
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __getitem__(self, y): # real signature unknown; restored from __doc__
""" x.__getitem__(y) <==> x[y] """
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __init__(self, seq=None, **kwargs): # known special case of dict.__init__
"""
dict() -> new empty dictionary
dict(mapping) -> new dictionary initialized from a mapping object's
(key, value) pairs
dict(iterable) -> new dictionary initialized as if via:
d = {}
for k, v in iterable:
d[k] = v
dict(**kwargs) -> new dictionary initialized with the name=value pairs
in the keyword argument list. For example: dict(one=1, two=2)
# (copied from class doc)
"""
pass
def __iter__(self): # real signature unknown; restored from __doc__
""" x.__iter__() <==> iter(x) """
pass
def __len__(self): # real signature unknown; restored from __doc__
""" x.__len__() <==> len(x) """
pass
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
def __setitem__(self, i, y): # real signature unknown; restored from __doc__
""" x.__setitem__(i, y) <==> x[i]=y """
pass
def __sizeof__(self): # real signature unknown; restored from __doc__
""" D.__sizeof__() -> size of D in memory, in bytes """
pass
__hash__ = None
class enumerate(object):
"""
enumerate(iterable[, start]) -> iterator for index, value of iterable
Return an enumerate object. iterable must be another object that supports
iteration. The enumerate object yields pairs containing a count (from
start, which defaults to zero) and a value yielded by the iterable argument.
enumerate is useful for obtaining an indexed list:
(0, seq[0]), (1, seq[1]), (2, seq[2]), ...
"""
def next(self): # real signature unknown; restored from __doc__
""" x.next() -> the next value, or raise StopIteration """
pass
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __init__(self, iterable, start=0): # known special case of enumerate.__init__
""" x.__init__(...) initializes x; see help(type(x)) for signature """
pass
def __iter__(self): # real signature unknown; restored from __doc__
""" x.__iter__() <==> iter(x) """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
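# Usage sketch (illustrative): the start argument offsets the counter.
#   >>> list(enumerate('abc', start=1))
#   [(1, 'a'), (2, 'b'), (3, 'c')]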
class file(object):
"""
file(name[, mode[, buffering]]) -> file object
Open a file. The mode can be 'r', 'w' or 'a' for reading (default),
writing or appending. The file will be created if it doesn't exist
when opened for writing or appending; it will be truncated when
opened for writing. Add a 'b' to the mode for binary files.
Add a '+' to the mode to allow simultaneous reading and writing.
If the buffering argument is given, 0 means unbuffered, 1 means line
buffered, and larger numbers specify the buffer size. The preferred way
to open a file is with the builtin open() function.
Add a 'U' to mode to open the file for input with universal newline
support. Any line ending in the input file will be seen as a '\n'
in Python. Also, a file so opened gains the attribute 'newlines';
the value for this attribute is one of None (no newline read yet),
'\r', '\n', '\r\n' or a tuple containing all the newline types seen.
'U' cannot be combined with 'w' or '+' mode.
"""
def close(self): # real signature unknown; restored from __doc__
"""
close() -> None or (perhaps) an integer. Close the file.
Sets data attribute .closed to True. A closed file cannot be used for
further I/O operations. close() may be called more than once without
error. Some kinds of file objects (for example, opened by popen())
may return an exit status upon closing.
"""
pass
def fileno(self): # real signature unknown; restored from __doc__
"""
fileno() -> integer "file descriptor".
This is needed for lower-level file interfaces, such as os.read().
"""
return 0
def flush(self): # real signature unknown; restored from __doc__
""" flush() -> None. Flush the internal I/O buffer. """
pass
def isatty(self): # real signature unknown; restored from __doc__
""" isatty() -> true or false. True if the file is connected to a tty device. """
return False
def next(self): # real signature unknown; restored from __doc__
""" x.next() -> the next value, or raise StopIteration """
pass
def read(self, size=None): # real signature unknown; restored from __doc__
"""
read([size]) -> read at most size bytes, returned as a string.
If the size argument is negative or omitted, read until EOF is reached.
Notice that when in non-blocking mode, less data than what was requested
may be returned, even if no size parameter was given.
"""
pass
def readinto(self): # real signature unknown; restored from __doc__
""" readinto() -> Undocumented. Don't use this; it may go away. """
pass
def readline(self, size=None): # real signature unknown; restored from __doc__
"""
readline([size]) -> next line from the file, as a string.
Retain newline. A non-negative size argument limits the maximum
number of bytes to return (an incomplete line may be returned then).
Return an empty string at EOF.
"""
pass
def readlines(self, size=None): # real signature unknown; restored from __doc__
"""
readlines([size]) -> list of strings, each a line from the file.
Call readline() repeatedly and return a list of the lines so read.
The optional size argument, if given, is an approximate bound on the
total number of bytes in the lines returned.
"""
return []
def seek(self, offset, whence=None): # real signature unknown; restored from __doc__
"""
seek(offset[, whence]) -> None. Move to new file position.
Argument offset is a byte count. Optional argument whence defaults to
0 (offset from start of file, offset should be >= 0); other values are 1
(move relative to current position, positive or negative), and 2 (move
relative to end of file, usually negative, although many platforms allow
seeking beyond the end of a file). If the file is opened in text mode,
only offsets returned by tell() are legal. Use of other offsets causes
undefined behavior.
Note that not all file objects are seekable.
"""
pass
def tell(self): # real signature unknown; restored from __doc__
""" tell() -> current file position, an integer (may be a long integer). """
pass
def truncate(self, size=None): # real signature unknown; restored from __doc__
"""
truncate([size]) -> None. Truncate the file to at most size bytes.
Size defaults to the current file position, as returned by tell().
"""
pass
def write(self, p_str): # real signature unknown; restored from __doc__
"""
write(str) -> None. Write string str to file.
Note that due to buffering, flush() or close() may be needed before
the file on disk reflects the data written.
"""
pass
def writelines(self, sequence_of_strings): # real signature unknown; restored from __doc__
"""
writelines(sequence_of_strings) -> None. Write the strings to the file.
Note that newlines are not added. The sequence can be any iterable object
producing strings. This is equivalent to calling write() for each string.
"""
pass
def xreadlines(self): # real signature unknown; restored from __doc__
"""
xreadlines() -> returns self.
For backward compatibility. File objects now include the performance
optimizations previously implemented in the xreadlines module.
"""
pass
def __delattr__(self, name): # real signature unknown; restored from __doc__
""" x.__delattr__('name') <==> del x.name """
pass
def __enter__(self): # real signature unknown; restored from __doc__
""" __enter__() -> self. """
return self
def __exit__(self, *excinfo): # real signature unknown; restored from __doc__
""" __exit__(*excinfo) -> None. Closes the file. """
pass
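# Usage sketch (illustrative; 'data.txt' and process() are hypothetical
# names): the __enter__/__exit__ protocol above is what makes this idiom
# close the file even when an exception is raised.
#   >>> with open('data.txt') as f:
#   ...     for line in f:
#   ...         process(line)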
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __init__(self, name, mode=None, buffering=None): # real signature unknown; restored from __doc__
pass
def __iter__(self): # real signature unknown; restored from __doc__
""" x.__iter__() <==> iter(x) """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
def __setattr__(self, name, value): # real signature unknown; restored from __doc__
""" x.__setattr__('name', value) <==> x.name = value """
pass
closed = property(lambda self: True)
"""True if the file is closed
:type: bool
"""
encoding = property(lambda self: '')
"""file encoding
:type: string
"""
errors = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Unicode error handler"""
mode = property(lambda self: '')
"""file mode ('r', 'U', 'w', 'a', possibly with 'b' or '+' added)
:type: string
"""
name = property(lambda self: '')
"""file name
:type: string
"""
newlines = property(lambda self: '')
"""end-of-line convention used in this file
:type: string
"""
softspace = property(lambda self: True)
"""flag indicating that a space needs to be printed; used by print
:type: bool
"""
class float(object):
"""
float(x) -> floating point number
Convert a string or number to a floating point number, if possible.
"""
def as_integer_ratio(self): # real signature unknown; restored from __doc__
"""
float.as_integer_ratio() -> (int, int)
Return a pair of integers, whose ratio is exactly equal to the original
float and with a positive denominator.
Raise OverflowError on infinities and a ValueError on NaNs.
>>> (10.0).as_integer_ratio()
(10, 1)
>>> (0.0).as_integer_ratio()
(0, 1)
>>> (-.25).as_integer_ratio()
(-1, 4)
"""
pass
def conjugate(self, *args, **kwargs): # real signature unknown
""" Return self, the complex conjugate of any float. """
pass
@staticmethod # known case
def fromhex(string): # real signature unknown; restored from __doc__
"""
float.fromhex(string) -> float
Create a floating-point number from a hexadecimal string.
>>> float.fromhex('0x1.ffffp10')
2047.984375
>>> float.fromhex('-0x1p-1074')
-4.9406564584124654e-324
"""
return 0.0
def hex(self): # real signature unknown; restored from __doc__
"""
float.hex() -> string
Return a hexadecimal representation of a floating-point number.
>>> (-0.1).hex()
'-0x1.999999999999ap-4'
>>> 3.14159.hex()
'0x1.921f9f01b866ep+1'
"""
return ""
def is_integer(self, *args, **kwargs): # real signature unknown
""" Return True if the float is an integer. """
pass
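# Usage sketch (illustrative):
#   >>> (2.0).is_integer()
#   True
#   >>> (2.5).is_integer()
#   False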
def __abs__(self): # real signature unknown; restored from __doc__
""" x.__abs__() <==> abs(x) """
pass
def __add__(self, y): # real signature unknown; restored from __doc__
""" x.__add__(y) <==> x+y """
pass
def __coerce__(self, y): # real signature unknown; restored from __doc__
""" x.__coerce__(y) <==> coerce(x, y) """
pass
def __divmod__(self, y): # real signature unknown; restored from __doc__
""" x.__divmod__(y) <==> divmod(x, y) """
pass
def __div__(self, y): # real signature unknown; restored from __doc__
""" x.__div__(y) <==> x/y """
pass
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __float__(self): # real signature unknown; restored from __doc__
""" x.__float__() <==> float(x) """
pass
def __floordiv__(self, y): # real signature unknown; restored from __doc__
""" x.__floordiv__(y) <==> x//y """
pass
def __format__(self, format_spec): # real signature unknown; restored from __doc__
"""
float.__format__(format_spec) -> string
Formats the float according to format_spec.
"""
return ""
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __getformat__(self, typestr): # real signature unknown; restored from __doc__
"""
float.__getformat__(typestr) -> string
You probably don't want to use this function. It exists mainly to be
used in Python's test suite.
typestr must be 'double' or 'float'. This function returns whichever of
'unknown', 'IEEE, big-endian' or 'IEEE, little-endian' best describes the
format of floating point numbers used by the C type named by typestr.
"""
return ""
def __getnewargs__(self, *args, **kwargs): # real signature unknown
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __hash__(self): # real signature unknown; restored from __doc__
""" x.__hash__() <==> hash(x) """
pass
def __init__(self, x): # real signature unknown; restored from __doc__
pass
def __int__(self): # real signature unknown; restored from __doc__
""" x.__int__() <==> int(x) """
pass
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __long__(self): # real signature unknown; restored from __doc__
""" x.__long__() <==> long(x) """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
def __mod__(self, y): # real signature unknown; restored from __doc__
""" x.__mod__(y) <==> x%y """
pass
def __mul__(self, y): # real signature unknown; restored from __doc__
""" x.__mul__(y) <==> x*y """
pass
def __neg__(self): # real signature unknown; restored from __doc__
""" x.__neg__() <==> -x """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
def __nonzero__(self): # real signature unknown; restored from __doc__
""" x.__nonzero__() <==> x != 0 """
pass
def __pos__(self): # real signature unknown; restored from __doc__
""" x.__pos__() <==> +x """
pass
def __pow__(self, y, z=None): # real signature unknown; restored from __doc__
""" x.__pow__(y[, z]) <==> pow(x, y[, z]) """
pass
def __radd__(self, y): # real signature unknown; restored from __doc__
""" x.__radd__(y) <==> y+x """
pass
def __rdivmod__(self, y): # real signature unknown; restored from __doc__
""" x.__rdivmod__(y) <==> divmod(y, x) """
pass
def __rdiv__(self, y): # real signature unknown; restored from __doc__
""" x.__rdiv__(y) <==> y/x """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
def __rfloordiv__(self, y): # real signature unknown; restored from __doc__
""" x.__rfloordiv__(y) <==> y//x """
pass
def __rmod__(self, y): # real signature unknown; restored from __doc__
""" x.__rmod__(y) <==> y%x """
pass
def __rmul__(self, y): # real signature unknown; restored from __doc__
""" x.__rmul__(y) <==> y*x """
pass
def __rpow__(self, x, z=None): # real signature unknown; restored from __doc__
""" y.__rpow__(x[, z]) <==> pow(x, y[, z]) """
pass
def __rsub__(self, y): # real signature unknown; restored from __doc__
""" x.__rsub__(y) <==> y-x """
pass
def __rtruediv__(self, y): # real signature unknown; restored from __doc__
""" x.__rtruediv__(y) <==> y/x """
pass
def __setformat__(self, typestr, fmt): # real signature unknown; restored from __doc__
"""
float.__setformat__(typestr, fmt) -> None
You probably don't want to use this function. It exists mainly to be
used in Python's test suite.
typestr must be 'double' or 'float'. fmt must be one of 'unknown',
'IEEE, big-endian' or 'IEEE, little-endian', and in addition can only be
one of the latter two if it appears to match the underlying C reality.
Override the automatic determination of C-level floating point type.
This affects how floats are converted to and from binary strings.
"""
pass
def __str__(self): # real signature unknown; restored from __doc__
""" x.__str__() <==> str(x) """
pass
def __sub__(self, y): # real signature unknown; restored from __doc__
""" x.__sub__(y) <==> x-y """
pass
def __truediv__(self, y): # real signature unknown; restored from __doc__
""" x.__truediv__(y) <==> x/y """
pass
def __trunc__(self, *args, **kwargs): # real signature unknown
""" Return the Integral closest to x between 0 and x. """
pass
imag = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""the imaginary part of a complex number"""
real = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""the real part of a complex number"""
class frozenset(object):
"""
frozenset() -> empty frozenset object
frozenset(iterable) -> frozenset object
Build an immutable unordered collection of unique elements.
"""
def copy(self, *args, **kwargs): # real signature unknown
""" Return a shallow copy of a set. """
pass
def difference(self, *args, **kwargs): # real signature unknown
"""
Return the difference of two or more sets as a new set.
(i.e. all elements that are in this set but not the others.)
"""
pass
def intersection(self, *args, **kwargs): # real signature unknown
"""
Return the intersection of two or more sets as a new set.
(i.e. elements that are common to all of the sets.)
"""
pass
def isdisjoint(self, *args, **kwargs): # real signature unknown
""" Return True if two sets have a null intersection. """
pass
def issubset(self, *args, **kwargs): # real signature unknown
""" Report whether another set contains this set. """
pass
def issuperset(self, *args, **kwargs): # real signature unknown
""" Report whether this set contains another set. """
pass
def symmetric_difference(self, *args, **kwargs): # real signature unknown
"""
Return the symmetric difference of two sets as a new set.
(i.e. all elements that are in exactly one of the sets.)
"""
pass
def union(self, *args, **kwargs): # real signature unknown
"""
Return the union of sets as a new set.
(i.e. all elements that are in either set.)
"""
pass
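# Usage sketch (illustrative): frozensets support the same algebra as set
# but are hashable, so they can serve as dict keys.
#   >>> fs = frozenset([1, 2, 3])
#   >>> fs & frozenset([2, 3, 4])
#   frozenset([2, 3])
#   >>> {fs: 'ok as a key'}[fs]
#   'ok as a key'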
def __and__(self, y): # real signature unknown; restored from __doc__
""" x.__and__(y) <==> x&y """
pass
def __cmp__(self, y): # real signature unknown; restored from __doc__
""" x.__cmp__(y) <==> cmp(x,y) """
pass
def __contains__(self, y): # real signature unknown; restored from __doc__
""" x.__contains__(y) <==> y in x. """
pass
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __hash__(self): # real signature unknown; restored from __doc__
""" x.__hash__() <==> hash(x) """
pass
def __init__(self, seq=()): # known special case of frozenset.__init__
""" x.__init__(...) initializes x; see help(type(x)) for signature """
pass
def __iter__(self): # real signature unknown; restored from __doc__
""" x.__iter__() <==> iter(x) """
pass
def __len__(self): # real signature unknown; restored from __doc__
""" x.__len__() <==> len(x) """
pass
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
def __or__(self, y): # real signature unknown; restored from __doc__
""" x.__or__(y) <==> x|y """
pass
def __rand__(self, y): # real signature unknown; restored from __doc__
""" x.__rand__(y) <==> y&x """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Return state information for pickling. """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
def __ror__(self, y): # real signature unknown; restored from __doc__
""" x.__ror__(y) <==> y|x """
pass
def __rsub__(self, y): # real signature unknown; restored from __doc__
""" x.__rsub__(y) <==> y-x """
pass
def __rxor__(self, y): # real signature unknown; restored from __doc__
""" x.__rxor__(y) <==> y^x """
pass
def __sizeof__(self): # real signature unknown; restored from __doc__
""" S.__sizeof__() -> size of S in memory, in bytes """
pass
def __sub__(self, y): # real signature unknown; restored from __doc__
""" x.__sub__(y) <==> x-y """
pass
def __xor__(self, y): # real signature unknown; restored from __doc__
""" x.__xor__(y) <==> x^y """
pass
class list(object):
"""
list() -> new empty list
list(iterable) -> new list initialized from iterable's items
"""
def append(self, p_object): # real signature unknown; restored from __doc__
""" L.append(object) -- append object to end """
pass
def count(self, value): # real signature unknown; restored from __doc__
""" L.count(value) -> integer -- return number of occurrences of value """
return 0
def extend(self, iterable): # real signature unknown; restored from __doc__
""" L.extend(iterable) -- extend list by appending elements from the iterable """
pass
def index(self, value, start=None, stop=None): # real signature unknown; restored from __doc__
"""
L.index(value, [start, [stop]]) -> integer -- return first index of value.
Raises ValueError if the value is not present.
"""
return 0
def insert(self, index, p_object): # real signature unknown; restored from __doc__
""" L.insert(index, object) -- insert object before index """
pass
def pop(self, index=None): # real signature unknown; restored from __doc__
"""
L.pop([index]) -> item -- remove and return item at index (default last).
Raises IndexError if list is empty or index is out of range.
"""
pass
def remove(self, value): # real signature unknown; restored from __doc__
"""
L.remove(value) -- remove first occurrence of value.
Raises ValueError if the value is not present.
"""
pass
def reverse(self): # real signature unknown; restored from __doc__
""" L.reverse() -- reverse *IN PLACE* """
pass
def sort(self, cmp=None, key=None, reverse=False): # real signature unknown; restored from __doc__
"""
L.sort(cmp=None, key=None, reverse=False) -- stable sort *IN PLACE*;
cmp(x, y) -> -1, 0, 1
"""
pass
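# Usage sketch (illustrative): key is usually preferable to cmp; the sort
# is stable, so items with equal keys keep their relative order.
#   >>> words = ['pear', 'Apple', 'fig']
#   >>> words.sort(key=str.lower)
#   >>> words
#   ['Apple', 'fig', 'pear']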
def __add__(self, y): # real signature unknown; restored from __doc__
""" x.__add__(y) <==> x+y """
pass
def __contains__(self, y): # real signature unknown; restored from __doc__
""" x.__contains__(y) <==> y in x """
pass
def __delitem__(self, y): # real signature unknown; restored from __doc__
""" x.__delitem__(y) <==> del x[y] """
pass
def __delslice__(self, i, j): # real signature unknown; restored from __doc__
"""
x.__delslice__(i, j) <==> del x[i:j]
Use of negative indices is not supported.
"""
pass
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __getitem__(self, y): # real signature unknown; restored from __doc__
""" x.__getitem__(y) <==> x[y] """
pass
def __getslice__(self, i, j): # real signature unknown; restored from __doc__
"""
x.__getslice__(i, j) <==> x[i:j]
Use of negative indices is not supported.
"""
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __iadd__(self, y): # real signature unknown; restored from __doc__
""" x.__iadd__(y) <==> x+=y """
pass
def __imul__(self, y): # real signature unknown; restored from __doc__
""" x.__imul__(y) <==> x*=y """
pass
def __init__(self, seq=()): # known special case of list.__init__
"""
list() -> new empty list
list(iterable) -> new list initialized from iterable's items
# (copied from class doc)
"""
pass
def __iter__(self): # real signature unknown; restored from __doc__
""" x.__iter__() <==> iter(x) """
pass
def __len__(self): # real signature unknown; restored from __doc__
""" x.__len__() <==> len(x) """
pass
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
def __mul__(self, n): # real signature unknown; restored from __doc__
""" x.__mul__(n) <==> x*n """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
def __reversed__(self): # real signature unknown; restored from __doc__
""" L.__reversed__() -- return a reverse iterator over the list """
pass
def __rmul__(self, n): # real signature unknown; restored from __doc__
""" x.__rmul__(n) <==> n*x """
pass
def __setitem__(self, i, y): # real signature unknown; restored from __doc__
""" x.__setitem__(i, y) <==> x[i]=y """
pass
def __setslice__(self, i, j, y): # real signature unknown; restored from __doc__
"""
x.__setslice__(i, j, y) <==> x[i:j]=y
Use of negative indices is not supported.
"""
pass
def __sizeof__(self): # real signature unknown; restored from __doc__
""" L.__sizeof__() -- size of L in memory, in bytes """
pass
__hash__ = None
class long(object):
"""
long(x=0) -> long
long(x, base=10) -> long
Convert a number or string to a long integer, or return 0L if no arguments
are given. If x is floating point, the conversion truncates towards zero.
If x is not a number or if base is given, then x must be a string or
Unicode object representing an integer literal in the given base. The
literal can be preceded by '+' or '-' and be surrounded by whitespace.
The base defaults to 10. Valid bases are 0 and 2-36. Base 0 means to
interpret the base from the string as an integer literal.
>>> long('0b100', base=0)
4L
"""
def bit_length(self): # real signature unknown; restored from __doc__
"""
long.bit_length() -> int or long
Number of bits necessary to represent self in binary.
>>> bin(37L)
'0b100101'
>>> (37L).bit_length()
6
"""
return 0
def conjugate(self, *args, **kwargs): # real signature unknown
""" Returns self, the complex conjugate of any long. """
pass
def __abs__(self): # real signature unknown; restored from __doc__
""" x.__abs__() <==> abs(x) """
pass
def __add__(self, y): # real signature unknown; restored from __doc__
""" x.__add__(y) <==> x+y """
pass
def __and__(self, y): # real signature unknown; restored from __doc__
""" x.__and__(y) <==> x&y """
pass
def __cmp__(self, y): # real signature unknown; restored from __doc__
""" x.__cmp__(y) <==> cmp(x,y) """
pass
def __coerce__(self, y): # real signature unknown; restored from __doc__
""" x.__coerce__(y) <==> coerce(x, y) """
pass
def __divmod__(self, y): # real signature unknown; restored from __doc__
""" x.__divmod__(y) <==> divmod(x, y) """
pass
def __div__(self, y): # real signature unknown; restored from __doc__
""" x.__div__(y) <==> x/y """
pass
def __float__(self): # real signature unknown; restored from __doc__
""" x.__float__() <==> float(x) """
pass
def __floordiv__(self, y): # real signature unknown; restored from __doc__
""" x.__floordiv__(y) <==> x//y """
pass
def __format__(self, *args, **kwargs): # real signature unknown
pass
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __getnewargs__(self, *args, **kwargs): # real signature unknown
pass
def __hash__(self): # real signature unknown; restored from __doc__
""" x.__hash__() <==> hash(x) """
pass
def __hex__(self): # real signature unknown; restored from __doc__
""" x.__hex__() <==> hex(x) """
pass
def __index__(self): # real signature unknown; restored from __doc__
""" x[y:z] <==> x[y.__index__():z.__index__()] """
pass
def __init__(self, x=0): # real signature unknown; restored from __doc__
pass
def __int__(self): # real signature unknown; restored from __doc__
""" x.__int__() <==> int(x) """
pass
def __invert__(self): # real signature unknown; restored from __doc__
""" x.__invert__() <==> ~x """
pass
def __long__(self): # real signature unknown; restored from __doc__
""" x.__long__() <==> long(x) """
pass
def __lshift__(self, y): # real signature unknown; restored from __doc__
""" x.__lshift__(y) <==> x<<y """
pass
def __mod__(self, y): # real signature unknown; restored from __doc__
""" x.__mod__(y) <==> x%y """
pass
def __mul__(self, y): # real signature unknown; restored from __doc__
""" x.__mul__(y) <==> x*y """
pass
def __neg__(self): # real signature unknown; restored from __doc__
""" x.__neg__() <==> -x """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __nonzero__(self): # real signature unknown; restored from __doc__
""" x.__nonzero__() <==> x != 0 """
pass
def __oct__(self): # real signature unknown; restored from __doc__
""" x.__oct__() <==> oct(x) """
pass
def __or__(self, y): # real signature unknown; restored from __doc__
""" x.__or__(y) <==> x|y """
pass
def __pos__(self): # real signature unknown; restored from __doc__
""" x.__pos__() <==> +x """
pass
def __pow__(self, y, z=None): # real signature unknown; restored from __doc__
""" x.__pow__(y[, z]) <==> pow(x, y[, z]) """
pass
def __radd__(self, y): # real signature unknown; restored from __doc__
""" x.__radd__(y) <==> y+x """
pass
def __rand__(self, y): # real signature unknown; restored from __doc__
""" x.__rand__(y) <==> y&x """
pass
def __rdivmod__(self, y): # real signature unknown; restored from __doc__
""" x.__rdivmod__(y) <==> divmod(y, x) """
pass
def __rdiv__(self, y): # real signature unknown; restored from __doc__
""" x.__rdiv__(y) <==> y/x """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
def __rfloordiv__(self, y): # real signature unknown; restored from __doc__
""" x.__rfloordiv__(y) <==> y//x """
pass
def __rlshift__(self, y): # real signature unknown; restored from __doc__
""" x.__rlshift__(y) <==> y<<x """
pass
def __rmod__(self, y): # real signature unknown; restored from __doc__
""" x.__rmod__(y) <==> y%x """
pass
def __rmul__(self, y): # real signature unknown; restored from __doc__
""" x.__rmul__(y) <==> y*x """
pass
def __ror__(self, y): # real signature unknown; restored from __doc__
""" x.__ror__(y) <==> y|x """
pass
def __rpow__(self, x, z=None): # real signature unknown; restored from __doc__
""" y.__rpow__(x[, z]) <==> pow(x, y[, z]) """
pass
def __rrshift__(self, y): # real signature unknown; restored from __doc__
""" x.__rrshift__(y) <==> y>>x """
pass
def __rshift__(self, y): # real signature unknown; restored from __doc__
""" x.__rshift__(y) <==> x>>y """
pass
def __rsub__(self, y): # real signature unknown; restored from __doc__
""" x.__rsub__(y) <==> y-x """
pass
def __rtruediv__(self, y): # real signature unknown; restored from __doc__
""" x.__rtruediv__(y) <==> y/x """
pass
def __rxor__(self, y): # real signature unknown; restored from __doc__
""" x.__rxor__(y) <==> y^x """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Returns size in memory, in bytes """
pass
def __str__(self): # real signature unknown; restored from __doc__
""" x.__str__() <==> str(x) """
pass
def __sub__(self, y): # real signature unknown; restored from __doc__
""" x.__sub__(y) <==> x-y """
pass
def __truediv__(self, y): # real signature unknown; restored from __doc__
""" x.__truediv__(y) <==> x/y """
pass
def __trunc__(self, *args, **kwargs): # real signature unknown
""" Truncating an Integral returns itself. """
pass
def __xor__(self, y): # real signature unknown; restored from __doc__
""" x.__xor__(y) <==> x^y """
pass
denominator = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""the denominator of a rational number in lowest terms"""
imag = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""the imaginary part of a complex number"""
numerator = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""the numerator of a rational number in lowest terms"""
real = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""the real part of a complex number"""
class memoryview(object):
"""
memoryview(object)
Create a new memoryview object which references the given object.
"""
def tobytes(self, *args, **kwargs): # real signature unknown
pass
def tolist(self, *args, **kwargs): # real signature unknown
pass
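# Usage sketch (illustrative): a memoryview over a bytearray writes
# through to the underlying buffer without copying.
#   >>> buf = bytearray('hello')
#   >>> memoryview(buf)[0] = 'H'
#   >>> buf
#   bytearray(b'Hello')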
def __delitem__(self, y): # real signature unknown; restored from __doc__
""" x.__delitem__(y) <==> del x[y] """
pass
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __getitem__(self, y): # real signature unknown; restored from __doc__
""" x.__getitem__(y) <==> x[y] """
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __init__(self, p_object): # real signature unknown; restored from __doc__
pass
def __len__(self): # real signature unknown; restored from __doc__
""" x.__len__() <==> len(x) """
pass
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
def __setitem__(self, i, y): # real signature unknown; restored from __doc__
""" x.__setitem__(i, y) <==> x[i]=y """
pass
format = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
itemsize = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
ndim = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
readonly = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
shape = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
strides = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
suboffsets = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
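# Illustrative sketch (not part of the generated skeleton): a memoryview is a
# zero-copy window onto a buffer, so writes through a view of a mutable
# buffer are visible in the underlying object (Python 2 semantics assumed).
def _demo_memoryview():
    buf = bytearray(b'hello')
    view = memoryview(buf)
    view[0] = b'H'  # item assignment takes a one-byte string in Python 2
    assert buf == bytearray(b'Hello')
    assert view.tobytes() == b'Hello'
_demo_memoryview()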
class property(object):
"""
property(fget=None, fset=None, fdel=None, doc=None) -> property attribute
fget is a function to be used for getting an attribute value, and likewise
fset is a function for setting, and fdel a function for del'ing, an
attribute. Typical use is to define a managed attribute x:
class C(object):
def getx(self): return self._x
def setx(self, value): self._x = value
def delx(self): del self._x
x = property(getx, setx, delx, "I'm the 'x' property.")
Decorators make defining new properties or modifying existing ones easy:
class C(object):
@property
def x(self):
"I am the 'x' property."
return self._x
@x.setter
def x(self, value):
self._x = value
@x.deleter
def x(self):
del self._x
"""
def deleter(self, *args, **kwargs): # real signature unknown
""" Descriptor to change the deleter on a property. """
pass
def getter(self, *args, **kwargs): # real signature unknown
""" Descriptor to change the getter on a property. """
pass
def setter(self, *args, **kwargs): # real signature unknown
""" Descriptor to change the setter on a property. """
pass
def __delete__(self, obj): # real signature unknown; restored from __doc__
""" descr.__delete__(obj) """
pass
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __get__(self, obj, type=None): # real signature unknown; restored from __doc__
""" descr.__get__(obj[, type]) -> value """
pass
def __init__(self, fget=None, fset=None, fdel=None, doc=None): # known special case of property.__init__
"""
property(fget=None, fset=None, fdel=None, doc=None) -> property attribute
fget is a function to be used for getting an attribute value, and likewise
fset is a function for setting, and fdel a function for del'ing, an
attribute. Typical use is to define a managed attribute x:
class C(object):
def getx(self): return self._x
def setx(self, value): self._x = value
def delx(self): del self._x
x = property(getx, setx, delx, "I'm the 'x' property.")
Decorators make defining new properties or modifying existing ones easy:
class C(object):
@property
def x(self):
"I am the 'x' property."
return self._x
@x.setter
def x(self, value):
self._x = value
@x.deleter
def x(self):
del self._x
# (copied from class doc)
"""
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __set__(self, obj, value): # real signature unknown; restored from __doc__
""" descr.__set__(obj, value) """
pass
fdel = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
fget = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
fset = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
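# Illustrative sketch (not part of the generated skeleton): the decorator
# form of property from the docstring above, exercised end to end.
class _Celsius(object):
    def __init__(self):
        self._value = 0.0
    @property
    def value(self):
        "Temperature in degrees Celsius."
        return self._value
    @value.setter
    def value(self, v):
        self._value = float(v)
_c = _Celsius()
_c.value = 21
assert _c.value == 21.0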
class reversed(object):
"""
reversed(sequence) -> reverse iterator over values of the sequence
Return a reverse iterator
"""
def next(self): # real signature unknown; restored from __doc__
""" x.next() -> the next value, or raise StopIteration """
pass
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __init__(self, sequence): # real signature unknown; restored from __doc__
pass
def __iter__(self): # real signature unknown; restored from __doc__
""" x.__iter__() <==> iter(x) """
pass
def __length_hint__(self, *args, **kwargs): # real signature unknown
""" Private method returning an estimate of len(list(it)). """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
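# Illustrative sketch (not part of the generated skeleton): reversed() yields
# a lazy iterator over the sequence rather than a reversed copy.
_rev = reversed([1, 2, 3])
assert _rev.next() == 3  # Python 2 iterator protocol
assert list(_rev) == [2, 1]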
class set(object):
"""
set() -> new empty set object
set(iterable) -> new set object
Build an unordered collection of unique elements.
"""
def add(self, *args, **kwargs): # real signature unknown
"""
Add an element to a set.
This has no effect if the element is already present.
"""
pass
def clear(self, *args, **kwargs): # real signature unknown
""" Remove all elements from this set. """
pass
def copy(self, *args, **kwargs): # real signature unknown
""" Return a shallow copy of a set. """
pass
def difference(self, *args, **kwargs): # real signature unknown
"""
Return the difference of two or more sets as a new set.
(i.e. all elements that are in this set but not the others.)
"""
pass
def difference_update(self, *args, **kwargs): # real signature unknown
""" Remove all elements of another set from this set. """
pass
def discard(self, *args, **kwargs): # real signature unknown
"""
Remove an element from a set if it is a member.
If the element is not a member, do nothing.
"""
pass
def intersection(self, *args, **kwargs): # real signature unknown
"""
Return the intersection of two or more sets as a new set.
(i.e. elements that are common to all of the sets.)
"""
pass
def intersection_update(self, *args, **kwargs): # real signature unknown
""" Update a set with the intersection of itself and another. """
pass
def isdisjoint(self, *args, **kwargs): # real signature unknown
""" Return True if two sets have a null intersection. """
pass
def issubset(self, *args, **kwargs): # real signature unknown
""" Report whether another set contains this set. """
pass
def issuperset(self, *args, **kwargs): # real signature unknown
""" Report whether this set contains another set. """
pass
def pop(self, *args, **kwargs): # real signature unknown
"""
Remove and return an arbitrary set element.
Raises KeyError if the set is empty.
"""
pass
def remove(self, *args, **kwargs): # real signature unknown
"""
Remove an element from a set; it must be a member.
If the element is not a member, raise a KeyError.
"""
pass
def symmetric_difference(self, *args, **kwargs): # real signature unknown
"""
Return the symmetric difference of two sets as a new set.
(i.e. all elements that are in exactly one of the sets.)
"""
pass
def symmetric_difference_update(self, *args, **kwargs): # real signature unknown
""" Update a set with the symmetric difference of itself and another. """
pass
def union(self, *args, **kwargs): # real signature unknown
"""
Return the union of sets as a new set.
(i.e. all elements that are in either set.)
"""
pass
def update(self, *args, **kwargs): # real signature unknown
""" Update a set with the union of itself and others. """
pass
def __and__(self, y): # real signature unknown; restored from __doc__
""" x.__and__(y) <==> x&y """
pass
def __cmp__(self, y): # real signature unknown; restored from __doc__
""" x.__cmp__(y) <==> cmp(x,y) """
pass
def __contains__(self, y): # real signature unknown; restored from __doc__
""" x.__contains__(y) <==> y in x. """
pass
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __iand__(self, y): # real signature unknown; restored from __doc__
""" x.__iand__(y) <==> x&=y """
pass
def __init__(self, seq=()): # known special case of set.__init__
"""
set() -> new empty set object
set(iterable) -> new set object
Build an unordered collection of unique elements.
# (copied from class doc)
"""
pass
def __ior__(self, y): # real signature unknown; restored from __doc__
""" x.__ior__(y) <==> x|=y """
pass
def __isub__(self, y): # real signature unknown; restored from __doc__
""" x.__isub__(y) <==> x-=y """
pass
def __iter__(self): # real signature unknown; restored from __doc__
""" x.__iter__() <==> iter(x) """
pass
def __ixor__(self, y): # real signature unknown; restored from __doc__
""" x.__ixor__(y) <==> x^=y """
pass
def __len__(self): # real signature unknown; restored from __doc__
""" x.__len__() <==> len(x) """
pass
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
def __or__(self, y): # real signature unknown; restored from __doc__
""" x.__or__(y) <==> x|y """
pass
def __rand__(self, y): # real signature unknown; restored from __doc__
""" x.__rand__(y) <==> y&x """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Return state information for pickling. """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
def __ror__(self, y): # real signature unknown; restored from __doc__
""" x.__ror__(y) <==> y|x """
pass
def __rsub__(self, y): # real signature unknown; restored from __doc__
""" x.__rsub__(y) <==> y-x """
pass
def __rxor__(self, y): # real signature unknown; restored from __doc__
""" x.__rxor__(y) <==> y^x """
pass
def __sizeof__(self): # real signature unknown; restored from __doc__
""" S.__sizeof__() -> size of S in memory, in bytes """
pass
def __sub__(self, y): # real signature unknown; restored from __doc__
""" x.__sub__(y) <==> x-y """
pass
def __xor__(self, y): # real signature unknown; restored from __doc__
""" x.__xor__(y) <==> x^y """
pass
__hash__ = None
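# Illustrative sketch (not part of the generated skeleton): the set algebra
# documented above, via the operator forms of the named methods.
_a, _b = set([1, 2, 3]), set([3, 4])
assert _a | _b == set([1, 2, 3, 4])  # union
assert _a & _b == set([3])           # intersection
assert _a - _b == set([1, 2])        # difference
assert _a ^ _b == set([1, 2, 4])     # symmetric_difference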
class slice(object):
"""
slice(stop)
slice(start, stop[, step])
Create a slice object. This is used for extended slicing (e.g. a[0:10:2]).
"""
def indices(self, len): # real signature unknown; restored from __doc__
"""
S.indices(len) -> (start, stop, stride)
Assuming a sequence of length len, calculate the start and stop
indices, and the stride length of the extended slice described by
S. Out of bounds indices are clipped in a manner consistent with the
handling of normal slices.
"""
pass
def __cmp__(self, y): # real signature unknown; restored from __doc__
""" x.__cmp__(y) <==> cmp(x,y) """
pass
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __hash__(self): # real signature unknown; restored from __doc__
""" x.__hash__() <==> hash(x) """
pass
def __init__(self, stop): # real signature unknown; restored from __doc__
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Return state information for pickling. """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
start = property(lambda self: 0)
""":type: int"""
step = property(lambda self: 0)
""":type: int"""
stop = property(lambda self: 0)
""":type: int"""
class staticmethod(object):
"""
staticmethod(function) -> method
Convert a function to be a static method.
A static method does not receive an implicit first argument.
To declare a static method, use this idiom:
class C:
def f(arg1, arg2, ...): ...
f = staticmethod(f)
It can be called either on the class (e.g. C.f()) or on an instance
(e.g. C().f()). The instance is ignored except for its class.
Static methods in Python are similar to those found in Java or C++.
For a more advanced concept, see the classmethod builtin.
"""
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __get__(self, obj, type=None): # real signature unknown; restored from __doc__
""" descr.__get__(obj[, type]) -> value """
pass
def __init__(self, function): # real signature unknown; restored from __doc__
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
__func__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
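# Illustrative sketch (not part of the generated skeleton): a static method
# receives no implicit first argument and ignores the instance it is called
# on, as the docstring above describes.
class _MathUtil(object):
    @staticmethod
    def double(x):
        return 2 * x
assert _MathUtil.double(3) == 6    # called on the class
assert _MathUtil().double(3) == 6  # instance is ignored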
class super(object):
"""
super(type, obj) -> bound super object; requires isinstance(obj, type)
super(type) -> unbound super object
super(type, type2) -> bound super object; requires issubclass(type2, type)
Typical use to call a cooperative superclass method:
class C(B):
def meth(self, arg):
super(C, self).meth(arg)
"""
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __get__(self, obj, type=None): # real signature unknown; restored from __doc__
""" descr.__get__(obj[, type]) -> value """
pass
def __init__(self, type1, type2=None): # known special case of super.__init__
"""
super(type, obj) -> bound super object; requires isinstance(obj, type)
super(type) -> unbound super object
super(type, type2) -> bound super object; requires issubclass(type2, type)
Typical use to call a cooperative superclass method:
class C(B):
def meth(self, arg):
super(C, self).meth(arg)
# (copied from class doc)
"""
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
__self_class__ = property(lambda self: type(object))
"""the type of the instance invoking super(); may be None
:type: type
"""
__self__ = property(lambda self: type(object))
"""the instance invoking super(); may be None
:type: type
"""
__thisclass__ = property(lambda self: type(object))
"""the class invoking super()
:type: type
"""
class tuple(object):
"""
tuple() -> empty tuple
tuple(iterable) -> tuple initialized from iterable's items
If the argument is a tuple, the return value is the same object.
"""
def count(self, value): # real signature unknown; restored from __doc__
""" T.count(value) -> integer -- return number of occurrences of value """
return 0
def index(self, value, start=None, stop=None): # real signature unknown; restored from __doc__
"""
T.index(value, [start, [stop]]) -> integer -- return first index of value.
Raises ValueError if the value is not present.
"""
return 0
def __add__(self, y): # real signature unknown; restored from __doc__
""" x.__add__(y) <==> x+y """
pass
def __contains__(self, y): # real signature unknown; restored from __doc__
""" x.__contains__(y) <==> y in x """
pass
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __getitem__(self, y): # real signature unknown; restored from __doc__
""" x.__getitem__(y) <==> x[y] """
pass
def __getnewargs__(self, *args, **kwargs): # real signature unknown
pass
def __getslice__(self, i, j): # real signature unknown; restored from __doc__
"""
x.__getslice__(i, j) <==> x[i:j]
Use of negative indices is not supported.
"""
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __hash__(self): # real signature unknown; restored from __doc__
""" x.__hash__() <==> hash(x) """
pass
def __init__(self, seq=()): # known special case of tuple.__init__
"""
tuple() -> empty tuple
tuple(iterable) -> tuple initialized from iterable's items
If the argument is a tuple, the return value is the same object.
# (copied from class doc)
"""
pass
def __iter__(self): # real signature unknown; restored from __doc__
""" x.__iter__() <==> iter(x) """
pass
def __len__(self): # real signature unknown; restored from __doc__
""" x.__len__() <==> len(x) """
pass
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
def __mul__(self, n): # real signature unknown; restored from __doc__
""" x.__mul__(n) <==> x*n """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
def __rmul__(self, n): # real signature unknown; restored from __doc__
""" x.__rmul__(n) <==> n*x """
pass
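# Illustrative sketch (not part of the generated skeleton): count() and
# index() as documented above; index() raises ValueError when absent.
_t = (1, 2, 2, 3)
assert _t.count(2) == 2
assert _t.index(2) == 1
try:
    _t.index(99)
    raise AssertionError('expected ValueError')
except ValueError:
    pass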
class type(object):
"""
type(object) -> the object's type
type(name, bases, dict) -> a new type
"""
def mro(self): # real signature unknown; restored from __doc__
"""
mro() -> list
return a type's method resolution order
"""
return []
def __call__(self, *more): # real signature unknown; restored from __doc__
""" x.__call__(...) <==> x(...) """
pass
def __delattr__(self, name): # real signature unknown; restored from __doc__
""" x.__delattr__('name') <==> del x.name """
pass
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __hash__(self): # real signature unknown; restored from __doc__
""" x.__hash__() <==> hash(x) """
pass
def __init__(cls, what, bases=None, dict=None): # known special case of type.__init__
"""
type(object) -> the object's type
type(name, bases, dict) -> a new type
# (copied from class doc)
"""
pass
def __instancecheck__(self): # real signature unknown; restored from __doc__
"""
__instancecheck__() -> bool
check if an object is an instance
"""
return False
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
def __setattr__(self, name, value): # real signature unknown; restored from __doc__
""" x.__setattr__('name', value) <==> x.name = value """
pass
def __subclasscheck__(self): # real signature unknown; restored from __doc__
"""
__subclasscheck__() -> bool
check if a class is a subclass
"""
return False
def __subclasses__(self): # real signature unknown; restored from __doc__
""" __subclasses__() -> list of immediate subclasses """
return []
__abstractmethods__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__bases__ = (
object,
)
__base__ = object
__basicsize__ = 872
__dictoffset__ = 264
__dict__ = None # (!) real value is ''
__flags__ = 2148423147
__itemsize__ = 40
__mro__ = (
None, # (!) forward: type, real value is ''
object,
)
__name__ = 'type'
__weakrefoffset__ = 368
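# Illustrative sketch (not part of the generated skeleton): the three-argument
# form of type() builds a class dynamically, equivalent to a class statement.
_Point = type('Point', (object,), {'dims': 2})
assert _Point.__name__ == 'Point'
assert _Point().dims == 2
assert type(_Point) is type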
class unicode(basestring):
"""
unicode(object='') -> unicode object
unicode(string[, encoding[, errors]]) -> unicode object
Create a new Unicode object from the given encoded string.
encoding defaults to the current default string encoding.
errors can be 'strict', 'replace' or 'ignore' and defaults to 'strict'.
"""
def capitalize(self): # real signature unknown; restored from __doc__
"""
S.capitalize() -> unicode
Return a capitalized version of S, i.e. make the first character
have upper case and the rest lower case.
"""
return u""
def center(self, width, fillchar=None): # real signature unknown; restored from __doc__
"""
S.center(width[, fillchar]) -> unicode
Return S centered in a Unicode string of length width. Padding is
done using the specified fill character (default is a space)
"""
return u""
def count(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.count(sub[, start[, end]]) -> int
Return the number of non-overlapping occurrences of substring sub in
Unicode string S[start:end]. Optional arguments start and end are
interpreted as in slice notation.
"""
return 0
def decode(self, encoding=None, errors=None): # real signature unknown; restored from __doc__
"""
S.decode([encoding[,errors]]) -> string or unicode
Decodes S using the codec registered for encoding. encoding defaults
to the default encoding. errors may be given to set a different error
handling scheme. Default is 'strict' meaning that encoding errors raise
a UnicodeDecodeError. Other possible values are 'ignore' and 'replace'
as well as any other name registered with codecs.register_error that is
able to handle UnicodeDecodeErrors.
"""
return ""
def encode(self, encoding=None, errors=None): # real signature unknown; restored from __doc__
"""
S.encode([encoding[,errors]]) -> string or unicode
Encodes S using the codec registered for encoding. encoding defaults
to the default encoding. errors may be given to set a different error
handling scheme. Default is 'strict' meaning that encoding errors raise
a UnicodeEncodeError. Other possible values are 'ignore', 'replace' and
'xmlcharrefreplace' as well as any other name registered with
codecs.register_error that can handle UnicodeEncodeErrors.
"""
return ""
def endswith(self, suffix, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.endswith(suffix[, start[, end]]) -> bool
Return True if S ends with the specified suffix, False otherwise.
With optional start, test S beginning at that position.
With optional end, stop comparing S at that position.
suffix can also be a tuple of strings to try.
"""
return False
def expandtabs(self, tabsize=None): # real signature unknown; restored from __doc__
"""
S.expandtabs([tabsize]) -> unicode
Return a copy of S where all tab characters are expanded using spaces.
If tabsize is not given, a tab size of 8 characters is assumed.
"""
return u""
def find(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.find(sub [,start [,end]]) -> int
Return the lowest index in S where substring sub is found,
such that sub is contained within S[start:end]. Optional
arguments start and end are interpreted as in slice notation.
Return -1 on failure.
"""
return 0
def format(self, *args, **kwargs): # known special case of unicode.format
"""
S.format(*args, **kwargs) -> unicode
Return a formatted version of S, using substitutions from args and kwargs.
The substitutions are identified by braces ('{' and '}').
"""
pass
def index(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.index(sub [,start [,end]]) -> int
Like S.find() but raise ValueError when the substring is not found.
"""
return 0
def isalnum(self): # real signature unknown; restored from __doc__
"""
S.isalnum() -> bool
Return True if all characters in S are alphanumeric
and there is at least one character in S, False otherwise.
"""
return False
def isalpha(self): # real signature unknown; restored from __doc__
"""
S.isalpha() -> bool
Return True if all characters in S are alphabetic
and there is at least one character in S, False otherwise.
"""
return False
def isdecimal(self): # real signature unknown; restored from __doc__
"""
S.isdecimal() -> bool
Return True if there are only decimal characters in S,
False otherwise.
"""
return False
def isdigit(self): # real signature unknown; restored from __doc__
"""
S.isdigit() -> bool
Return True if all characters in S are digits
and there is at least one character in S, False otherwise.
"""
return False
def islower(self): # real signature unknown; restored from __doc__
"""
S.islower() -> bool
Return True if all cased characters in S are lowercase and there is
at least one cased character in S, False otherwise.
"""
return False
def isnumeric(self): # real signature unknown; restored from __doc__
"""
S.isnumeric() -> bool
Return True if there are only numeric characters in S,
False otherwise.
"""
return False
def isspace(self): # real signature unknown; restored from __doc__
"""
S.isspace() -> bool
Return True if all characters in S are whitespace
and there is at least one character in S, False otherwise.
"""
return False
def istitle(self): # real signature unknown; restored from __doc__
"""
S.istitle() -> bool
Return True if S is a titlecased string and there is at least one
character in S, i.e. upper- and titlecase characters may only
follow uncased characters and lowercase characters only cased ones.
Return False otherwise.
"""
return False
def isupper(self): # real signature unknown; restored from __doc__
"""
S.isupper() -> bool
Return True if all cased characters in S are uppercase and there is
at least one cased character in S, False otherwise.
"""
return False
def join(self, iterable): # real signature unknown; restored from __doc__
"""
S.join(iterable) -> unicode
Return a string which is the concatenation of the strings in the
iterable. The separator between elements is S.
"""
return u""
def ljust(self, width, fillchar=None): # real signature unknown; restored from __doc__
"""
S.ljust(width[, fillchar]) -> unicode
Return S left-justified in a Unicode string of length width. Padding is
done using the specified fill character (default is a space).
"""
return u""
def lower(self): # real signature unknown; restored from __doc__
"""
S.lower() -> unicode
Return a copy of the string S converted to lowercase.
"""
return u""
def lstrip(self, chars=None): # real signature unknown; restored from __doc__
"""
S.lstrip([chars]) -> unicode
Return a copy of the string S with leading whitespace removed.
If chars is given and not None, remove characters in chars instead.
If chars is a str, it will be converted to unicode before stripping
"""
return u""
def partition(self, sep): # real signature unknown; restored from __doc__
"""
S.partition(sep) -> (head, sep, tail)
Search for the separator sep in S, and return the part before it,
the separator itself, and the part after it. If the separator is not
found, return S and two empty strings.
"""
pass
def replace(self, old, new, count=None): # real signature unknown; restored from __doc__
"""
S.replace(old, new[, count]) -> unicode
Return a copy of S with all occurrences of substring
old replaced by new. If the optional argument count is
given, only the first count occurrences are replaced.
"""
return u""
def rfind(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.rfind(sub [,start [,end]]) -> int
Return the highest index in S where substring sub is found,
such that sub is contained within S[start:end]. Optional
arguments start and end are interpreted as in slice notation.
Return -1 on failure.
"""
return 0
def rindex(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.rindex(sub [,start [,end]]) -> int
Like S.rfind() but raise ValueError when the substring is not found.
"""
return 0
def rjust(self, width, fillchar=None): # real signature unknown; restored from __doc__
"""
S.rjust(width[, fillchar]) -> unicode
Return S right-justified in a Unicode string of length width. Padding is
done using the specified fill character (default is a space).
"""
return u""
def rpartition(self, sep): # real signature unknown; restored from __doc__
"""
S.rpartition(sep) -> (head, sep, tail)
Search for the separator sep in S, starting at the end of S, and return
the part before it, the separator itself, and the part after it. If the
separator is not found, return two empty strings and S.
"""
pass
def rsplit(self, sep=None, maxsplit=None): # real signature unknown; restored from __doc__
"""
S.rsplit([sep [,maxsplit]]) -> list of strings
Return a list of the words in S, using sep as the
delimiter string, starting at the end of the string and
working to the front. If maxsplit is given, at most maxsplit
splits are done. If sep is not specified, any whitespace string
is a separator.
"""
return []
def rstrip(self, chars=None): # real signature unknown; restored from __doc__
"""
S.rstrip([chars]) -> unicode
Return a copy of the string S with trailing whitespace removed.
If chars is given and not None, remove characters in chars instead.
If chars is a str, it will be converted to unicode before stripping
"""
return u""
def split(self, sep=None, maxsplit=None): # real signature unknown; restored from __doc__
"""
S.split([sep [,maxsplit]]) -> list of strings
Return a list of the words in S, using sep as the
delimiter string. If maxsplit is given, at most maxsplit
splits are done. If sep is not specified or is None, any
whitespace string is a separator and empty strings are
removed from the result.
"""
return []
def splitlines(self, keepends=False): # real signature unknown; restored from __doc__
"""
S.splitlines(keepends=False) -> list of strings
Return a list of the lines in S, breaking at line boundaries.
Line breaks are not included in the resulting list unless keepends
is given and true.
"""
return []
def startswith(self, prefix, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.startswith(prefix[, start[, end]]) -> bool
Return True if S starts with the specified prefix, False otherwise.
With optional start, test S beginning at that position.
With optional end, stop comparing S at that position.
prefix can also be a tuple of strings to try.
"""
return False
def strip(self, chars=None): # real signature unknown; restored from __doc__
"""
S.strip([chars]) -> unicode
Return a copy of the string S with leading and trailing
whitespace removed.
If chars is given and not None, remove characters in chars instead.
If chars is a str, it will be converted to unicode before stripping
"""
return u""
def swapcase(self): # real signature unknown; restored from __doc__
"""
S.swapcase() -> unicode
Return a copy of S with uppercase characters converted to lowercase
and vice versa.
"""
return u""
def title(self): # real signature unknown; restored from __doc__
"""
S.title() -> unicode
Return a titlecased version of S, i.e. words start with title case
characters, all remaining cased characters have lower case.
"""
return u""
def translate(self, table): # real signature unknown; restored from __doc__
"""
S.translate(table) -> unicode
Return a copy of the string S, where all characters have been mapped
through the given translation table, which must be a mapping of
Unicode ordinals to Unicode ordinals, Unicode strings or None.
Unmapped characters are left untouched. Characters mapped to None
are deleted.
"""
return u""
def upper(self): # real signature unknown; restored from __doc__
"""
S.upper() -> unicode
Return a copy of S converted to uppercase.
"""
return u""
def zfill(self, width): # real signature unknown; restored from __doc__
"""
S.zfill(width) -> unicode
Pad a numeric string S with zeros on the left, to fill a field
of the specified width. The string S is never truncated.
"""
return u""
def _formatter_field_name_split(self, *args, **kwargs): # real signature unknown
pass
def _formatter_parser(self, *args, **kwargs): # real signature unknown
pass
def __add__(self, y): # real signature unknown; restored from __doc__
""" x.__add__(y) <==> x+y """
pass
def __contains__(self, y): # real signature unknown; restored from __doc__
""" x.__contains__(y) <==> y in x """
pass
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __format__(self, format_spec): # real signature unknown; restored from __doc__
"""
S.__format__(format_spec) -> unicode
Return a formatted version of S as described by format_spec.
"""
return u""
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __getitem__(self, y): # real signature unknown; restored from __doc__
""" x.__getitem__(y) <==> x[y] """
pass
def __getnewargs__(self, *args, **kwargs): # real signature unknown
pass
def __getslice__(self, i, j): # real signature unknown; restored from __doc__
"""
x.__getslice__(i, j) <==> x[i:j]
Use of negative indices is not supported.
"""
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __hash__(self): # real signature unknown; restored from __doc__
""" x.__hash__() <==> hash(x) """
pass
def __init__(self, string=u'', encoding=None, errors='strict'): # known special case of unicode.__init__
"""
unicode(object='') -> unicode object
unicode(string[, encoding[, errors]]) -> unicode object
Create a new Unicode object from the given encoded string.
encoding defaults to the current default string encoding.
errors can be 'strict', 'replace' or 'ignore' and defaults to 'strict'.
# (copied from class doc)
"""
pass
def __len__(self): # real signature unknown; restored from __doc__
""" x.__len__() <==> len(x) """
pass
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
def __mod__(self, y): # real signature unknown; restored from __doc__
""" x.__mod__(y) <==> x%y """
pass
def __mul__(self, n): # real signature unknown; restored from __doc__
""" x.__mul__(n) <==> x*n """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
def __rmod__(self, y): # real signature unknown; restored from __doc__
""" x.__rmod__(y) <==> y%x """
pass
def __rmul__(self, n): # real signature unknown; restored from __doc__
""" x.__rmul__(n) <==> n*x """
pass
def __sizeof__(self): # real signature unknown; restored from __doc__
""" S.__sizeof__() -> size of S in memory, in bytes """
pass
def __str__(self): # real signature unknown; restored from __doc__
""" x.__str__() <==> str(x) """
pass
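# Illustrative sketch (not part of the generated skeleton): the Python 2
# encode/decode round trip described in the docstrings above.
_u = u'caf\xe9'
_raw = _u.encode('utf-8')  # unicode -> byte string
assert isinstance(_raw, str)
assert _raw.decode('utf-8') == _u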
class xrange(object):
"""
xrange(stop) -> xrange object
xrange(start, stop[, step]) -> xrange object
Like range(), but instead of returning a list, returns an object that
generates the numbers in the range on demand. For looping, this is
slightly faster than range() and more memory efficient.
"""
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __getitem__(self, y): # real signature unknown; restored from __doc__
""" x.__getitem__(y) <==> x[y] """
pass
def __init__(self, stop): # real signature unknown; restored from __doc__
pass
def __iter__(self): # real signature unknown; restored from __doc__
""" x.__iter__() <==> iter(x) """
pass
def __len__(self): # real signature unknown; restored from __doc__
""" x.__len__() <==> len(x) """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
def __reversed__(self, *args, **kwargs): # real signature unknown
""" Returns a reverse iterator. """
pass
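# Illustrative sketch (not part of the generated skeleton): xrange produces
# values on demand, so a huge range costs no memory up front.
_r = xrange(10 ** 9)
assert len(_r) == 10 ** 9
assert _r[5] == 5  # indexing without materializing a list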
# variables with complex values
Ellipsis = None # (!) real value is ''
NotImplemented = None # (!) real value is ''
| apache-2.0 |
allenlavoie/tensorflow | tensorflow/contrib/autograph/converters/logical_expressions.py | 5 | 4878 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converter for logical expressions.
e.g. `a and b -> tf.logical_and(a, b)`. This is not done automatically in TF.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.contrib.autograph.pyct import anno
from tensorflow.contrib.autograph.pyct import parser
from tensorflow.contrib.autograph.pyct import templates
from tensorflow.contrib.autograph.pyct import transformer
# TODO(mdan): Properly extract boolean ops according to lazy eval rules.
# Note that this isn't completely safe either, because tensors may have control
# dependencies.
# Note that, for loops, this rewrite should happen after the loop has been
# converted to tf.while_loop, so that the expanded conditionals are properly
# scoped.
# Used to signal that an operand is safe for non-lazy evaluation.
SAFE_BOOLEAN_OPERAND = 'SAFE_BOOLEAN_OPERAND'
class LogicalExpressionTransformer(transformer.Base):
"""Converts logical expressions to corresponding TF calls."""
def __init__(self, context):
super(LogicalExpressionTransformer, self).__init__(context)
# TODO(mdan): Look into replacing with bitwise operators instead.
# TODO(mdan): Skip replacing if the function is trivial.
self.op_mapping = {
gast.And: 'tf.logical_and',
gast.Eq: 'tf.equal',
gast.Gt: 'tf.greater',
gast.GtE: 'tf.greater_equal',
gast.Lt: 'tf.less',
gast.LtE: 'tf.less_equal',
gast.Not: 'tf.logical_not',
gast.NotEq: 'tf.not_equal',
gast.Or: 'tf.logical_or',
gast.USub: 'tf.negative',
gast.Is: 'autograph_utils.dynamic_is',
gast.IsNot: 'autograph_utils.dynamic_is_not'
}
def _expect_simple_symbol(self, operand):
if isinstance(operand, gast.Name):
return
if anno.hasanno(operand, SAFE_BOOLEAN_OPERAND):
return
raise NotImplementedError(
'only simple local variables are supported in logical and compound '
'comparison expressions; for example, we support "a or b" but not '
'"a.x or b"; for a workaround, assign the expression to a local '
'variable and use that instead, for example "tmp = a.x", "tmp or b"')
def _matching_func(self, operator):
op_type = type(operator)
mapped_op = self.op_mapping.get(op_type)
if not mapped_op:
raise NotImplementedError('operator %s is not yet supported' % op_type)
return mapped_op
def _as_function(self, func_name, args):
template = """
func_name(args)
"""
replacement = templates.replace_as_expression(
template, func_name=parser.parse_expression(func_name), args=args)
anno.setanno(replacement, SAFE_BOOLEAN_OPERAND, True)
return replacement
def visit_Compare(self, node):
node = self.generic_visit(node)
ops_and_comps = list(zip(node.ops, node.comparators))
left = node.left
op_tree = None
# Repeated comparisons are converted to conjunctions:
# a < b < c -> a < b and b < c
while ops_and_comps:
op, right = ops_and_comps.pop(0)
binary_comparison = self._as_function(
self._matching_func(op), (left, right))
if isinstance(left, gast.Name) and isinstance(right, gast.Name):
anno.setanno(binary_comparison, SAFE_BOOLEAN_OPERAND, True)
if op_tree:
self._expect_simple_symbol(right)
op_tree = self._as_function('tf.logical_and',
(binary_comparison, op_tree))
else:
op_tree = binary_comparison
left = right
assert op_tree is not None
return op_tree
def visit_UnaryOp(self, node):
node = self.generic_visit(node)
return self._as_function(self._matching_func(node.op), node.operand)
def visit_BoolOp(self, node):
node = self.generic_visit(node)
node_values = node.values
right = node.values.pop()
self._expect_simple_symbol(right)
while node_values:
left = node_values.pop()
self._expect_simple_symbol(left)
right = self._as_function(self._matching_func(node.op), (left, right))
return right
def transform(node, context):
return LogicalExpressionTransformer(context).visit(node)
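# Illustrative sketch (not part of this module): the chained-comparison
# expansion performed by visit_Compare above, reproduced with plain Python
# callables standing in for the tf.* ops, so the rewrite's semantics can be
# checked without TensorFlow.
def _logical_and(x, y):
    return x and y
def _less(x, y):
    return x < y
# a < b < c is rewritten to logical_and(less(b, c), less(a, b))
_a, _b, _c = 1, 2, 3
assert _logical_and(_less(_b, _c), _less(_a, _b)) == (_a < _b < _c)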
| apache-2.0 |
sangwonl/stage34 | webapp/api/handlers/stage.py | 1 | 6612 | from django.views import View
from django.conf import settings
from datetime import datetime
from api.helpers.mixins import AuthRequiredMixin
from api.helpers.http.jsend import JSENDSuccess, JSENDError
from api.models.resources import Membership, Stage
from libs.utils.model_ext import model_to_dict
from worker.tasks.deployment import (
task_provision_stage,
task_change_stage_status,
task_delete_stage,
task_refresh_stage
)
import pytz
import os
import json
import jwt
SERIALIZE_FIELDS = [
'id',
'title',
'endpoint',
'status',
'repo',
'default_branch',
'branch',
'created_at'
]
class StageRootHandler(AuthRequiredMixin, View):
def get(self, request, *args, **kwargs):
org = Membership.get_org_of_user(request.user)
if not org:
return JSENDError(status_code=400, msg='org not found')
stages_qs = Stage.objects.filter(org=org)
stages = [model_to_dict(s, fields=SERIALIZE_FIELDS) for s in stages_qs]
return JSENDSuccess(status_code=200, data=stages)
def post(self, request, *args, **kwargs):
json_body = json.loads(request.body)
title = json_body.get('title')
repo = json_body.get('repo')
branch = json_body.get('branch')
default_branch = json_body.get('default_branch')
run_on_create = json_body.get('run_on_create', False)
if not (title and repo and default_branch and branch):
return JSENDError(status_code=400, msg='invalid stage info')
org = Membership.get_org_of_user(request.user)
if not org:
return JSENDError(status_code=400, msg='org not found')
stage = Stage.objects.create(
org=org,
title=title,
repo=repo,
default_branch=default_branch,
branch=branch
)
github_access_key = request.user.jwt_payload.get('access_token')
task_provision_stage.apply_async(args=[github_access_key, stage.id, repo, branch, run_on_create])
stage_dict = model_to_dict(stage, fields=SERIALIZE_FIELDS)
return JSENDSuccess(status_code=200, data=stage_dict)
class StageDetailHandler(AuthRequiredMixin, View):
def get_stage(self, org, stage_id):
try:
stage = Stage.objects.get(org=org, id=stage_id)
except Stage.DoesNotExist:
return None
return stage
def get(self, request, stage_id, *args, **kwargs):
org = Membership.get_org_of_user(request.user)
if not org:
return JSENDError(status_code=400, msg='org not found')
stage = self.get_stage(org, stage_id)
if not stage:
return JSENDError(status_code=404, msg='stage not found')
stage_dict = model_to_dict(stage, fields=SERIALIZE_FIELDS)
return JSENDSuccess(status_code=200, data=stage_dict)
def put(self, request, stage_id, *args, **kwargs):
json_body = json.loads(request.body)
new_status = json_body.get('status')
if not new_status or new_status not in ('running', 'paused'):
return JSENDError(status_code=400, msg='invalid stage status')
org = Membership.get_org_of_user(request.user)
if not org:
return JSENDError(status_code=400, msg='org not found')
stage = self.get_stage(org, stage_id)
if not stage:
return JSENDError(status_code=404, msg='stage not found')
cur_status = stage.status
if cur_status != new_status:
github_access_key = request.user.jwt_payload.get('access_token')
task_change_stage_status.apply_async(args=[github_access_key, stage_id, new_status])
new_status = 'changing'
stage.title = json_body.get('title', stage.title)
stage.repo = json_body.get('repo', stage.repo)
stage.default_branch = json_body.get('default_branch', stage.default_branch)
stage.branch = json_body.get('branch', stage.branch)
stage.status = new_status
stage.save()
return JSENDSuccess(status_code=204)
def delete(self, request, stage_id, *args, **kwargs):
org = Membership.get_org_of_user(request.user)
if not org:
return JSENDError(status_code=400, msg='org not found')
stage = self.get_stage(org, stage_id)
if not stage:
return JSENDError(status_code=404, msg='stage not found')
stage.status = 'deleting'
stage.save()
github_access_key = request.user.jwt_payload.get('access_token')
task_delete_stage.apply_async(args=[github_access_key, stage_id])
return JSENDSuccess(status_code=204)
class StageLogHandler(AuthRequiredMixin, View):
def get_log_path(self, stage_id):
return os.path.join(settings.STAGE_REPO_HOME, stage_id, 'output.log')
def get(self, request, stage_id, *args, **kwargs):
org = Membership.get_org_of_user(request.user)
if not org:
return JSENDError(status_code=400, msg='org not found')
log_path = self.get_log_path(stage_id)
if not os.path.exists(log_path):
return JSENDError(status_code=404, msg='log file not found')
log_msgs = []
with open(log_path, 'rt') as f:
log_msg = f.read()
log_msgs = [l for l in log_msg.split('\n') if l]
ts = os.path.getmtime(log_path)
tz = pytz.timezone(settings.TIME_ZONE)
dt = datetime.fromtimestamp(ts, tz=tz)
log_data = {'log_messages': log_msgs, 'log_time': dt.isoformat()}
return JSENDSuccess(status_code=200, data=log_data)
class StageRefreshHandler(AuthRequiredMixin, View):
def get_stage(self, org, stage_id):
try:
stage = Stage.objects.get(org=org, id=stage_id)
except Stage.DoesNotExist:
return None
return stage
def post(self, request, stage_id, *args, **kwargs):
org = Membership.get_org_of_user(request.user)
if not org:
return JSENDError(status_code=400, msg='org not found')
stage = self.get_stage(org, stage_id)
if not stage:
return JSENDError(status_code=404, msg='stage not found')
github_access_key = request.user.jwt_payload.get('access_token')
task_refresh_stage.apply_async(args=[github_access_key, stage_id])
stage.status = 'changing'
stage.save()
return JSENDSuccess(status_code=204)
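# Illustrative sketch (not part of this module, kept commented out to avoid a
# circular import): one plausible urls.py wiring for the handlers above. The
# route shapes and names are assumptions, not taken from the project.
#
# from django.conf.urls import url
# from api.handlers import stage
#
# urlpatterns = [
#     url(r'^stages/?$', stage.StageRootHandler.as_view()),
#     url(r'^stages/(?P<stage_id>[^/]+)/?$', stage.StageDetailHandler.as_view()),
#     url(r'^stages/(?P<stage_id>[^/]+)/logs/?$', stage.StageLogHandler.as_view()),
#     url(r'^stages/(?P<stage_id>[^/]+)/refresh/?$', stage.StageRefreshHandler.as_view()),
# ]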
| mit |
martinbuc/missionplanner | packages/IronPython.StdLib.2.7.4/content/Lib/distutils/command/sdist.py | 42 | 18344 | """distutils.command.sdist
Implements the Distutils 'sdist' command (create a source distribution)."""
__revision__ = "$Id$"
import os
import string
import sys
from glob import glob
from warnings import warn
from distutils.core import Command
from distutils import dir_util, dep_util, file_util, archive_util
from distutils.text_file import TextFile
from distutils.errors import (DistutilsPlatformError, DistutilsOptionError,
DistutilsTemplateError)
from distutils.filelist import FileList
from distutils import log
from distutils.util import convert_path
def show_formats():
"""Print all possible values for the 'formats' option (used by
the "--help-formats" command-line option).
"""
from distutils.fancy_getopt import FancyGetopt
from distutils.archive_util import ARCHIVE_FORMATS
formats = []
for format in ARCHIVE_FORMATS.keys():
formats.append(("formats=" + format, None,
ARCHIVE_FORMATS[format][2]))
formats.sort()
FancyGetopt(formats).print_help(
"List of available source distribution formats:")
class sdist(Command):
description = "create a source distribution (tarball, zip file, etc.)"
def checking_metadata(self):
"""Callable used for the check sub-command.
Placed here so user_options can view it"""
return self.metadata_check
user_options = [
('template=', 't',
"name of manifest template file [default: MANIFEST.in]"),
('manifest=', 'm',
"name of manifest file [default: MANIFEST]"),
('use-defaults', None,
"include the default file set in the manifest "
"[default; disable with --no-defaults]"),
('no-defaults', None,
"don't include the default file set"),
('prune', None,
"specifically exclude files/directories that should not be "
"distributed (build tree, RCS/CVS dirs, etc.) "
"[default; disable with --no-prune]"),
('no-prune', None,
"don't automatically exclude anything"),
('manifest-only', 'o',
"just regenerate the manifest and then stop "
"(implies --force-manifest)"),
('force-manifest', 'f',
"forcibly regenerate the manifest and carry on as usual. "
"Deprecated: now the manifest is always regenerated."),
('formats=', None,
"formats for source distribution (comma-separated list)"),
('keep-temp', 'k',
"keep the distribution tree around after creating " +
"archive file(s)"),
('dist-dir=', 'd',
"directory to put the source distribution archive(s) in "
"[default: dist]"),
('metadata-check', None,
"Ensure that all required elements of meta-data "
"are supplied. Warn if any missing. [default]"),
('owner=', 'u',
"Owner name used when creating a tar file [default: current user]"),
('group=', 'g',
"Group name used when creating a tar file [default: current group]"),
]
boolean_options = ['use-defaults', 'prune',
'manifest-only', 'force-manifest',
'keep-temp', 'metadata-check']
help_options = [
('help-formats', None,
"list available distribution formats", show_formats),
]
negative_opt = {'no-defaults': 'use-defaults',
'no-prune': 'prune' }
default_format = {'posix': 'gztar',
'nt': 'zip' }
sub_commands = [('check', checking_metadata)]
def initialize_options(self):
# 'template' and 'manifest' are, respectively, the names of
# the manifest template and manifest file.
self.template = None
self.manifest = None
# 'use_defaults': if true, we will include the default file set
# in the manifest
self.use_defaults = 1
self.prune = 1
self.manifest_only = 0
self.force_manifest = 0
self.formats = None
self.keep_temp = 0
self.dist_dir = None
self.archive_files = None
self.metadata_check = 1
self.owner = None
self.group = None
def finalize_options(self):
if self.manifest is None:
self.manifest = "MANIFEST"
if self.template is None:
self.template = "MANIFEST.in"
self.ensure_string_list('formats')
if self.formats is None:
try:
self.formats = [self.default_format[os.name]]
except KeyError:
raise DistutilsPlatformError, \
"don't know how to create source distributions " + \
"on platform %s" % os.name
bad_format = archive_util.check_archive_formats(self.formats)
if bad_format:
raise DistutilsOptionError, \
"unknown archive format '%s'" % bad_format
if self.dist_dir is None:
self.dist_dir = "dist"
def run(self):
# 'filelist' contains the list of files that will make up the
# manifest
self.filelist = FileList()
# Run sub commands
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
# Do whatever it takes to get the list of files to process
# (process the manifest template, read an existing manifest,
# whatever). File list is accumulated in 'self.filelist'.
self.get_file_list()
# If user just wanted us to regenerate the manifest, stop now.
if self.manifest_only:
return
# Otherwise, go ahead and create the source distribution tarball,
# or zipfile, or whatever.
self.make_distribution()
def check_metadata(self):
"""Deprecated API."""
warn("distutils.command.sdist.check_metadata is deprecated, \
use the check command instead", PendingDeprecationWarning)
check = self.distribution.get_command_obj('check')
check.ensure_finalized()
check.run()
def get_file_list(self):
"""Figure out the list of files to include in the source
distribution, and put it in 'self.filelist'. This might involve
reading the manifest template (and writing the manifest), or just
reading the manifest, or just using the default file set -- it all
depends on the user's options.
"""
# new behavior:
# the file list is recalculated every time because
# even if MANIFEST.in or setup.py are not changed
# the user might have added some files in the tree that
# need to be included.
#
# This makes --force the default and only behavior.
template_exists = os.path.isfile(self.template)
if not template_exists:
self.warn(("manifest template '%s' does not exist " +
"(using default file list)") %
self.template)
self.filelist.findall()
if self.use_defaults:
self.add_defaults()
if template_exists:
self.read_template()
if self.prune:
self.prune_file_list()
self.filelist.sort()
self.filelist.remove_duplicates()
self.write_manifest()
def add_defaults(self):
"""Add all the default files to self.filelist:
- README or README.txt
- setup.py
- test/test*.py
- all pure Python modules mentioned in setup script
- all files pointed by package_data (build_py)
- all files defined in data_files.
- all files defined as scripts.
- all C sources listed as part of extensions or C libraries
in the setup script (doesn't catch C headers!)
Warns if (README or README.txt) or setup.py are missing; everything
else is optional.
"""
standards = [('README', 'README.txt'), self.distribution.script_name]
for fn in standards:
if isinstance(fn, tuple):
alts = fn
got_it = 0
for fn in alts:
if os.path.exists(fn):
got_it = 1
self.filelist.append(fn)
break
if not got_it:
self.warn("standard file not found: should have one of " +
string.join(alts, ', '))
else:
if os.path.exists(fn):
self.filelist.append(fn)
else:
self.warn("standard file '%s' not found" % fn)
optional = ['test/test*.py', 'setup.cfg']
for pattern in optional:
files = filter(os.path.isfile, glob(pattern))
if files:
self.filelist.extend(files)
# build_py is used to get:
# - python modules
# - files defined in package_data
build_py = self.get_finalized_command('build_py')
# getting python files
if self.distribution.has_pure_modules():
self.filelist.extend(build_py.get_source_files())
# getting package_data files
# (computed in build_py.data_files by build_py.finalize_options)
for pkg, src_dir, build_dir, filenames in build_py.data_files:
for filename in filenames:
self.filelist.append(os.path.join(src_dir, filename))
# getting distribution.data_files
if self.distribution.has_data_files():
for item in self.distribution.data_files:
if isinstance(item, str): # plain file
item = convert_path(item)
if os.path.isfile(item):
self.filelist.append(item)
else: # a (dirname, filenames) tuple
dirname, filenames = item
for f in filenames:
f = convert_path(f)
if os.path.isfile(f):
self.filelist.append(f)
if self.distribution.has_ext_modules():
build_ext = self.get_finalized_command('build_ext')
self.filelist.extend(build_ext.get_source_files())
if self.distribution.has_c_libraries():
build_clib = self.get_finalized_command('build_clib')
self.filelist.extend(build_clib.get_source_files())
if self.distribution.has_scripts():
build_scripts = self.get_finalized_command('build_scripts')
self.filelist.extend(build_scripts.get_source_files())
def read_template(self):
"""Read and parse manifest template file named by self.template.
(usually "MANIFEST.in") The parsing and processing is done by
'self.filelist', which updates itself accordingly.
"""
log.info("reading manifest template '%s'", self.template)
template = TextFile(self.template,
strip_comments=1,
skip_blanks=1,
join_lines=1,
lstrip_ws=1,
rstrip_ws=1,
collapse_join=1)
while 1:
line = template.readline()
if line is None: # end of file
break
try:
self.filelist.process_template_line(line)
except DistutilsTemplateError, msg:
self.warn("%s, line %d: %s" % (template.filename,
template.current_line,
msg))
def prune_file_list(self):
"""Prune off branches that might slip into the file list as created
by 'read_template()', but really don't belong there:
* the build tree (typically "build")
* the release tree itself (only an issue if we ran "sdist"
previously with --keep-temp, or it aborted)
* any RCS, CVS, .svn, .hg, .git, .bzr, _darcs directories
"""
build = self.get_finalized_command('build')
base_dir = self.distribution.get_fullname()
self.filelist.exclude_pattern(None, prefix=build.build_base)
self.filelist.exclude_pattern(None, prefix=base_dir)
# pruning out vcs directories
# both separators are used under win32
if sys.platform == 'win32':
seps = r'/|\\'
else:
seps = '/'
vcs_dirs = ['RCS', 'CVS', r'\.svn', r'\.hg', r'\.git', r'\.bzr',
'_darcs']
vcs_ptrn = r'(^|%s)(%s)(%s).*' % (seps, '|'.join(vcs_dirs), seps)
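        # e.g. with seps='/' the resulting pattern looks like:
        #   (^|/)(RCS|CVS|\.svn|\.hg|\.git|\.bzr|_darcs)(/).*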
self.filelist.exclude_pattern(vcs_ptrn, is_regex=1)
def write_manifest(self):
"""Write the file list in 'self.filelist' (presumably as filled in
by 'add_defaults()' and 'read_template()') to the manifest file
named by 'self.manifest'.
"""
if os.path.isfile(self.manifest):
fp = open(self.manifest)
try:
first_line = fp.readline()
finally:
fp.close()
if first_line != '# file GENERATED by distutils, do NOT edit\n':
log.info("not writing to manually maintained "
"manifest file '%s'" % self.manifest)
return
content = self.filelist.files[:]
content.insert(0, '# file GENERATED by distutils, do NOT edit')
self.execute(file_util.write_file, (self.manifest, content),
"writing manifest file '%s'" % self.manifest)
def read_manifest(self):
"""Read the manifest file (named by 'self.manifest') and use it to
fill in 'self.filelist', the list of files to include in the source
distribution.
"""
log.info("reading manifest file '%s'", self.manifest)
manifest = open(self.manifest)
while 1:
line = manifest.readline()
if line == '': # end of file
break
if line[-1] == '\n':
line = line[0:-1]
self.filelist.append(line)
manifest.close()
def make_release_tree(self, base_dir, files):
"""Create the directory tree that will become the source
distribution archive. All directories implied by the filenames in
'files' are created under 'base_dir', and then we hard link or copy
(if hard linking is unavailable) those files into place.
Essentially, this duplicates the developer's source tree, but in a
directory named after the distribution, containing only the files
to be distributed.
"""
# Create all the directories under 'base_dir' necessary to
# put 'files' there; the 'mkpath()' is just so we don't die
# if the manifest happens to be empty.
self.mkpath(base_dir)
dir_util.create_tree(base_dir, files, dry_run=self.dry_run)
# And walk over the list of files, either making a hard link (if
# os.link exists) to each one that doesn't already exist in its
# corresponding location under 'base_dir', or copying each file
# that's out-of-date in 'base_dir'. (Usually, all files will be
# out-of-date, because by default we blow away 'base_dir' when
# we're done making the distribution archives.)
if hasattr(os, 'link'): # can make hard links on this system
link = 'hard'
msg = "making hard links in %s..." % base_dir
else: # nope, have to copy
link = None
msg = "copying files to %s..." % base_dir
if not files:
log.warn("no files to distribute -- empty manifest?")
else:
log.info(msg)
for file in files:
if not os.path.isfile(file):
log.warn("'%s' not a regular file -- skipping" % file)
else:
dest = os.path.join(base_dir, file)
self.copy_file(file, dest, link=link)
self.distribution.metadata.write_pkg_info(base_dir)
def make_distribution(self):
"""Create the source distribution(s). First, we create the release
tree with 'make_release_tree()'; then, we create all required
archive files (according to 'self.formats') from the release tree.
Finally, we clean up by blowing away the release tree (unless
'self.keep_temp' is true). The list of archive files created is
stored so it can be retrieved later by 'get_archive_files()'.
"""
# Don't warn about missing meta-data here -- should be (and is!)
# done elsewhere.
base_dir = self.distribution.get_fullname()
base_name = os.path.join(self.dist_dir, base_dir)
self.make_release_tree(base_dir, self.filelist.files)
archive_files = [] # remember names of files we create
        # the tar archive must be created last to avoid being overwritten and removed
if 'tar' in self.formats:
self.formats.append(self.formats.pop(self.formats.index('tar')))
for fmt in self.formats:
file = self.make_archive(base_name, fmt, base_dir=base_dir,
owner=self.owner, group=self.group)
archive_files.append(file)
self.distribution.dist_files.append(('sdist', '', file))
self.archive_files = archive_files
if not self.keep_temp:
dir_util.remove_tree(base_dir, dry_run=self.dry_run)
def get_archive_files(self):
"""Return the list of archive files created when the command
was run, or None if the command hasn't run yet.
"""
return self.archive_files
| gpl-3.0 |
jimkmc/micropython | tests/bytecode/pylib-tests/abc.py | 765 | 8057 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) according to PEP 3119."""
from _weakrefset import WeakSet
def abstractmethod(funcobj):
"""A decorator indicating abstract methods.
Requires that the metaclass is ABCMeta or derived from it. A
class that has a metaclass derived from ABCMeta cannot be
instantiated unless all of its abstract methods are overridden.
The abstract methods can be called using any of the normal
'super' call mechanisms.
Usage:
class C(metaclass=ABCMeta):
@abstractmethod
def my_abstract_method(self, ...):
...
"""
funcobj.__isabstractmethod__ = True
return funcobj
class abstractclassmethod(classmethod):
"""
A decorator indicating abstract classmethods.
Similar to abstractmethod.
Usage:
class C(metaclass=ABCMeta):
@abstractclassmethod
def my_abstract_classmethod(cls, ...):
...
'abstractclassmethod' is deprecated. Use 'classmethod' with
'abstractmethod' instead.
"""
__isabstractmethod__ = True
def __init__(self, callable):
callable.__isabstractmethod__ = True
super().__init__(callable)
class abstractstaticmethod(staticmethod):
"""
A decorator indicating abstract staticmethods.
Similar to abstractmethod.
Usage:
class C(metaclass=ABCMeta):
@abstractstaticmethod
def my_abstract_staticmethod(...):
...
'abstractstaticmethod' is deprecated. Use 'staticmethod' with
'abstractmethod' instead.
"""
__isabstractmethod__ = True
def __init__(self, callable):
callable.__isabstractmethod__ = True
super().__init__(callable)
class abstractproperty(property):
"""
A decorator indicating abstract properties.
Requires that the metaclass is ABCMeta or derived from it. A
class that has a metaclass derived from ABCMeta cannot be
instantiated unless all of its abstract properties are overridden.
The abstract properties can be called using any of the normal
'super' call mechanisms.
Usage:
class C(metaclass=ABCMeta):
@abstractproperty
def my_abstract_property(self):
...
This defines a read-only property; you can also define a read-write
abstract property using the 'long' form of property declaration:
class C(metaclass=ABCMeta):
def getx(self): ...
def setx(self, value): ...
x = abstractproperty(getx, setx)
'abstractproperty' is deprecated. Use 'property' with 'abstractmethod'
instead.
"""
__isabstractmethod__ = True
class ABCMeta(type):
"""Metaclass for defining Abstract Base Classes (ABCs).
Use this metaclass to create an ABC. An ABC can be subclassed
directly, and then acts as a mix-in class. You can also register
unrelated concrete classes (even built-in classes) and unrelated
ABCs as 'virtual subclasses' -- these and their descendants will
be considered subclasses of the registering ABC by the built-in
issubclass() function, but the registering ABC won't show up in
their MRO (Method Resolution Order) nor will method
implementations defined by the registering ABC be callable (not
even via super()).
"""
# A global counter that is incremented each time a class is
# registered as a virtual subclass of anything. It forces the
# negative cache to be cleared before its next use.
_abc_invalidation_counter = 0
def __new__(mcls, name, bases, namespace):
cls = super().__new__(mcls, name, bases, namespace)
# Compute set of abstract method names
abstracts = {name
for name, value in namespace.items()
if getattr(value, "__isabstractmethod__", False)}
for base in bases:
for name in getattr(base, "__abstractmethods__", set()):
value = getattr(cls, name, None)
if getattr(value, "__isabstractmethod__", False):
abstracts.add(name)
cls.__abstractmethods__ = frozenset(abstracts)
# Set up inheritance registry
cls._abc_registry = WeakSet()
cls._abc_cache = WeakSet()
cls._abc_negative_cache = WeakSet()
cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
return cls
def register(cls, subclass):
"""Register a virtual subclass of an ABC.
Returns the subclass, to allow usage as a class decorator.
"""
if not isinstance(subclass, type):
raise TypeError("Can only register classes")
if issubclass(subclass, cls):
return subclass # Already a subclass
# Subtle: test for cycles *after* testing for "already a subclass";
# this means we allow X.register(X) and interpret it as a no-op.
if issubclass(cls, subclass):
# This would create a cycle, which is bad for the algorithm below
raise RuntimeError("Refusing to create an inheritance cycle")
cls._abc_registry.add(subclass)
ABCMeta._abc_invalidation_counter += 1 # Invalidate negative cache
return subclass
def _dump_registry(cls, file=None):
"""Debug helper to print the ABC registry."""
print("Class: %s.%s" % (cls.__module__, cls.__name__), file=file)
print("Inv.counter: %s" % ABCMeta._abc_invalidation_counter, file=file)
for name in sorted(cls.__dict__.keys()):
if name.startswith("_abc_"):
value = getattr(cls, name)
print("%s: %r" % (name, value), file=file)
def __instancecheck__(cls, instance):
"""Override for isinstance(instance, cls)."""
# Inline the cache checking
subclass = instance.__class__
if subclass in cls._abc_cache:
return True
subtype = type(instance)
if subtype is subclass:
if (cls._abc_negative_cache_version ==
ABCMeta._abc_invalidation_counter and
subclass in cls._abc_negative_cache):
return False
# Fall back to the subclass check.
return cls.__subclasscheck__(subclass)
return any(cls.__subclasscheck__(c) for c in {subclass, subtype})
def __subclasscheck__(cls, subclass):
"""Override for issubclass(subclass, cls)."""
# Check cache
if subclass in cls._abc_cache:
return True
# Check negative cache; may have to invalidate
if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter:
# Invalidate the negative cache
cls._abc_negative_cache = WeakSet()
cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
elif subclass in cls._abc_negative_cache:
return False
# Check the subclass hook
ok = cls.__subclasshook__(subclass)
if ok is not NotImplemented:
assert isinstance(ok, bool)
if ok:
cls._abc_cache.add(subclass)
else:
cls._abc_negative_cache.add(subclass)
return ok
# Check if it's a direct subclass
if cls in getattr(subclass, '__mro__', ()):
cls._abc_cache.add(subclass)
return True
# Check if it's a subclass of a registered class (recursive)
for rcls in cls._abc_registry:
if issubclass(subclass, rcls):
cls._abc_cache.add(subclass)
return True
# Check if it's a subclass of a subclass (recursive)
for scls in cls.__subclasses__():
if issubclass(subclass, scls):
cls._abc_cache.add(subclass)
return True
# No dice; update negative cache
cls._abc_negative_cache.add(subclass)
return False
| mit |
globaltoken/globaltoken | test/functional/test_framework/authproxy.py | 1 | 7759 | # Copyright (c) 2011 Jeff Garzik
#
# Previous copyright, from python-jsonrpc/jsonrpc/proxy.py:
#
# Copyright (c) 2007 Jan-Klaas Kollhof
#
# This file is part of jsonrpc.
#
# jsonrpc is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this software; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""HTTP proxy for opening RPC connection to globaltokend.
AuthServiceProxy has the following improvements over python-jsonrpc's
ServiceProxy class:
- HTTP connections persist for the life of the AuthServiceProxy object
(if server supports HTTP/1.1)
- sends protocol 'version', per JSON-RPC 1.1
- sends proper, incrementing 'id'
- sends Basic HTTP authentication headers
- parses all JSON numbers that look like floats as Decimal
- uses standard Python json lib
"""
import base64
import decimal
import http.client
import json
import logging
import socket
import time
import urllib.parse
HTTP_TIMEOUT = 30
USER_AGENT = "AuthServiceProxy/0.1"
log = logging.getLogger("BitcoinRPC")
class JSONRPCException(Exception):
def __init__(self, rpc_error):
try:
errmsg = '%(message)s (%(code)i)' % rpc_error
except (KeyError, TypeError):
errmsg = ''
super().__init__(errmsg)
self.error = rpc_error
def EncodeDecimal(o):
if isinstance(o, decimal.Decimal):
return str(o)
raise TypeError(repr(o) + " is not JSON serializable")
class AuthServiceProxy():
__id_count = 0
# ensure_ascii: escape unicode as \uXXXX, passed to json.dumps
def __init__(self, service_url, service_name=None, timeout=HTTP_TIMEOUT, connection=None, ensure_ascii=True):
self.__service_url = service_url
self._service_name = service_name
self.ensure_ascii = ensure_ascii # can be toggled on the fly by tests
self.__url = urllib.parse.urlparse(service_url)
port = 80 if self.__url.port is None else self.__url.port
user = None if self.__url.username is None else self.__url.username.encode('utf8')
passwd = None if self.__url.password is None else self.__url.password.encode('utf8')
authpair = user + b':' + passwd
self.__auth_header = b'Basic ' + base64.b64encode(authpair)
if connection:
# Callables re-use the connection of the original proxy
self.__conn = connection
elif self.__url.scheme == 'https':
self.__conn = http.client.HTTPSConnection(self.__url.hostname, port, timeout=timeout)
else:
self.__conn = http.client.HTTPConnection(self.__url.hostname, port, timeout=timeout)
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
# Python internal stuff
raise AttributeError
if self._service_name is not None:
name = "%s.%s" % (self._service_name, name)
return AuthServiceProxy(self.__service_url, name, connection=self.__conn)
def _request(self, method, path, postdata):
'''
        Do an HTTP request, with retry if we get disconnected (e.g. due to a timeout).
This is a workaround for https://bugs.python.org/issue3566 which is fixed in Python 3.5.
'''
headers = {'Host': self.__url.hostname,
'User-Agent': USER_AGENT,
'Authorization': self.__auth_header,
'Content-type': 'application/json'}
try:
self.__conn.request(method, path, postdata, headers)
return self._get_response()
except http.client.BadStatusLine as e:
if e.line == "''": # if connection was closed, try again
self.__conn.close()
self.__conn.request(method, path, postdata, headers)
return self._get_response()
else:
raise
except (BrokenPipeError, ConnectionResetError):
# Python 3.5+ raises BrokenPipeError instead of BadStatusLine when the connection was reset
# ConnectionResetError happens on FreeBSD with Python 3.4
self.__conn.close()
self.__conn.request(method, path, postdata, headers)
return self._get_response()
def get_request(self, *args, **argsn):
AuthServiceProxy.__id_count += 1
log.debug("-%s-> %s %s" % (AuthServiceProxy.__id_count, self._service_name,
json.dumps(args, default=EncodeDecimal, ensure_ascii=self.ensure_ascii)))
if args and argsn:
raise ValueError('Cannot handle both named and positional arguments')
return {'version': '1.1',
'method': self._service_name,
'params': args or argsn,
'id': AuthServiceProxy.__id_count}
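    # For example, proxy.getblock('somehash') ('getblock' is a hypothetical
    # RPC method) would POST a JSON body like:
    #   {"version": "1.1", "method": "getblock", "params": ["somehash"], "id": 1}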
def __call__(self, *args, **argsn):
postdata = json.dumps(self.get_request(*args, **argsn), default=EncodeDecimal, ensure_ascii=self.ensure_ascii)
response = self._request('POST', self.__url.path, postdata.encode('utf-8'))
if response['error'] is not None:
raise JSONRPCException(response['error'])
elif 'result' not in response:
raise JSONRPCException({
'code': -343, 'message': 'missing JSON-RPC result'})
else:
return response['result']
def batch(self, rpc_call_list):
postdata = json.dumps(list(rpc_call_list), default=EncodeDecimal, ensure_ascii=self.ensure_ascii)
log.debug("--> " + postdata)
return self._request('POST', self.__url.path, postdata.encode('utf-8'))
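    # Example (a sketch; the method names are hypothetical): several calls
    # can share one HTTP round trip.
    #   calls = [proxy.getblockcount.get_request(),
    #            proxy.getbestblockhash.get_request()]
    #   results = proxy.batch(calls)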
def _get_response(self):
req_start_time = time.time()
try:
http_response = self.__conn.getresponse()
except socket.timeout as e:
raise JSONRPCException({
'code': -344,
'message': '%r RPC took longer than %f seconds. Consider '
'using larger timeout for calls that take '
'longer to return.' % (self._service_name,
self.__conn.timeout)})
if http_response is None:
raise JSONRPCException({
'code': -342, 'message': 'missing HTTP response from server'})
content_type = http_response.getheader('Content-Type')
if content_type != 'application/json':
raise JSONRPCException({
'code': -342, 'message': 'non-JSON HTTP response with \'%i %s\' from server' % (http_response.status, http_response.reason)})
responsedata = http_response.read().decode('utf8')
response = json.loads(responsedata, parse_float=decimal.Decimal)
elapsed = time.time() - req_start_time
if "error" in response and response["error"] is None:
log.debug("<-%s- [%.6f] %s" % (response["id"], elapsed, json.dumps(response["result"], default=EncodeDecimal, ensure_ascii=self.ensure_ascii)))
else:
log.debug("<-- [%.6f] %s" % (elapsed, responsedata))
return response
def __truediv__(self, relative_uri):
return AuthServiceProxy("{}/{}".format(self.__service_url, relative_uri), self._service_name, connection=self.__conn)
| mit |
jtomasek/tuskar-ui-1 | tuskar_ui/infrastructure/resource_management/resource_classes/workflows.py | 1 | 12384 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import workflows
from tuskar_ui import api as tuskar
import tuskar_ui.workflows
from tuskar_ui.infrastructure. \
    resource_management.resource_classes.tables import FlavorTemplatesTable
from tuskar_ui.infrastructure. \
    resource_management.resource_classes.tables import RacksTable
class ResourceClassInfoAndFlavorsAction(workflows.Action):
name = forms.CharField(max_length=255,
label=_("Class Name"),
help_text="",
required=True)
service_type = forms.ChoiceField(label=_('Class Type'),
required=True,
choices=[('', ''),
('compute',
('Compute')),
('not_compute',
('Non Compute')),
],
widget=forms.Select(
attrs={'class': 'switchable'})
)
image = forms.ChoiceField(label=_('Provisioning Image'),
required=True,
choices=[('compute-img', ('overcloud-compute'))],
widget=forms.Select(
attrs={'class': 'switchable'})
)
def clean(self):
cleaned_data = super(ResourceClassInfoAndFlavorsAction,
self).clean()
name = cleaned_data.get('name')
resource_class_id = self.initial.get('resource_class_id', None)
try:
resource_classes = tuskar.ResourceClass.list(self.request)
except Exception:
resource_classes = []
msg = _('Unable to get resource class list')
exceptions.check_message(["Connection", "refused"], msg)
raise
for resource_class in resource_classes:
if resource_class.name == name and \
resource_class_id != resource_class.id:
raise forms.ValidationError(
_('The name "%s" is already used by'
' another resource class.')
% name
)
return cleaned_data
class Meta:
name = _("Class Settings")
help_text = _("From here you can fill the class "
"settings and add flavors to class.")
class CreateResourceClassInfoAndFlavors(tuskar_ui.workflows.TableStep):
table_classes = (FlavorTemplatesTable,)
action_class = ResourceClassInfoAndFlavorsAction
template_name = 'infrastructure/resource_management/resource_classes/'\
'_resource_class_info_and_flavors_step.html'
contributes = ("name", "service_type", "flavors_object_ids",
'max_vms')
def contribute(self, data, context):
request = self.workflow.request
if data:
context["flavors_object_ids"] =\
request.POST.getlist("flavors_object_ids")
            # TODO(lsmola): Django can't parse a dictionary from POST;
            # this should be rewritten to a Django formset
context["max_vms"] = {}
for index, value in request.POST.items():
match = re.match(
'^(flavors_object_ids__max_vms__(.*?))$',
index)
if match:
context["max_vms"][match.groups()[1]] = value
context.update(data)
return context
def get_flavors_data(self):
try:
resource_class_id = self.workflow.context.get("resource_class_id")
if resource_class_id:
resource_class = tuskar.ResourceClass.get(
self.workflow.request,
resource_class_id)
                # TODO(lsmola): ugly interface, rewrite
self._tables['flavors'].active_multi_select_values = \
resource_class.flavortemplates_ids
all_flavors = resource_class.all_flavors
else:
all_flavors = tuskar.FlavorTemplate.list(
self.workflow.request)
except Exception:
all_flavors = []
exceptions.handle(self.workflow.request,
_('Unable to retrieve resource flavors list.'))
return all_flavors
class RacksAction(workflows.Action):
class Meta:
name = _("Racks")
class CreateRacks(tuskar_ui.workflows.TableStep):
table_classes = (RacksTable,)
action_class = RacksAction
contributes = ("racks_object_ids")
template_name = 'infrastructure/resource_management/'\
'resource_classes/_racks_step.html'
def contribute(self, data, context):
request = self.workflow.request
context["racks_object_ids"] =\
request.POST.getlist("racks_object_ids")
context.update(data)
return context
def get_racks_data(self):
try:
resource_class_id = self.workflow.context.get("resource_class_id")
if resource_class_id:
resource_class = tuskar.ResourceClass.get(
self.workflow.request,
resource_class_id)
                # TODO(lsmola): ugly interface, rewrite
self._tables['racks'].active_multi_select_values = \
resource_class.racks_ids
racks = \
resource_class.all_racks
else:
racks = \
tuskar.Rack.list(self.workflow.request, True)
except Exception:
racks = []
exceptions.handle(self.workflow.request,
_('Unable to retrieve racks list.'))
return racks
class ResourceClassWorkflowMixin:
    # FIXME: active tabs conflict
    # When a page with tabs uses a workflow with more steps, the active
    # tabs conflict and the first tab is always shown after an action.
    # So I explicitly specify which tab it should redirect to after the
    # action, until the conflict is fixed in Horizon.
def get_index_url(self):
"""This url is used both as success and failure url"""
return "%s?tab=resource_management_tabs__resource_classes_tab" %\
reverse("horizon:infrastructure:resource_management:index")
def get_success_url(self):
return self.get_index_url()
def get_failure_url(self):
return self.get_index_url()
def format_status_message(self, message):
name = self.context.get('name')
return message % name
def _get_flavors(self, request, data):
flavors = []
flavor_ids = data.get('flavors_object_ids') or []
max_vms = data.get('max_vms')
resource_class_name = data['name']
for template_id in flavor_ids:
template = tuskar.FlavorTemplate.get(request, template_id)
capacities = []
for c in template.capacities:
capacities.append({'name': c.name,
'value': str(c.value),
'unit': c.unit})
# FIXME: tuskar uses resource-class-name prefix for flavors,
# e.g. m1.large, we add rc name to the template name:
flavor_name = "%s.%s" % (resource_class_name, template.name)
flavors.append({'name': flavor_name,
'max_vms': max_vms.get(template.id, None),
'capacities': capacities})
return flavors
def _add_racks(self, request, data, resource_class):
ids_to_add = data.get('racks_object_ids') or []
resource_class.set_racks(request, ids_to_add)
class CreateResourceClass(ResourceClassWorkflowMixin, workflows.Workflow):
default_steps = (CreateResourceClassInfoAndFlavors,
CreateRacks)
slug = "create_resource_class"
name = _("Create Class")
finalize_button_name = _("Create Class")
success_message = _('Created class "%s".')
failure_message = _('Unable to create class "%s".')
def _create_resource_class_info(self, request, data):
try:
flavors = self._get_flavors(request, data)
return tuskar.ResourceClass.create(
request,
name=data['name'],
service_type=data['service_type'],
flavors=flavors)
except Exception:
redirect = self.get_failure_url()
exceptions.handle(request,
_('Unable to create resource class.'),
redirect=redirect)
return None
def handle(self, request, data):
resource_class = self._create_resource_class_info(request, data)
self._add_racks(request, data, resource_class)
return True
class UpdateResourceClassInfoAndFlavors(CreateResourceClassInfoAndFlavors):
depends_on = ("resource_class_id",)
class UpdateRacks(CreateRacks):
depends_on = ("resource_class_id",)
class UpdateResourceClass(ResourceClassWorkflowMixin, workflows.Workflow):
default_steps = (UpdateResourceClassInfoAndFlavors,
UpdateRacks)
slug = "update_resource_class"
name = _("Update Class")
finalize_button_name = _("Update Class")
success_message = _('Updated class "%s".')
failure_message = _('Unable to update class "%s".')
def _update_resource_class_info(self, request, data):
try:
flavors = self._get_flavors(request, data)
return tuskar.ResourceClass.update(
request,
data['resource_class_id'],
name=data['name'],
service_type=data['service_type'],
flavors=flavors)
except Exception:
redirect = self.get_failure_url()
exceptions.handle(request,
                              _('Unable to update resource class.'),
redirect=redirect)
return None
def handle(self, request, data):
resource_class = self._update_resource_class_info(request, data)
self._add_racks(request, data, resource_class)
return True
class DetailUpdateWorkflow(UpdateResourceClass):
def get_index_url(self):
"""This url is used both as success and failure url"""
url = "horizon:infrastructure:resource_management:resource_classes:"\
"detail"
return "%s?tab=resource_class_details__overview" % (
reverse(url, args=(self.context["resource_class_id"])))
class UpdateRacksWorkflow(UpdateResourceClass):
def get_index_url(self):
"""This url is used both as success and failure url"""
url = "horizon:infrastructure:resource_management:resource_classes:"\
"detail"
return "%s?tab=resource_class_details__racks" % (
reverse(url, args=(self.context["resource_class_id"])))
class UpdateFlavorsWorkflow(UpdateResourceClass):
def get_index_url(self):
"""This url is used both as success and failure url"""
url = "horizon:infrastructure:resource_management:resource_classes:"\
"detail"
return "%s?tab=resource_class_details__flavors" % (
reverse(url, args=(self.context["resource_class_id"])))
| apache-2.0 |
khushboo9293/postorius | src/postorius/models.py | 2 | 11004 | # -*- coding: utf-8 -*-
# Copyright (C) 1998-2015 by the Free Software Foundation, Inc.
#
# This file is part of Postorius.
#
# Postorius is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# Postorius is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# Postorius. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (
absolute_import, division, print_function, unicode_literals)
import random
import hashlib
import logging
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured
from django.core.mail import send_mail
from django.db.models.signals import post_save
from django.core.urlresolvers import reverse
from django.dispatch import receiver
from django.db import models
from django.http import Http404
from django.template import Context
from django.template.loader import get_template
from mailmanclient import MailmanConnectionError
from postorius.utils import get_client
from urllib2 import HTTPError
logger = logging.getLogger(__name__)
@receiver(post_save, sender=User)
def create_mailman_user(sender, **kwargs):
if kwargs.get('created'):
autocreate = False
try:
autocreate = settings.AUTOCREATE_MAILMAN_USER
except AttributeError:
pass
if autocreate:
user = kwargs.get('instance')
client = get_client()
try:
client.create_user(user.email, None, None)
except HTTPError:
pass
class MailmanApiError(Exception):
"""Raised if the API is not available.
"""
pass
class Mailman404Error(Exception):
"""Proxy exception. Raised if the API returns 404."""
pass
class MailmanRestManager(object):
"""Manager class to give a model class CRUD access to the API.
    Returns objects (or lists of objects) retrieved from the API.
"""
def __init__(self, resource_name, resource_name_plural, cls_name=None):
self.resource_name = resource_name
self.resource_name_plural = resource_name_plural
def all(self):
try:
return getattr(get_client(), self.resource_name_plural)
except AttributeError:
raise MailmanApiError
except MailmanConnectionError, e:
raise MailmanApiError(e)
def get(self, **kwargs):
try:
method = getattr(get_client(), 'get_' + self.resource_name)
return method(**kwargs)
except AttributeError, e:
raise MailmanApiError(e)
except HTTPError, e:
if e.code == 404:
raise Mailman404Error('Mailman resource could not be found.')
else:
raise
except MailmanConnectionError, e:
raise MailmanApiError(e)
def get_or_404(self, **kwargs):
"""Similar to `self.get` but raises standard Django 404 error.
"""
try:
return self.get(**kwargs)
except Mailman404Error:
raise Http404
except MailmanConnectionError, e:
raise MailmanApiError(e)
def create(self, **kwargs):
try:
method = getattr(get_client(), 'create_' + self.resource_name)
return method(**kwargs)
except AttributeError, e:
raise MailmanApiError(e)
except HTTPError, e:
if e.code == 409:
raise MailmanApiError
else:
raise
except MailmanConnectionError:
raise MailmanApiError
def delete(self):
"""Not implemented since the objects returned from the API
have a `delete` method of their own.
"""
pass
class MailmanListManager(MailmanRestManager):
def __init__(self):
super(MailmanListManager, self).__init__('list', 'lists')
def all(self, only_public=False):
try:
objects = getattr(get_client(), self.resource_name_plural)
except AttributeError:
raise MailmanApiError
except MailmanConnectionError, e:
raise MailmanApiError(e)
if only_public:
public = []
for obj in objects:
if obj.settings.get('advertised', False):
public.append(obj)
return public
else:
return objects
def by_mail_host(self, mail_host, only_public=False):
objects = self.all(only_public)
host_objects = []
for obj in objects:
if obj.mail_host == mail_host:
host_objects.append(obj)
return host_objects
class MailmanRestModel(object):
"""Simple REST Model class to make REST API calls Django style.
"""
MailmanApiError = MailmanApiError
DoesNotExist = Mailman404Error
def __init__(self, **kwargs):
self.kwargs = kwargs
def save(self):
"""Proxy function for `objects.create`.
(REST API uses `create`, while Django uses `save`.)
"""
self.objects.create(**self.kwargs)
class Domain(MailmanRestModel):
"""Domain model class.
"""
objects = MailmanRestManager('domain', 'domains')
class List(MailmanRestModel):
"""List model class.
"""
objects = MailmanListManager()
class MailmanUser(MailmanRestModel):
"""MailmanUser model class.
"""
objects = MailmanRestManager('user', 'users')
class Member(MailmanRestModel):
"""Member model class.
"""
objects = MailmanRestManager('member', 'members')
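# Example usage of the REST models (a minimal sketch; the lookup keyword
# arguments follow python-mailmanclient and are assumptions here):
#   domain = Domain.objects.get_or_404(mail_host='lists.example.com')
#   mlist = List.objects.get_or_404(fqdn_listname='test@lists.example.com')
#   Domain(mail_host='new.example.com').save()  # proxies objects.create()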
class AddressConfirmationProfileManager(models.Manager):
"""
Manager class for AddressConfirmationProfile.
"""
def create_profile(self, email, user):
# Create or update a profile
# Guarantee an email bytestr type that can be fed to hashlib.
email_str = email
if isinstance(email_str, unicode):
email_str = email_str.encode('utf-8')
activation_key = hashlib.sha1(
str(random.random())+email_str).hexdigest()
# Make now tz naive (we don't care about the timezone)
now = datetime.now().replace(tzinfo=None)
# Either update an existing profile record for the given email address
try:
profile = self.get(email=email)
profile.activation_key = activation_key
profile.created = now
profile.save()
# ... or create a new one.
except AddressConfirmationProfile.DoesNotExist:
profile = self.create(email=email,
activation_key=activation_key,
user=user,
created=now)
return profile
class AddressConfirmationProfile(models.Model):
"""
Profile model for temporarily storing an activation key to register
an email address.
"""
email = models.EmailField()
activation_key = models.CharField(max_length=40)
created = models.DateTimeField()
user = models.ForeignKey(User)
objects = AddressConfirmationProfileManager()
def __unicode__(self):
return u'Address Confirmation Profile for {0}'.format(self.email)
@property
def is_expired(self):
"""
        A profile expires after 1 day by default.
This can be configured in the settings.
>>> EMAIL_CONFIRMATION_EXPIRATION_DELTA = timedelta(days=2)
"""
expiration_delta = getattr(
settings, 'EMAIL_CONFIRMATION_EXPIRATION_DELTA', timedelta(days=1))
age = datetime.now().replace(tzinfo=None) - \
self.created.replace(tzinfo=None)
return age > expiration_delta
def _create_host_url(self, request):
# Create the host url
protocol = 'https'
if not request.is_secure():
protocol = 'http'
server_name = request.META['SERVER_NAME']
if server_name[-1] == '/':
            server_name = server_name[:-1]
return '{0}://{1}'.format(protocol, server_name)
def send_confirmation_link(self, request, template_context=None,
template_path=None):
"""
Send out a message containing a link to activate the given address.
The following settings are recognized:
>>> EMAIL_CONFIRMATION_TEMPLATE = 'postorius/address_confirmation_message.txt'
>>> EMAIL_CONFIRMATION_FROM = '[email protected]'
>>> EMAIL_CONFIRMATION_SUBJECT = 'Confirmation needed'
:param request: The HTTP request object.
:type request: HTTPRequest
:param template_context: The context used when rendering the template.
Falls back to host url and activation link.
:type template_context: django.template.Context
"""
# create the host url and the activation link need for the template
host_url = self._create_host_url(request)
# Get the url string from url conf.
url = reverse('address_activation_link',
kwargs={'activation_key': self.activation_key})
activation_link = '{0}{1}'.format(host_url, url)
# Detect the right template path, either from the param,
# the setting or the default
if not template_path:
template_path = getattr(settings,
'EMAIL_CONFIRMATION_TEMPLATE',
'postorius/address_confirmation_message.txt')
# Create a template context (if there is none) containing
# the activation_link and the host_url.
if not template_context:
template_context = Context(
{'activation_link': activation_link, 'host_url': host_url})
email_subject = getattr(
settings, 'EMAIL_CONFIRMATION_SUBJECT', u'Confirmation needed')
try:
sender_address = getattr(settings, 'EMAIL_CONFIRMATION_FROM')
except AttributeError:
# settings.EMAIL_CONFIRMATION_FROM is not defined, fallback
# settings.DEFAULT_EMAIL_FROM as mentioned in the django
# docs. If that also fails, raise a `ImproperlyConfigured` Error.
try:
sender_address = getattr(settings, 'DEFAULT_FROM_EMAIL')
except AttributeError:
raise ImproperlyConfigured
send_mail(email_subject,
get_template(template_path).render(template_context),
sender_address,
[self.email])
| gpl-3.0 |
chaso137/wot-xvm | src/xpm/xpm/mods/lib/tlslite/utils/pem.py | 116 | 3587 | # Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
from .compat import *
import binascii
#This code is shared with tackpy (somewhat), so I'd rather make minimal
#changes, and preserve the use of a2b_base64 throughout.
def dePem(s, name):
"""Decode a PEM string into a bytearray of its payload.
The input must contain an appropriate PEM prefix and postfix
based on the input name string, e.g. for name="CERTIFICATE":
-----BEGIN CERTIFICATE-----
MIIBXDCCAUSgAwIBAgIBADANBgkqhkiG9w0BAQUFADAPMQ0wCwYDVQQDEwRUQUNL
...
KoZIhvcNAQEFBQADAwA5kw==
-----END CERTIFICATE-----
The first such PEM block in the input will be found, and its
payload will be base64 decoded and returned.
"""
prefix = "-----BEGIN %s-----" % name
postfix = "-----END %s-----" % name
start = s.find(prefix)
if start == -1:
raise SyntaxError("Missing PEM prefix")
end = s.find(postfix, start+len(prefix))
if end == -1:
raise SyntaxError("Missing PEM postfix")
s = s[start+len("-----BEGIN %s-----" % name) : end]
retBytes = a2b_base64(s) # May raise SyntaxError
return retBytes
def dePemList(s, name):
"""Decode a sequence of PEM blocks into a list of bytearrays.
The input must contain any number of PEM blocks, each with the appropriate
PEM prefix and postfix based on the input name string, e.g. for
name="TACK BREAK SIG". Arbitrary text can appear between and before and
after the PEM blocks. For example:
" Created by TACK.py 0.9.3 Created at 2012-02-01T00:30:10Z -----BEGIN TACK
BREAK SIG-----
ATKhrz5C6JHJW8BF5fLVrnQss6JnWVyEaC0p89LNhKPswvcC9/s6+vWLd9snYTUv
YMEBdw69PUP8JB4AdqA3K6Ap0Fgd9SSTOECeAKOUAym8zcYaXUwpk0+WuPYa7Zmm
SkbOlK4ywqt+amhWbg9txSGUwFO5tWUHT3QrnRlE/e3PeNFXLx5Bckg= -----END TACK
BREAK SIG----- Created by TACK.py 0.9.3 Created at 2012-02-01T00:30:11Z
-----BEGIN TACK BREAK SIG-----
ATKhrz5C6JHJW8BF5fLVrnQss6JnWVyEaC0p89LNhKPswvcC9/s6+vWLd9snYTUv
YMEBdw69PUP8JB4AdqA3K6BVCWfcjN36lx6JwxmZQncS6sww7DecFO/qjSePCxwM
+kdDqX/9/183nmjx6bf0ewhPXkA0nVXsDYZaydN8rJU1GaMlnjcIYxY= -----END TACK
BREAK SIG----- "
    All such PEM blocks will be found, decoded, and returned in an ordered
    list of bytearrays, which may have zero elements if no PEM blocks are
    found.
"""
bList = []
prefix = "-----BEGIN %s-----" % name
postfix = "-----END %s-----" % name
while 1:
start = s.find(prefix)
if start == -1:
return bList
end = s.find(postfix, start+len(prefix))
if end == -1:
raise SyntaxError("Missing PEM postfix")
s2 = s[start+len(prefix) : end]
retBytes = a2b_base64(s2) # May raise SyntaxError
bList.append(retBytes)
s = s[end+len(postfix) : ]
def pem(b, name):
"""Encode a payload bytearray into a PEM string.
The input will be base64 encoded, then wrapped in a PEM prefix/postfix
based on the name string, e.g. for name="CERTIFICATE":
-----BEGIN CERTIFICATE-----
MIIBXDCCAUSgAwIBAgIBADANBgkqhkiG9w0BAQUFADAPMQ0wCwYDVQQDEwRUQUNL
...
KoZIhvcNAQEFBQADAwA5kw==
-----END CERTIFICATE-----
"""
s1 = b2a_base64(b)[:-1] # remove terminating \n
s2 = ""
while s1:
s2 += s1[:64] + "\n"
s1 = s1[64:]
s = ("-----BEGIN %s-----\n" % name) + s2 + \
("-----END %s-----\n" % name)
return s
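# Round-trip example (a minimal sketch):
#   pemText = pem(bytearray(b"payload"), "CERTIFICATE")
#   assert dePem(pemText, "CERTIFICATE") == bytearray(b"payload")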
def pemSniff(inStr, name):
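    """Return True if 'inStr' contains a PEM BEGIN marker for 'name'."""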
searchStr = "-----BEGIN %s-----" % name
return searchStr in inStr
| gpl-3.0 |
waytai/odoo | openerp/addons/base/res/res_config.py | 243 | 30944 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from operator import attrgetter
import re
import openerp
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
from openerp.tools import ustr
from openerp.tools.translate import _
from openerp import exceptions
from lxml import etree
_logger = logging.getLogger(__name__)
class res_config_module_installation_mixin(object):
def _install_modules(self, cr, uid, modules, context):
"""Install the requested modules.
return the next action to execute
modules is a list of tuples
(mod_name, browse_record | None)
"""
ir_module = self.pool.get('ir.module.module')
to_install_ids = []
to_install_missing_names = []
for name, module in modules:
if not module:
to_install_missing_names.append(name)
elif module.state == 'uninstalled':
to_install_ids.append(module.id)
result = None
if to_install_ids:
result = ir_module.button_immediate_install(cr, uid, to_install_ids, context=context)
        #FIXME: if result is not None, the corresponding todo will be skipped because it was just marked done
if to_install_missing_names:
return {
'type': 'ir.actions.client',
'tag': 'apps',
'params': {'modules': to_install_missing_names},
}
return result
class res_config_configurable(osv.osv_memory):
''' Base classes for new-style configuration items
Configuration items should inherit from this class, implement
the execute method (and optionally the cancel one) and have
their view inherit from the related res_config_view_base view.
'''
_name = 'res.config'
def _next_action(self, cr, uid, context=None):
Todos = self.pool['ir.actions.todo']
_logger.info('getting next %s', Todos)
active_todos = Todos.browse(cr, uid,
Todos.search(cr, uid, ['&', ('type', '=', 'automatic'), ('state','=','open')]),
context=context)
user_groups = set(map(
lambda g: g.id,
self.pool['res.users'].browse(cr, uid, [uid], context=context)[0].groups_id))
valid_todos_for_user = [
todo for todo in active_todos
if not todo.groups_id or bool(user_groups.intersection((
group.id for group in todo.groups_id)))
]
if valid_todos_for_user:
return valid_todos_for_user[0]
return None
def _next(self, cr, uid, context=None):
_logger.info('getting next operation')
next = self._next_action(cr, uid, context=context)
_logger.info('next action is %s', next)
if next:
res = next.action_launch(context=context)
res['nodestroy'] = False
return res
return {
'type': 'ir.actions.client',
'tag': 'reload',
}
def start(self, cr, uid, ids, context=None):
return self.next(cr, uid, ids, context)
def next(self, cr, uid, ids, context=None):
""" Returns the next todo action to execute (using the default
sort order)
"""
return self._next(cr, uid, context=context)
def execute(self, cr, uid, ids, context=None):
""" Method called when the user clicks on the ``Next`` button.
Execute *must* be overloaded unless ``action_next`` is overloaded
(which is something you generally don't need to do).
If ``execute`` returns an action dictionary, that action is executed
rather than just going to the next configuration item.
"""
raise NotImplementedError(
'Configuration items need to implement execute')
def cancel(self, cr, uid, ids, context=None):
""" Method called when the user click on the ``Skip`` button.
``cancel`` should be overloaded instead of ``action_skip``. As with
``execute``, if it returns an action dictionary that action is
        executed instead of the default (going to the next configuration item)
The default implementation is a NOOP.
``cancel`` is also called by the default implementation of
``action_cancel``.
"""
pass
def action_next(self, cr, uid, ids, context=None):
""" Action handler for the ``next`` event.
Sets the status of the todo the event was sent from to
``done``, calls ``execute`` and -- unless ``execute`` returned
an action dictionary -- executes the action provided by calling
``next``.
"""
next = self.execute(cr, uid, ids, context=context)
if next: return next
return self.next(cr, uid, ids, context=context)
def action_skip(self, cr, uid, ids, context=None):
""" Action handler for the ``skip`` event.
Sets the status of the todo the event was sent from to
``skip``, calls ``cancel`` and -- unless ``cancel`` returned
an action dictionary -- executes the action provided by calling
``next``.
"""
next = self.cancel(cr, uid, ids, context=context)
if next: return next
return self.next(cr, uid, ids, context=context)
def action_cancel(self, cr, uid, ids, context=None):
""" Action handler for the ``cancel`` event. That event isn't
        generated by the res.config.view.base inheritable view; the
inherited view has to overload one of the buttons (or add one
more).
Sets the status of the todo the event was sent from to
``cancel``, calls ``cancel`` and -- unless ``cancel`` returned
an action dictionary -- executes the action provided by calling
``next``.
"""
next = self.cancel(cr, uid, ids, context=context)
if next: return next
return self.next(cr, uid, ids, context=context)
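# A minimal configuration item built on res.config might look like (a sketch;
# the model name and behaviour are hypothetical):
#   class my_config_item(osv.osv_memory):
#       _name = 'my.config.item'
#       _inherit = 'res.config'
#       def execute(self, cr, uid, ids, context=None):
#           pass  # apply the configuration here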
class res_config_installer(osv.osv_memory, res_config_module_installation_mixin):
""" New-style configuration base specialized for addons selection
and installation.
Basic usage
-----------
Subclasses can simply define a number of _columns as
fields.boolean objects. The keys (column names) should be the
names of the addons to install (when selected). Upon action
execution, selected boolean fields (and those only) will be
interpreted as addons to install, and batch-installed.
Additional addons
-----------------
It is also possible to require the installation of an additional
addon set when a specific preset of addons has been marked for
installation (in the basic usage only, additionals can't depend on
one another).
These additionals are defined through the ``_install_if``
property. This property is a mapping of a collection of addons (by
name) to a collection of addons (by name) [#]_, and if all the *key*
addons are selected for installation, then the *value* ones will
be selected as well. For example::
_install_if = {
('sale','crm'): ['sale_crm'],
}
This will install the ``sale_crm`` addon if and only if both the
``sale`` and ``crm`` addons are selected for installation.
You can define as many additionals as you wish, and additionals
can overlap in key and value. For instance::
_install_if = {
('sale','crm'): ['sale_crm'],
('sale','project'): ['sale_service'],
}
will install both ``sale_crm`` and ``sale_service`` if all of
``sale``, ``crm`` and ``project`` are selected for installation.
Hook methods
------------
Subclasses might also need to express dependencies more complex
than that provided by additionals. In this case, it's possible to
define methods of the form ``_if_%(name)s`` where ``name`` is the
name of a boolean field. If the field is selected, then the
corresponding module will be marked for installation *and* the
hook method will be executed.
Hook methods take the usual set of parameters (cr, uid, ids,
context) and can return a collection of additional addons to
install (if they return anything, otherwise they should not return
anything, though returning any "falsy" value such as None or an
empty collection will have the same effect).
Complete control
----------------
The last hook is to simply overload the ``modules_to_install``
method, which implements all the mechanisms above. This method
takes the usual set of parameters (cr, uid, ids, context) and
returns a ``set`` of addons to install (addons selected by the
above methods minus addons from the *basic* set which are already
installed) [#]_ so an overloader can simply manipulate the ``set``
returned by ``res_config_installer.modules_to_install`` to add or
remove addons.
Skipping the installer
----------------------
Unless it is removed from the view, installers have a *skip*
button which invokes ``action_skip`` (and the ``cancel`` hook from
``res.config``). Hooks and additionals *are not run* when skipping
installation, even for already installed addons.
Again, setup your hooks accordingly.
.. [#] note that since a mapping key needs to be hashable, it's
possible to use a tuple or a frozenset, but not a list or a
regular set
.. [#] because the already-installed modules are only pruned at
the very end of ``modules_to_install``, additionals and
hooks depending on them *are guaranteed to execute*. Setup
your hooks accordingly.
"""
_name = 'res.config.installer'
_inherit = 'res.config'
_install_if = {}
def already_installed(self, cr, uid, context=None):
""" For each module, check if it's already installed and if it
is return its name
:returns: a list of the already installed modules in this
installer
:rtype: [str]
"""
return map(attrgetter('name'),
self._already_installed(cr, uid, context=context))
def _already_installed(self, cr, uid, context=None):
""" For each module (boolean fields in a res.config.installer),
check if it's already installed (either 'to install', 'to upgrade'
or 'installed') and if it is return the module's record
:returns: a list of all installed modules in this installer
:rtype: recordset (collection of Record)
"""
modules = self.pool['ir.module.module']
selectable = [field for field in self._columns
if type(self._columns[field]) is fields.boolean]
return modules.browse(
cr, uid,
modules.search(cr, uid,
[('name','in',selectable),
('state','in',['to install', 'installed', 'to upgrade'])],
context=context),
context=context)
def modules_to_install(self, cr, uid, ids, context=None):
""" selects all modules to install:
* checked boolean fields
* return values of hook methods. Hook methods are of the form
``_if_%(addon_name)s``, and are called if the corresponding
addon is marked for installation. They take the arguments
cr, uid, ids and context, and return an iterable of addon
names
* additionals, additionals are setup through the ``_install_if``
class variable. ``_install_if`` is a dict of {iterable:iterable}
where key and value are iterables of addon names.
If all the addons in the key are selected for installation
(warning: addons added through hooks don't count), then the
addons in the value are added to the set of modules to install
* not already installed
"""
base = set(module_name
for installer in self.read(cr, uid, ids, context=context)
for module_name, to_install in installer.iteritems()
if module_name != 'id'
if type(self._columns.get(module_name)) is fields.boolean
if to_install)
hooks_results = set()
for module in base:
hook = getattr(self, '_if_%s'% module, None)
if hook:
hooks_results.update(hook(cr, uid, ids, context=None) or set())
additionals = set(
module for requirements, consequences \
in self._install_if.iteritems()
if base.issuperset(requirements)
for module in consequences)
return (base | hooks_results | additionals).difference(
self.already_installed(cr, uid, context))
def default_get(self, cr, uid, fields_list, context=None):
''' If an addon is already installed, check it by default
'''
defaults = super(res_config_installer, self).default_get(
cr, uid, fields_list, context=context)
return dict(defaults,
**dict.fromkeys(
self.already_installed(cr, uid, context=context),
True))
def fields_get(self, cr, uid, fields=None, context=None, write_access=True, attributes=None):
""" If an addon is already installed, set it to readonly as
res.config.installer doesn't handle uninstallations of already
installed addons
"""
fields = super(res_config_installer, self).fields_get(
cr, uid, fields, context, write_access, attributes)
for name in self.already_installed(cr, uid, context=context):
if name not in fields:
continue
fields[name].update(
readonly=True,
help= ustr(fields[name].get('help', '')) +
_('\n\nThis addon is already installed on your system'))
return fields
def execute(self, cr, uid, ids, context=None):
to_install = list(self.modules_to_install(
cr, uid, ids, context=context))
_logger.info('Selecting addons %s to install', to_install)
ir_module = self.pool.get('ir.module.module')
modules = []
for name in to_install:
mod_ids = ir_module.search(cr, uid, [('name', '=', name)])
record = ir_module.browse(cr, uid, mod_ids[0], context) if mod_ids else None
modules.append((name, record))
return self._install_modules(cr, uid, modules, context=context)
class res_config_settings(osv.osv_memory, res_config_module_installation_mixin):
""" Base configuration wizard for application settings. It provides support for setting
default values, assigning groups to employee users, and installing modules.
To make such a 'settings' wizard, define a model like::
class my_config_wizard(osv.osv_memory):
_name = 'my.settings'
_inherit = 'res.config.settings'
_columns = {
'default_foo': fields.type(..., default_model='my.model'),
'group_bar': fields.boolean(..., group='base.group_user', implied_group='my.group'),
'module_baz': fields.boolean(...),
'other_field': fields.type(...),
}
The method ``execute`` provides some support based on a naming convention:
* For a field like 'default_XXX', ``execute`` sets the (global) default value of
the field 'XXX' in the model named by ``default_model`` to the field's value.
* For a boolean field like 'group_XXX', ``execute`` adds/removes 'implied_group'
to/from the implied groups of 'group', depending on the field's value.
By default 'group' is the group Employee. Groups are given by their xml id.
The attribute 'group' may contain several xml ids, separated by commas.
* For a boolean field like 'module_XXX', ``execute`` triggers the immediate
installation of the module named 'XXX' if the field has value ``True``.
* For the other fields, the method ``execute`` invokes all methods with a name
that starts with 'set_'; such methods can be defined to implement the effect
of those fields.
The method ``default_get`` retrieves values that reflect the current status of the
fields like 'default_XXX', 'group_XXX' and 'module_XXX'. It also invokes all methods
with a name that starts with 'get_default_'; such methods can be defined to provide
current values for other fields.
"""
_name = 'res.config.settings'
def copy(self, cr, uid, id, values, context=None):
raise osv.except_osv(_("Cannot duplicate configuration!"), "")
def fields_view_get(self, cr, user, view_id=None, view_type='form',
context=None, toolbar=False, submenu=False):
ret_val = super(res_config_settings, self).fields_view_get(
cr, user, view_id=view_id, view_type=view_type, context=context,
toolbar=toolbar, submenu=submenu)
doc = etree.XML(ret_val['arch'])
for field in ret_val['fields']:
if not field.startswith("module_"):
continue
for node in doc.xpath("//field[@name='%s']" % field):
if 'on_change' not in node.attrib:
node.set("on_change",
"onchange_module(%s, '%s')" % (field, field))
ret_val['arch'] = etree.tostring(doc)
return ret_val
    def onchange_module(self, cr, uid, ids, field_value, module_name, context=None):
module_pool = self.pool.get('ir.module.module')
module_ids = module_pool.search(
cr, uid, [('name', '=', module_name.replace("module_", '')),
('state','in', ['to install', 'installed', 'to upgrade'])],
context=context)
if module_ids and not field_value:
dep_ids = module_pool.downstream_dependencies(cr, uid, module_ids, context=context)
dep_name = [x.shortdesc for x in module_pool.browse(
cr, uid, dep_ids + module_ids, context=context)]
message = '\n'.join(dep_name)
return {
'warning': {
'title': _('Warning!'),
'message': _('Disabling this option will also uninstall the following modules \n%s') % message,
}
}
return {}
def _get_classified_fields(self, cr, uid, context=None):
""" return a dictionary with the fields classified by category::
{ 'default': [('default_foo', 'model', 'foo'), ...],
'group': [('group_bar', [browse_group], browse_implied_group), ...],
'module': [('module_baz', browse_module), ...],
'other': ['other_field', ...],
}
"""
ir_model_data = self.pool['ir.model.data']
ir_module = self.pool['ir.module.module']
def ref(xml_id):
mod, xml = xml_id.split('.', 1)
return ir_model_data.get_object(cr, uid, mod, xml, context=context)
defaults, groups, modules, others = [], [], [], []
for name, field in self._columns.items():
if name.startswith('default_') and hasattr(field, 'default_model'):
defaults.append((name, field.default_model, name[8:]))
elif name.startswith('group_') and isinstance(field, fields.boolean) and hasattr(field, 'implied_group'):
field_groups = getattr(field, 'group', 'base.group_user').split(',')
groups.append((name, map(ref, field_groups), ref(field.implied_group)))
elif name.startswith('module_') and isinstance(field, fields.boolean):
mod_ids = ir_module.search(cr, uid, [('name', '=', name[7:])])
record = ir_module.browse(cr, uid, mod_ids[0], context) if mod_ids else None
modules.append((name, record))
else:
others.append(name)
return {'default': defaults, 'group': groups, 'module': modules, 'other': others}
def default_get(self, cr, uid, fields, context=None):
ir_values = self.pool['ir.values']
classified = self._get_classified_fields(cr, uid, context)
res = super(res_config_settings, self).default_get(cr, uid, fields, context)
        # defaults: take the corresponding global default value, if one was set
for name, model, field in classified['default']:
value = ir_values.get_default(cr, uid, model, field)
if value is not None:
res[name] = value
        # groups: the checkbox is set iff 'implied_group' is implied by all of 'groups'
for name, groups, implied_group in classified['group']:
res[name] = all(implied_group in group.implied_ids for group in groups)
# modules: which modules are installed/to install
for name, module in classified['module']:
res[name] = module and module.state in ('installed', 'to install', 'to upgrade')
# other fields: call all methods that start with 'get_default_'
for method in dir(self):
if method.startswith('get_default_'):
res.update(getattr(self, method)(cr, uid, fields, context))
return res
def execute(self, cr, uid, ids, context=None):
if context is None:
context = {}
context = dict(context, active_test=False)
if uid != SUPERUSER_ID and not self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager'):
raise openerp.exceptions.AccessError(_("Only administrators can change the settings"))
ir_values = self.pool['ir.values']
ir_module = self.pool['ir.module.module']
res_groups = self.pool['res.groups']
classified = self._get_classified_fields(cr, uid, context=context)
config = self.browse(cr, uid, ids[0], context)
# default values fields
for name, model, field in classified['default']:
ir_values.set_default(cr, SUPERUSER_ID, model, field, config[name])
# group fields: modify group / implied groups
for name, groups, implied_group in classified['group']:
gids = map(int, groups)
if config[name]:
res_groups.write(cr, uid, gids, {'implied_ids': [(4, implied_group.id)]}, context=context)
else:
res_groups.write(cr, uid, gids, {'implied_ids': [(3, implied_group.id)]}, context=context)
uids = set()
for group in groups:
uids.update(map(int, group.users))
implied_group.write({'users': [(3, u) for u in uids]})
# other fields: execute all methods that start with 'set_'
for method in dir(self):
if method.startswith('set_'):
getattr(self, method)(cr, uid, ids, context)
# module fields: install/uninstall the selected modules
to_install = []
to_uninstall_ids = []
lm = len('module_')
for name, module in classified['module']:
if config[name]:
to_install.append((name[lm:], module))
else:
if module and module.state in ('installed', 'to upgrade'):
to_uninstall_ids.append(module.id)
if to_uninstall_ids:
ir_module.button_immediate_uninstall(cr, uid, to_uninstall_ids, context=context)
action = self._install_modules(cr, uid, to_install, context=context)
if action:
return action
# After the uninstall/install calls, the self.pool is no longer valid.
# So we reach into the RegistryManager directly.
res_config = openerp.modules.registry.RegistryManager.get(cr.dbname)['res.config']
config = res_config.next(cr, uid, [], context=context) or {}
if config.get('type') not in ('ir.actions.act_window_close',):
return config
# force client-side reload (update user menu and current view)
return {
'type': 'ir.actions.client',
'tag': 'reload',
}
def cancel(self, cr, uid, ids, context=None):
# ignore the current record, and send the action to reopen the view
act_window = self.pool['ir.actions.act_window']
action_ids = act_window.search(cr, uid, [('res_model', '=', self._name)])
if action_ids:
return act_window.read(cr, uid, action_ids[0], [], context=context)
return {}
def name_get(self, cr, uid, ids, context=None):
""" Override name_get method to return an appropriate configuration wizard
name, and not the generated name."""
if not ids:
return []
# name_get may receive int id instead of an id list
if isinstance(ids, (int, long)):
ids = [ids]
act_window = self.pool['ir.actions.act_window']
action_ids = act_window.search(cr, uid, [('res_model', '=', self._name)], context=context)
name = self._name
if action_ids:
name = act_window.read(cr, uid, action_ids[0], ['name'], context=context)['name']
        return [(record.id, name) for record in self.browse(cr, uid, ids, context=context)]
def get_option_path(self, cr, uid, menu_xml_id, context=None):
"""
Fetch the path to a specified configuration view and the action id to access it.
:param string menu_xml_id: the xml id of the menuitem where the view is located,
structured as follows: module_name.menuitem_xml_id (e.g.: "base.menu_sale_config")
:return tuple:
- t[0]: string: full path to the menuitem (e.g.: "Settings/Configuration/Sales")
- t[1]: int or long: id of the menuitem's action
"""
module_name, menu_xml_id = menu_xml_id.split('.')
dummy, menu_id = self.pool['ir.model.data'].get_object_reference(cr, uid, module_name, menu_xml_id)
ir_ui_menu = self.pool['ir.ui.menu'].browse(cr, uid, menu_id, context=context)
return (ir_ui_menu.complete_name, ir_ui_menu.action.id)
def get_option_name(self, cr, uid, full_field_name, context=None):
"""
Fetch the human readable name of a specified configuration option.
:param string full_field_name: the full name of the field, structured as follows:
model_name.field_name (e.g.: "sale.config.settings.fetchmail_lead")
:return string: human readable name of the field (e.g.: "Create leads from incoming mails")
"""
model_name, field_name = full_field_name.rsplit('.', 1)
return self.pool[model_name].fields_get(cr, uid, allfields=[field_name], context=context)[field_name]['string']
def get_config_warning(self, cr, msg, context=None):
"""
Helper: return a Warning exception with the given message where the %(field:xxx)s
and/or %(menu:yyy)s are replaced by the human readable field's name and/or menuitem's
full path.
Usage:
------
Just include in your error message %(field:model_name.field_name)s to obtain the human
readable field's name, and/or %(menu:module_name.menuitem_xml_id)s to obtain the menuitem's
full path.
Example of use:
---------------
        raise self.pool['res.config.settings'].get_config_warning(cr, _("Error: this action is prohibited. You should check the field %(field:sale.config.settings.fetchmail_lead)s in %(menu:base.menu_sale_config)s."), context=context)
This will return an exception containing the following message:
Error: this action is prohibited. You should check the field Create leads from incoming mails in Settings/Configuration/Sales.
What if there is another substitution in the message already?
-------------------------------------------------------------
You could have a situation where the error message you want to upgrade already contains a substitution. Example:
Cannot find any account journal of %s type for this company.\n\nYou can create one in the menu: \nConfiguration\Journals\Journals.
        What you want to do here is simply to replace the path by %(menu:account.menu_account_config)s, and leave the rest alone.
In order to do that, you can use the double percent (%%) to escape your new substitution, like so:
Cannot find any account journal of %s type for this company.\n\nYou can create one in the %%(menu:account.menu_account_config)s.
"""
res_config_obj = openerp.registry(cr.dbname)['res.config.settings']
regex_path = r'%\(((?:menu|field):[a-z_\.]*)\)s'
# Process the message
# 1/ find the menu and/or field references, put them in a list
references = re.findall(regex_path, msg, flags=re.I)
# 2/ fetch the menu and/or field replacement values (full path and
# human readable field's name) and the action_id if any
values = {}
action_id = None
for item in references:
ref_type, ref = item.split(':')
if ref_type == 'menu':
values[item], action_id = res_config_obj.get_option_path(cr, SUPERUSER_ID, ref, context=context)
elif ref_type == 'field':
values[item] = res_config_obj.get_option_name(cr, SUPERUSER_ID, ref, context=context)
# 3/ substitute and return the result
if (action_id):
return exceptions.RedirectWarning(msg % values, action_id, _('Go to the configuration panel'))
return exceptions.Warning(msg % values)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ppmt/Crust | flask/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.py | 203 | 12894 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2014 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from io import BytesIO
import logging
import os
import re
import struct
import sys
from .compat import sysconfig, detect_encoding, ZipFile
from .resources import finder
from .util import (FileOperator, get_export_entry, convert_path,
get_executable, in_venv)
logger = logging.getLogger(__name__)
_DEFAULT_MANIFEST = '''
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity version="1.0.0.0"
processorArchitecture="X86"
name="%s"
type="win32"/>
<!-- Identify the application security requirements. -->
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level="asInvoker" uiAccess="false"/>
</requestedPrivileges>
</security>
</trustInfo>
</assembly>'''.strip()
# check if Python is called on the first line with this expression
FIRST_LINE_RE = re.compile(b'^#!.*pythonw?[0-9.]*([ \t].*)?$')
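# Added examples (not in the original): FIRST_LINE_RE matches shebangs such as
# b'#!/usr/bin/python' (group 1 is None, so no interpreter arguments) and
# b'#!/usr/bin/env python2.7 -u' (group 1 captures the arguments b' -u').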
SCRIPT_TEMPLATE = '''# -*- coding: utf-8 -*-
if __name__ == '__main__':
import sys, re
def _resolve(module, func):
__import__(module)
mod = sys.modules[module]
parts = func.split('.')
result = getattr(mod, parts.pop(0))
for p in parts:
result = getattr(result, p)
return result
try:
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
func = _resolve('%(module)s', '%(func)s')
rc = func() # None interpreted as 0
except Exception as e: # only supporting Python >= 2.6
sys.stderr.write('%%s\\n' %% e)
rc = 1
sys.exit(rc)
'''
class ScriptMaker(object):
"""
A class to copy or create scripts from source scripts or callable
specifications.
"""
script_template = SCRIPT_TEMPLATE
executable = None # for shebangs
def __init__(self, source_dir, target_dir, add_launchers=True,
dry_run=False, fileop=None):
self.source_dir = source_dir
self.target_dir = target_dir
self.add_launchers = add_launchers
self.force = False
self.clobber = False
# It only makes sense to set mode bits on POSIX.
self.set_mode = (os.name == 'posix')
self.variants = set(('', 'X.Y'))
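        # Added note (not in the original): each variant later yields one
        # script name in _make_script -- '' keeps the plain name (e.g. 'foo'),
        # 'X' appends sys.version[0] (e.g. 'foo2'), and 'X.Y' appends
        # '-' + sys.version[:3] (e.g. 'foo-2.7').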
self._fileop = fileop or FileOperator(dry_run)
def _get_alternate_executable(self, executable, options):
if options.get('gui', False) and os.name == 'nt':
dn, fn = os.path.split(executable)
fn = fn.replace('python', 'pythonw')
executable = os.path.join(dn, fn)
return executable
def _get_shebang(self, encoding, post_interp=b'', options=None):
enquote = True
if self.executable:
executable = self.executable
enquote = False # assume this will be taken care of
elif not sysconfig.is_python_build():
executable = get_executable()
elif in_venv():
executable = os.path.join(sysconfig.get_path('scripts'),
'python%s' % sysconfig.get_config_var('EXE'))
else:
executable = os.path.join(
sysconfig.get_config_var('BINDIR'),
'python%s%s' % (sysconfig.get_config_var('VERSION'),
sysconfig.get_config_var('EXE')))
if options:
executable = self._get_alternate_executable(executable, options)
# If the user didn't specify an executable, it may be necessary to
# cater for executable paths with spaces (not uncommon on Windows)
if enquote and ' ' in executable:
executable = '"%s"' % executable
# Issue #51: don't use fsencode, since we later try to
# check that the shebang is decodable using utf-8.
executable = executable.encode('utf-8')
# in case of IronPython, play safe and enable frames support
if (sys.platform == 'cli' and '-X:Frames' not in post_interp
and '-X:FullFrames' not in post_interp):
post_interp += b' -X:Frames'
shebang = b'#!' + executable + post_interp + b'\n'
# Python parser starts to read a script using UTF-8 until
# it gets a #coding:xxx cookie. The shebang has to be the
# first line of a file, the #coding:xxx cookie cannot be
# written before. So the shebang has to be decodable from
# UTF-8.
try:
shebang.decode('utf-8')
except UnicodeDecodeError:
raise ValueError(
'The shebang (%r) is not decodable from utf-8' % shebang)
# If the script is encoded to a custom encoding (use a
# #coding:xxx cookie), the shebang has to be decodable from
# the script encoding too.
if encoding != 'utf-8':
try:
shebang.decode(encoding)
except UnicodeDecodeError:
raise ValueError(
'The shebang (%r) is not decodable '
'from the script encoding (%r)' % (shebang, encoding))
return shebang
def _get_script_text(self, entry):
return self.script_template % dict(module=entry.prefix,
func=entry.suffix)
manifest = _DEFAULT_MANIFEST
def get_manifest(self, exename):
base = os.path.basename(exename)
return self.manifest % base
def _write_script(self, names, shebang, script_bytes, filenames, ext):
use_launcher = self.add_launchers and os.name == 'nt'
linesep = os.linesep.encode('utf-8')
if not use_launcher:
script_bytes = shebang + linesep + script_bytes
else:
if ext == 'py':
launcher = self._get_launcher('t')
else:
launcher = self._get_launcher('w')
stream = BytesIO()
with ZipFile(stream, 'w') as zf:
zf.writestr('__main__.py', script_bytes)
zip_data = stream.getvalue()
script_bytes = launcher + shebang + linesep + zip_data
for name in names:
outname = os.path.join(self.target_dir, name)
if use_launcher:
n, e = os.path.splitext(outname)
if e.startswith('.py'):
outname = n
outname = '%s.exe' % outname
try:
self._fileop.write_binary_file(outname, script_bytes)
except Exception:
# Failed writing an executable - it might be in use.
logger.warning('Failed to write executable - trying to '
'use .deleteme logic')
dfname = '%s.deleteme' % outname
if os.path.exists(dfname):
os.remove(dfname) # Not allowed to fail here
os.rename(outname, dfname) # nor here
self._fileop.write_binary_file(outname, script_bytes)
logger.debug('Able to replace executable using '
'.deleteme logic')
try:
os.remove(dfname)
except Exception:
pass # still in use - ignore error
else:
if os.name == 'nt' and not outname.endswith('.' + ext):
outname = '%s.%s' % (outname, ext)
if os.path.exists(outname) and not self.clobber:
logger.warning('Skipping existing file %s', outname)
continue
self._fileop.write_binary_file(outname, script_bytes)
if self.set_mode:
self._fileop.set_executable_mode([outname])
filenames.append(outname)
def _make_script(self, entry, filenames, options=None):
post_interp = b''
if options:
args = options.get('interpreter_args', [])
if args:
args = ' %s' % ' '.join(args)
post_interp = args.encode('utf-8')
shebang = self._get_shebang('utf-8', post_interp, options=options)
script = self._get_script_text(entry).encode('utf-8')
name = entry.name
scriptnames = set()
if '' in self.variants:
scriptnames.add(name)
if 'X' in self.variants:
scriptnames.add('%s%s' % (name, sys.version[0]))
if 'X.Y' in self.variants:
scriptnames.add('%s-%s' % (name, sys.version[:3]))
if options and options.get('gui', False):
ext = 'pyw'
else:
ext = 'py'
self._write_script(scriptnames, shebang, script, filenames, ext)
def _copy_script(self, script, filenames):
adjust = False
script = os.path.join(self.source_dir, convert_path(script))
outname = os.path.join(self.target_dir, os.path.basename(script))
if not self.force and not self._fileop.newer(script, outname):
logger.debug('not copying %s (up-to-date)', script)
return
# Always open the file, but ignore failures in dry-run mode --
# that way, we'll get accurate feedback if we can read the
# script.
try:
f = open(script, 'rb')
except IOError:
if not self.dry_run:
raise
f = None
else:
encoding, lines = detect_encoding(f.readline)
f.seek(0)
first_line = f.readline()
if not first_line:
logger.warning('%s: %s is an empty file (skipping)',
self.get_command_name(), script)
return
match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n'))
if match:
adjust = True
post_interp = match.group(1) or b''
if not adjust:
if f:
f.close()
self._fileop.copy_file(script, outname)
if self.set_mode:
self._fileop.set_executable_mode([outname])
filenames.append(outname)
else:
logger.info('copying and adjusting %s -> %s', script,
self.target_dir)
if not self._fileop.dry_run:
shebang = self._get_shebang(encoding, post_interp)
if b'pythonw' in first_line:
ext = 'pyw'
else:
ext = 'py'
n = os.path.basename(outname)
self._write_script([n], shebang, f.read(), filenames, ext)
if f:
f.close()
@property
def dry_run(self):
return self._fileop.dry_run
@dry_run.setter
def dry_run(self, value):
self._fileop.dry_run = value
if os.name == 'nt':
# Executable launcher support.
# Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/
def _get_launcher(self, kind):
if struct.calcsize('P') == 8: # 64-bit
bits = '64'
else:
bits = '32'
name = '%s%s.exe' % (kind, bits)
# Issue 31: don't hardcode an absolute package name, but
# determine it relative to the current package
distlib_package = __name__.rsplit('.', 1)[0]
result = finder(distlib_package).find(name).bytes
return result
# Public API follows
def make(self, specification, options=None):
"""
Make a script.
:param specification: The specification, which is either a valid export
entry specification (to make a script from a
callable) or a filename (to make a script by
copying from a source location).
:param options: A dictionary of options controlling script generation.
:return: A list of all absolute pathnames written to.
"""
filenames = []
entry = get_export_entry(specification)
if entry is None:
self._copy_script(specification, filenames)
else:
self._make_script(entry, filenames, options=options)
return filenames
def make_multiple(self, specifications, options=None):
"""
        Take a list of specifications and make scripts from them.
:param specifications: A list of specifications.
        :return: A list of all absolute pathnames written to.
"""
filenames = []
for specification in specifications:
filenames.extend(self.make(specification, options))
return filenames
| gpl-2.0 |
kuszmaul/PerconaFT | src/tests/ipm.py | 92 | 1263 | #!/usr/local/bin/python2.6
import sys
import os
import pexpect
import getpass
#
# remote_cmd
#
nameaddr='[email protected]'
passwd='admn'
def IPM_cmd(cmds):
# password handling
ssh_newkey = 'Are you sure you want to continue connecting'
p=pexpect.spawn('ssh %s' % nameaddr, timeout=60)
i=p.expect([ssh_newkey,'Password:',pexpect.EOF])
if i==0:
p.sendline('yes')
i=p.expect([ssh_newkey,'Password:',pexpect.EOF])
if i==1:
p.sendline(passwd)
elif i==2:
print "I either got key or connection timeout"
pass
# run command(s)
i = p.expect('Sentry:')
for cmd in cmds:
if i==0:
p.sendline(cmd)
else:
print 'p.expect saw', p.before
i = p.expect('Sentry:')
print p.before
# close session
p.sendline('quit')
p.expect(pexpect.EOF)
return 0
def IPM_power_on():
IPM_cmd(['on all'])
def IPM_power_off():
IPM_cmd(['off all'])
def main(argv):
# passwd = getpass.getpass('password for %s:' % (nameaddr))
if argv[1] == 'on':
IPM_power_on()
elif argv[1] == 'off':
IPM_power_off()
else:
IPM_cmd(argv[1:])
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| agpl-3.0 |
rockyzhang/zhangyanhit-python-for-android-mips | python-build/python-libs/gdata/src/gdata/Crypto/Protocol/Chaffing.py | 226 | 9467 | """This file implements the chaffing algorithm.
Winnowing and chaffing is a technique for enhancing privacy without requiring
strong encryption. In short, the technique takes a set of authenticated
message blocks (the wheat) and adds a number of chaff blocks which have
randomly chosen data and MAC fields. This means that to an adversary, the
chaff blocks look as valid as the wheat blocks, and so the authentication
would have to be performed on every block. By tailoring the number of chaff
blocks added to the message, the sender can make breaking the message
computationally infeasible. There are many other interesting properties of
the winnow/chaff technique.
For example, say Alice is sending a message to Bob. She packetizes the
message and performs an all-or-nothing transformation on the packets. Then
she authenticates each packet with a message authentication code (MAC). The
MAC is a hash of the data packet, and there is a secret key which she must
share with Bob (key distribution is an exercise left to the reader). She then
adds a serial number to each packet, and sends the packets to Bob.
Bob receives the packets, and using the shared secret authentication key,
authenticates the MACs for each packet. Those packets that have bad MACs are
simply discarded. The remainder are sorted by serial number, and passed
through the reverse all-or-nothing transform. The transform means that an
eavesdropper (say Eve) must acquire all the packets before any of the data can
be read. If even one packet is missing, the data is useless.
There's one twist: by adding chaff packets, Alice and Bob can make Eve's job
much harder, since Eve now has to break the shared secret key, or try every
combination of wheat and chaff packet to read any of the message. The cool
thing is that Bob doesn't need to add any additional code; the chaff packets
are already filtered out because their MACs don't match (in all likelihood --
since the data and MACs for the chaff packets are randomly chosen it is
possible, but very unlikely that a chaff MAC will match the chaff data). And
Alice need not even be the party adding the chaff! She could be completely
unaware that a third party, say Charles, is adding chaff packets to her
messages as they are transmitted.
For more information on winnowing and chaffing see this paper:
Ronald L. Rivest, "Chaffing and Winnowing: Confidentiality without Encryption"
http://theory.lcs.mit.edu/~rivest/chaffing.txt
"""
__revision__ = "$Id: Chaffing.py,v 1.7 2003/02/28 15:23:21 akuchling Exp $"
from Crypto.Util.number import bytes_to_long
class Chaff:
"""Class implementing the chaff adding algorithm.
Methods for subclasses:
_randnum(size):
Returns a randomly generated number with a byte-length equal
to size. Subclasses can use this to implement better random
data and MAC generating algorithms. The default algorithm is
probably not very cryptographically secure. It is most
important that the chaff data does not contain any patterns
that can be used to discern it from wheat data without running
the MAC.
"""
def __init__(self, factor=1.0, blocksper=1):
"""Chaff(factor:float, blocksper:int)
factor is the number of message blocks to add chaff to,
expressed as a percentage between 0.0 and 1.0. blocksper is
the number of chaff blocks to include for each block being
chaffed. Thus the defaults add one chaff block to every
message block. By changing the defaults, you can adjust how
computationally difficult it could be for an adversary to
brute-force crack the message. The difficulty is expressed
as:
pow(blocksper, int(factor * number-of-blocks))
For ease of implementation, when factor < 1.0, only the first
int(factor*number-of-blocks) message blocks are chaffed.
"""
if not (0.0<=factor<=1.0):
raise ValueError, "'factor' must be between 0.0 and 1.0"
if blocksper < 0:
raise ValueError, "'blocksper' must be zero or more"
self.__factor = factor
self.__blocksper = blocksper
def chaff(self, blocks):
"""chaff( [(serial-number:int, data:string, MAC:string)] )
: [(int, string, string)]
Add chaff to message blocks. blocks is a list of 3-tuples of the
form (serial-number, data, MAC).
Chaff is created by choosing a random number of the same
byte-length as data, and another random number of the same
byte-length as MAC. The message block's serial number is
placed on the chaff block and all the packet's chaff blocks
are randomly interspersed with the single wheat block. This
method then returns a list of 3-tuples of the same form.
Chaffed blocks will contain multiple instances of 3-tuples
with the same serial number, but the only way to figure out
which blocks are wheat and which are chaff is to perform the
MAC hash and compare values.
"""
chaffedblocks = []
# count is the number of blocks to add chaff to. blocksper is the
# number of chaff blocks to add per message block that is being
# chaffed.
count = len(blocks) * self.__factor
blocksper = range(self.__blocksper)
for i, wheat in map(None, range(len(blocks)), blocks):
# it shouldn't matter which of the n blocks we add chaff to, so for
# ease of implementation, we'll just add them to the first count
# blocks
if i < count:
serial, data, mac = wheat
datasize = len(data)
macsize = len(mac)
addwheat = 1
# add chaff to this block
for j in blocksper:
chaffdata = self._randnum(datasize)
chaffmac = self._randnum(macsize)
chaff = (serial, chaffdata, chaffmac)
# mix up the order, if the 5th bit is on then put the
# wheat on the list
if addwheat and bytes_to_long(self._randnum(16)) & 0x40:
chaffedblocks.append(wheat)
addwheat = 0
chaffedblocks.append(chaff)
if addwheat:
chaffedblocks.append(wheat)
else:
# just add the wheat
chaffedblocks.append(wheat)
return chaffedblocks
def _randnum(self, size):
# TBD: Not a very secure algorithm.
# TBD: size * 2 to work around possible bug in RandomPool
from Crypto.Util import randpool
import time
pool = randpool.RandomPool(size * 2)
while size > pool.entropy:
pass
# we now have enough entropy in the pool to get size bytes of random
# data... well, probably
return pool.get_bytes(size)
if __name__ == '__main__':
text = """\
We hold these truths to be self-evident, that all men are created equal, that
they are endowed by their Creator with certain unalienable Rights, that among
these are Life, Liberty, and the pursuit of Happiness. That to secure these
rights, Governments are instituted among Men, deriving their just powers from
the consent of the governed. That whenever any Form of Government becomes
destructive of these ends, it is the Right of the People to alter or to
abolish it, and to institute new Government, laying its foundation on such
principles and organizing its powers in such form, as to them shall seem most
likely to effect their Safety and Happiness.
"""
print 'Original text:\n=========='
print text
print '=========='
# first transform the text into packets
blocks = [] ; size = 40
for i in range(0, len(text), size):
blocks.append( text[i:i+size] )
# now get MACs for all the text blocks. The key is obvious...
print 'Calculating MACs...'
from Crypto.Hash import HMAC, SHA
key = 'Jefferson'
macs = [HMAC.new(key, block, digestmod=SHA).digest()
for block in blocks]
assert len(blocks) == len(macs)
# put these into a form acceptable as input to the chaffing procedure
source = []
m = map(None, range(len(blocks)), blocks, macs)
print m
for i, data, mac in m:
source.append((i, data, mac))
# now chaff these
print 'Adding chaff...'
c = Chaff(factor=0.5, blocksper=2)
chaffed = c.chaff(source)
from base64 import encodestring
# print the chaffed message blocks. meanwhile, separate the wheat from
# the chaff
wheat = []
print 'chaffed message blocks:'
for i, data, mac in chaffed:
# do the authentication
h = HMAC.new(key, data, digestmod=SHA)
pmac = h.digest()
if pmac == mac:
tag = '-->'
wheat.append(data)
else:
tag = ' '
# base64 adds a trailing newline
print tag, '%3d' % i, \
repr(data), encodestring(mac)[:-1]
# now decode the message packets and check it against the original text
print 'Undigesting wheat...'
newtext = "".join(wheat)
if newtext == text:
print 'They match!'
else:
print 'They differ!'
| apache-2.0 |
TathagataChakraborti/resource-conflicts | PLANROB-2015/py2.5/lib/python2.5/distutils/command/bdist_msi.py | 88 | 30900 | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2005, 2006 Martin v. Löwis
# Licensed to PSF under a Contributor Agreement.
# The bdist_wininst command proper
# based on bdist_wininst
"""
Implements the bdist_msi command.
"""
import sys, os, string
from distutils.core import Command
from distutils.util import get_platform
from distutils.dir_util import remove_tree
from distutils.sysconfig import get_python_version
from distutils.version import StrictVersion
from distutils.errors import DistutilsOptionError
from distutils import log
import msilib
from msilib import schema, sequence, text
from msilib import Directory, Feature, Dialog, add_data
class PyDialog(Dialog):
"""Dialog class with a fixed layout: controls at the top, then a ruler,
then a list of buttons: back, next, cancel. Optionally a bitmap at the
left."""
def __init__(self, *args, **kw):
"""Dialog(database, name, x, y, w, h, attributes, title, first,
default, cancel, bitmap=true)"""
Dialog.__init__(self, *args)
ruler = self.h - 36
bmwidth = 152*ruler/328
#if kw.get("bitmap", True):
# self.bitmap("Bitmap", 0, 0, bmwidth, ruler, "PythonWin")
self.line("BottomLine", 0, ruler, self.w, 0)
def title(self, title):
"Set the title text of the dialog at the top."
# name, x, y, w, h, flags=Visible|Enabled|Transparent|NoPrefix,
# text, in VerdanaBold10
self.text("Title", 15, 10, 320, 60, 0x30003,
r"{\VerdanaBold10}%s" % title)
def back(self, title, next, name = "Back", active = 1):
"""Add a back button with a given title, the tab-next button,
its name in the Control table, possibly initially disabled.
Return the button, so that events can be associated"""
if active:
flags = 3 # Visible|Enabled
else:
flags = 1 # Visible
return self.pushbutton(name, 180, self.h-27 , 56, 17, flags, title, next)
def cancel(self, title, next, name = "Cancel", active = 1):
"""Add a cancel button with a given title, the tab-next button,
its name in the Control table, possibly initially disabled.
Return the button, so that events can be associated"""
if active:
flags = 3 # Visible|Enabled
else:
flags = 1 # Visible
return self.pushbutton(name, 304, self.h-27, 56, 17, flags, title, next)
def next(self, title, next, name = "Next", active = 1):
"""Add a Next button with a given title, the tab-next button,
its name in the Control table, possibly initially disabled.
Return the button, so that events can be associated"""
if active:
flags = 3 # Visible|Enabled
else:
flags = 1 # Visible
return self.pushbutton(name, 236, self.h-27, 56, 17, flags, title, next)
def xbutton(self, name, title, next, xpos):
"""Add a button with a given title, the tab-next button,
its name in the Control table, giving its x position; the
y-position is aligned with the other buttons.
Return the button, so that events can be associated"""
return self.pushbutton(name, int(self.w*xpos - 28), self.h-27, 56, 17, 3, title, next)
class bdist_msi (Command):
description = "create a Microsoft Installer (.msi) binary distribution"
user_options = [('bdist-dir=', None,
"temporary directory for creating the distribution"),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('target-version=', None,
"require a specific python version" +
" on the target system"),
('no-target-compile', 'c',
"do not compile .py to .pyc on the target system"),
('no-target-optimize', 'o',
"do not compile .py to .pyo (optimized)"
"on the target system"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
('install-script=', None,
"basename of installation script to be run after"
"installation or before deinstallation"),
('pre-install-script=', None,
"Fully qualified filename of a script to be run before "
"any files are installed. This script need not be in the "
"distribution"),
]
boolean_options = ['keep-temp', 'no-target-compile', 'no-target-optimize',
'skip-build']
def initialize_options (self):
self.bdist_dir = None
self.keep_temp = 0
self.no_target_compile = 0
self.no_target_optimize = 0
self.target_version = None
self.dist_dir = None
self.skip_build = 0
self.install_script = None
self.pre_install_script = None
def finalize_options (self):
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'msi')
short_version = get_python_version()
if self.target_version:
if not self.skip_build and self.distribution.has_ext_modules()\
and self.target_version != short_version:
raise DistutilsOptionError, \
"target version can only be %s, or the '--skip_build'" \
" option must be specified" % (short_version,)
else:
self.target_version = short_version
self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
if self.pre_install_script:
raise DistutilsOptionError, "the pre-install-script feature is not yet implemented"
if self.install_script:
for script in self.distribution.scripts:
if self.install_script == os.path.basename(script):
break
else:
raise DistutilsOptionError, \
"install_script '%s' not found in scripts" % \
self.install_script
self.install_script_key = None
# finalize_options()
def run (self):
if not self.skip_build:
self.run_command('build')
install = self.reinitialize_command('install', reinit_subcommands=1)
install.prefix = self.bdist_dir
install.skip_build = self.skip_build
install.warn_dir = 0
install_lib = self.reinitialize_command('install_lib')
# we do not want to include pyc or pyo files
install_lib.compile = 0
install_lib.optimize = 0
if self.distribution.has_ext_modules():
# If we are building an installer for a Python version other
# than the one we are currently running, then we need to ensure
# our build_lib reflects the other Python version rather than ours.
# Note that for target_version!=sys.version, we must have skipped the
# build step, so there is no issue with enforcing the build of this
# version.
target_version = self.target_version
if not target_version:
assert self.skip_build, "Should have already checked this"
target_version = sys.version[0:3]
plat_specifier = ".%s-%s" % (get_platform(), target_version)
build = self.get_finalized_command('build')
build.build_lib = os.path.join(build.build_base,
'lib' + plat_specifier)
log.info("installing to %s", self.bdist_dir)
install.ensure_finalized()
# avoid warning of 'install_lib' about installing
# into a directory not in sys.path
sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB'))
install.run()
del sys.path[0]
self.mkpath(self.dist_dir)
fullname = self.distribution.get_fullname()
installer_name = self.get_installer_filename(fullname)
installer_name = os.path.abspath(installer_name)
if os.path.exists(installer_name): os.unlink(installer_name)
metadata = self.distribution.metadata
author = metadata.author
if not author:
author = metadata.maintainer
if not author:
author = "UNKNOWN"
version = metadata.get_version()
# ProductVersion must be strictly numeric
# XXX need to deal with prerelease versions
sversion = "%d.%d.%d" % StrictVersion(version).version
# Prefix ProductName with Python x.y, so that
# it sorts together with the other Python packages
        # in Add-Remove-Programs (ARP)
product_name = "Python %s %s" % (self.target_version,
self.distribution.get_fullname())
self.db = msilib.init_database(installer_name, schema,
product_name, msilib.gen_uuid(),
sversion, author)
msilib.add_tables(self.db, sequence)
props = [('DistVersion', version)]
email = metadata.author_email or metadata.maintainer_email
if email:
props.append(("ARPCONTACT", email))
if metadata.url:
props.append(("ARPURLINFOABOUT", metadata.url))
if props:
add_data(self.db, 'Property', props)
self.add_find_python()
self.add_files()
self.add_scripts()
self.add_ui()
self.db.Commit()
if hasattr(self.distribution, 'dist_files'):
self.distribution.dist_files.append(('bdist_msi', self.target_version, fullname))
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
def add_files(self):
db = self.db
cab = msilib.CAB("distfiles")
f = Feature(db, "default", "Default Feature", "Everything", 1, directory="TARGETDIR")
f.set_current()
rootdir = os.path.abspath(self.bdist_dir)
root = Directory(db, cab, None, rootdir, "TARGETDIR", "SourceDir")
db.Commit()
todo = [root]
while todo:
dir = todo.pop()
for file in os.listdir(dir.absolute):
afile = os.path.join(dir.absolute, file)
if os.path.isdir(afile):
newdir = Directory(db, cab, dir, file, file, "%s|%s" % (dir.make_short(file), file))
todo.append(newdir)
else:
key = dir.add_file(file)
if file==self.install_script:
if self.install_script_key:
raise DistutilsOptionError, "Multiple files with name %s" % file
self.install_script_key = '[#%s]' % key
cab.commit(db)
def add_find_python(self):
"""Adds code to the installer to compute the location of Python.
Properties PYTHON.MACHINE, PYTHON.USER, PYTHONDIR and PYTHON will be set
in both the execute and UI sequences; PYTHONDIR will be set from
PYTHON.USER if defined, else from PYTHON.MACHINE.
PYTHON is PYTHONDIR\python.exe"""
install_path = r"SOFTWARE\Python\PythonCore\%s\InstallPath" % self.target_version
add_data(self.db, "RegLocator",
[("python.machine", 2, install_path, None, 2),
("python.user", 1, install_path, None, 2)])
add_data(self.db, "AppSearch",
[("PYTHON.MACHINE", "python.machine"),
("PYTHON.USER", "python.user")])
add_data(self.db, "CustomAction",
[("PythonFromMachine", 51+256, "PYTHONDIR", "[PYTHON.MACHINE]"),
("PythonFromUser", 51+256, "PYTHONDIR", "[PYTHON.USER]"),
("PythonExe", 51+256, "PYTHON", "[PYTHONDIR]\\python.exe"),
("InitialTargetDir", 51+256, "TARGETDIR", "[PYTHONDIR]")])
add_data(self.db, "InstallExecuteSequence",
[("PythonFromMachine", "PYTHON.MACHINE", 401),
("PythonFromUser", "PYTHON.USER", 402),
("PythonExe", None, 403),
("InitialTargetDir", 'TARGETDIR=""', 404),
])
add_data(self.db, "InstallUISequence",
[("PythonFromMachine", "PYTHON.MACHINE", 401),
("PythonFromUser", "PYTHON.USER", 402),
("PythonExe", None, 403),
("InitialTargetDir", 'TARGETDIR=""', 404),
])
def add_scripts(self):
if self.install_script:
add_data(self.db, "CustomAction",
[("install_script", 50, "PYTHON", self.install_script_key)])
add_data(self.db, "InstallExecuteSequence",
[("install_script", "NOT Installed", 6800)])
if self.pre_install_script:
scriptfn = os.path.join(self.bdist_dir, "preinstall.bat")
f = open(scriptfn, "w")
# The batch file will be executed with [PYTHON], so that %1
# is the path to the Python interpreter; %0 will be the path
# of the batch file.
# rem ="""
# %1 %0
# exit
# """
# <actual script>
f.write('rem ="""\n%1 %0\nexit\n"""\n')
f.write(open(self.pre_install_script).read())
f.close()
add_data(self.db, "Binary",
[("PreInstall", msilib.Binary(scriptfn))
])
add_data(self.db, "CustomAction",
[("PreInstall", 2, "PreInstall", None)
])
add_data(self.db, "InstallExecuteSequence",
[("PreInstall", "NOT Installed", 450)])
def add_ui(self):
db = self.db
x = y = 50
w = 370
h = 300
title = "[ProductName] Setup"
# see "Dialog Style Bits"
modal = 3 # visible | modal
modeless = 1 # visible
track_disk_space = 32
# UI customization properties
add_data(db, "Property",
# See "DefaultUIFont Property"
[("DefaultUIFont", "DlgFont8"),
# See "ErrorDialog Style Bit"
("ErrorDialog", "ErrorDlg"),
("Progress1", "Install"), # modified in maintenance type dlg
("Progress2", "installs"),
("MaintenanceForm_Action", "Repair"),
# possible values: ALL, JUSTME
("WhichUsers", "ALL")
])
# Fonts, see "TextStyle Table"
add_data(db, "TextStyle",
[("DlgFont8", "Tahoma", 9, None, 0),
("DlgFontBold8", "Tahoma", 8, None, 1), #bold
("VerdanaBold10", "Verdana", 10, None, 1),
("VerdanaRed9", "Verdana", 9, 255, 0),
])
# UI Sequences, see "InstallUISequence Table", "Using a Sequence Table"
# Numbers indicate sequence; see sequence.py for how these action integrate
add_data(db, "InstallUISequence",
[("PrepareDlg", "Not Privileged or Windows9x or Installed", 140),
("WhichUsersDlg", "Privileged and not Windows9x and not Installed", 141),
# In the user interface, assume all-users installation if privileged.
("SelectDirectoryDlg", "Not Installed", 1230),
# XXX no support for resume installations yet
#("ResumeDlg", "Installed AND (RESUME OR Preselected)", 1240),
("MaintenanceTypeDlg", "Installed AND NOT RESUME AND NOT Preselected", 1250),
("ProgressDlg", None, 1280)])
add_data(db, 'ActionText', text.ActionText)
add_data(db, 'UIText', text.UIText)
#####################################################################
# Standard dialogs: FatalError, UserExit, ExitDialog
fatal=PyDialog(db, "FatalError", x, y, w, h, modal, title,
"Finish", "Finish", "Finish")
fatal.title("[ProductName] Installer ended prematurely")
fatal.back("< Back", "Finish", active = 0)
fatal.cancel("Cancel", "Back", active = 0)
fatal.text("Description1", 15, 70, 320, 80, 0x30003,
"[ProductName] setup ended prematurely because of an error. Your system has not been modified. To install this program at a later time, please run the installation again.")
fatal.text("Description2", 15, 155, 320, 20, 0x30003,
"Click the Finish button to exit the Installer.")
c=fatal.next("Finish", "Cancel", name="Finish")
c.event("EndDialog", "Exit")
user_exit=PyDialog(db, "UserExit", x, y, w, h, modal, title,
"Finish", "Finish", "Finish")
user_exit.title("[ProductName] Installer was interrupted")
user_exit.back("< Back", "Finish", active = 0)
user_exit.cancel("Cancel", "Back", active = 0)
user_exit.text("Description1", 15, 70, 320, 80, 0x30003,
"[ProductName] setup was interrupted. Your system has not been modified. "
"To install this program at a later time, please run the installation again.")
user_exit.text("Description2", 15, 155, 320, 20, 0x30003,
"Click the Finish button to exit the Installer.")
c = user_exit.next("Finish", "Cancel", name="Finish")
c.event("EndDialog", "Exit")
exit_dialog = PyDialog(db, "ExitDialog", x, y, w, h, modal, title,
"Finish", "Finish", "Finish")
exit_dialog.title("Completing the [ProductName] Installer")
exit_dialog.back("< Back", "Finish", active = 0)
exit_dialog.cancel("Cancel", "Back", active = 0)
exit_dialog.text("Description", 15, 235, 320, 20, 0x30003,
"Click the Finish button to exit the Installer.")
c = exit_dialog.next("Finish", "Cancel", name="Finish")
c.event("EndDialog", "Return")
#####################################################################
# Required dialog: FilesInUse, ErrorDlg
inuse = PyDialog(db, "FilesInUse",
x, y, w, h,
19, # KeepModeless|Modal|Visible
title,
"Retry", "Retry", "Retry", bitmap=False)
inuse.text("Title", 15, 6, 200, 15, 0x30003,
r"{\DlgFontBold8}Files in Use")
inuse.text("Description", 20, 23, 280, 20, 0x30003,
"Some files that need to be updated are currently in use.")
inuse.text("Text", 20, 55, 330, 50, 3,
"The following applications are using files that need to be updated by this setup. Close these applications and then click Retry to continue the installation or Cancel to exit it.")
inuse.control("List", "ListBox", 20, 107, 330, 130, 7, "FileInUseProcess",
None, None, None)
c=inuse.back("Exit", "Ignore", name="Exit")
c.event("EndDialog", "Exit")
c=inuse.next("Ignore", "Retry", name="Ignore")
c.event("EndDialog", "Ignore")
c=inuse.cancel("Retry", "Exit", name="Retry")
c.event("EndDialog","Retry")
# See "Error Dialog". See "ICE20" for the required names of the controls.
error = Dialog(db, "ErrorDlg",
50, 10, 330, 101,
65543, # Error|Minimize|Modal|Visible
title,
"ErrorText", None, None)
error.text("ErrorText", 50,9,280,48,3, "")
#error.control("ErrorIcon", "Icon", 15, 9, 24, 24, 5242881, None, "py.ico", None, None)
error.pushbutton("N",120,72,81,21,3,"No",None).event("EndDialog","ErrorNo")
error.pushbutton("Y",240,72,81,21,3,"Yes",None).event("EndDialog","ErrorYes")
error.pushbutton("A",0,72,81,21,3,"Abort",None).event("EndDialog","ErrorAbort")
error.pushbutton("C",42,72,81,21,3,"Cancel",None).event("EndDialog","ErrorCancel")
error.pushbutton("I",81,72,81,21,3,"Ignore",None).event("EndDialog","ErrorIgnore")
error.pushbutton("O",159,72,81,21,3,"Ok",None).event("EndDialog","ErrorOk")
error.pushbutton("R",198,72,81,21,3,"Retry",None).event("EndDialog","ErrorRetry")
#####################################################################
# Global "Query Cancel" dialog
cancel = Dialog(db, "CancelDlg", 50, 10, 260, 85, 3, title,
"No", "No", "No")
cancel.text("Text", 48, 15, 194, 30, 3,
"Are you sure you want to cancel [ProductName] installation?")
#cancel.control("Icon", "Icon", 15, 15, 24, 24, 5242881, None,
# "py.ico", None, None)
c=cancel.pushbutton("Yes", 72, 57, 56, 17, 3, "Yes", "No")
c.event("EndDialog", "Exit")
c=cancel.pushbutton("No", 132, 57, 56, 17, 3, "No", "Yes")
c.event("EndDialog", "Return")
#####################################################################
# Global "Wait for costing" dialog
costing = Dialog(db, "WaitForCostingDlg", 50, 10, 260, 85, modal, title,
"Return", "Return", "Return")
costing.text("Text", 48, 15, 194, 30, 3,
"Please wait while the installer finishes determining your disk space requirements.")
c = costing.pushbutton("Return", 102, 57, 56, 17, 3, "Return", None)
c.event("EndDialog", "Exit")
#####################################################################
# Preparation dialog: no user input except cancellation
prep = PyDialog(db, "PrepareDlg", x, y, w, h, modeless, title,
"Cancel", "Cancel", "Cancel")
prep.text("Description", 15, 70, 320, 40, 0x30003,
"Please wait while the Installer prepares to guide you through the installation.")
prep.title("Welcome to the [ProductName] Installer")
c=prep.text("ActionText", 15, 110, 320, 20, 0x30003, "Pondering...")
c.mapping("ActionText", "Text")
c=prep.text("ActionData", 15, 135, 320, 30, 0x30003, None)
c.mapping("ActionData", "Text")
prep.back("Back", None, active=0)
prep.next("Next", None, active=0)
c=prep.cancel("Cancel", None)
c.event("SpawnDialog", "CancelDlg")
#####################################################################
# Target directory selection
seldlg = PyDialog(db, "SelectDirectoryDlg", x, y, w, h, modal, title,
"Next", "Next", "Cancel")
seldlg.title("Select Destination Directory")
version = sys.version[:3]+" "
seldlg.text("Hint", 15, 30, 300, 40, 3,
"The destination directory should contain a Python %sinstallation" % version)
seldlg.back("< Back", None, active=0)
c = seldlg.next("Next >", "Cancel")
c.event("SetTargetPath", "TARGETDIR", ordering=1)
c.event("SpawnWaitDialog", "WaitForCostingDlg", ordering=2)
c.event("EndDialog", "Return", ordering=3)
c = seldlg.cancel("Cancel", "DirectoryCombo")
c.event("SpawnDialog", "CancelDlg")
seldlg.control("DirectoryCombo", "DirectoryCombo", 15, 70, 272, 80, 393219,
"TARGETDIR", None, "DirectoryList", None)
seldlg.control("DirectoryList", "DirectoryList", 15, 90, 308, 136, 3, "TARGETDIR",
None, "PathEdit", None)
seldlg.control("PathEdit", "PathEdit", 15, 230, 306, 16, 3, "TARGETDIR", None, "Next", None)
c = seldlg.pushbutton("Up", 306, 70, 18, 18, 3, "Up", None)
c.event("DirectoryListUp", "0")
c = seldlg.pushbutton("NewDir", 324, 70, 30, 18, 3, "New", None)
c.event("DirectoryListNew", "0")
#####################################################################
# Disk cost
cost = PyDialog(db, "DiskCostDlg", x, y, w, h, modal, title,
"OK", "OK", "OK", bitmap=False)
cost.text("Title", 15, 6, 200, 15, 0x30003,
"{\DlgFontBold8}Disk Space Requirements")
cost.text("Description", 20, 20, 280, 20, 0x30003,
"The disk space required for the installation of the selected features.")
cost.text("Text", 20, 53, 330, 60, 3,
"The highlighted volumes (if any) do not have enough disk space "
"available for the currently selected features. You can either "
"remove some files from the highlighted volumes, or choose to "
"install less features onto local drive(s), or select different "
"destination drive(s).")
cost.control("VolumeList", "VolumeCostList", 20, 100, 330, 150, 393223,
None, "{120}{70}{70}{70}{70}", None, None)
cost.xbutton("OK", "Ok", None, 0.5).event("EndDialog", "Return")
#####################################################################
# WhichUsers Dialog. Only available on NT, and for privileged users.
# This must be run before FindRelatedProducts, because that will
# take into account whether the previous installation was per-user
# or per-machine. We currently don't support going back to this
# dialog after "Next" was selected; to support this, we would need to
# find how to reset the ALLUSERS property, and how to re-run
# FindRelatedProducts.
# On Windows9x, the ALLUSERS property is ignored on the command line
# and in the Property table, but installer fails according to the documentation
# if a dialog attempts to set ALLUSERS.
whichusers = PyDialog(db, "WhichUsersDlg", x, y, w, h, modal, title,
"AdminInstall", "Next", "Cancel")
whichusers.title("Select whether to install [ProductName] for all users of this computer.")
# A radio group with two options: allusers, justme
g = whichusers.radiogroup("AdminInstall", 15, 60, 260, 50, 3,
"WhichUsers", "", "Next")
g.add("ALL", 0, 5, 150, 20, "Install for all users")
g.add("JUSTME", 0, 25, 150, 20, "Install just for me")
whichusers.back("Back", None, active=0)
c = whichusers.next("Next >", "Cancel")
c.event("[ALLUSERS]", "1", 'WhichUsers="ALL"', 1)
c.event("EndDialog", "Return", ordering = 2)
c = whichusers.cancel("Cancel", "AdminInstall")
c.event("SpawnDialog", "CancelDlg")
#####################################################################
# Installation Progress dialog (modeless)
progress = PyDialog(db, "ProgressDlg", x, y, w, h, modeless, title,
"Cancel", "Cancel", "Cancel", bitmap=False)
progress.text("Title", 20, 15, 200, 15, 0x30003,
"{\DlgFontBold8}[Progress1] [ProductName]")
progress.text("Text", 35, 65, 300, 30, 3,
"Please wait while the Installer [Progress2] [ProductName]. "
"This may take several minutes.")
progress.text("StatusLabel", 35, 100, 35, 20, 3, "Status:")
c=progress.text("ActionText", 70, 100, w-70, 20, 3, "Pondering...")
c.mapping("ActionText", "Text")
#c=progress.text("ActionData", 35, 140, 300, 20, 3, None)
#c.mapping("ActionData", "Text")
c=progress.control("ProgressBar", "ProgressBar", 35, 120, 300, 10, 65537,
None, "Progress done", None, None)
c.mapping("SetProgress", "Progress")
progress.back("< Back", "Next", active=False)
progress.next("Next >", "Cancel", active=False)
progress.cancel("Cancel", "Back").event("SpawnDialog", "CancelDlg")
###################################################################
# Maintenance type: repair/uninstall
maint = PyDialog(db, "MaintenanceTypeDlg", x, y, w, h, modal, title,
"Next", "Next", "Cancel")
maint.title("Welcome to the [ProductName] Setup Wizard")
maint.text("BodyText", 15, 63, 330, 42, 3,
"Select whether you want to repair or remove [ProductName].")
g=maint.radiogroup("RepairRadioGroup", 15, 108, 330, 60, 3,
"MaintenanceForm_Action", "", "Next")
#g.add("Change", 0, 0, 200, 17, "&Change [ProductName]")
g.add("Repair", 0, 18, 200, 17, "&Repair [ProductName]")
g.add("Remove", 0, 36, 200, 17, "Re&move [ProductName]")
maint.back("< Back", None, active=False)
c=maint.next("Finish", "Cancel")
# Change installation: Change progress dialog to "Change", then ask
# for feature selection
#c.event("[Progress1]", "Change", 'MaintenanceForm_Action="Change"', 1)
#c.event("[Progress2]", "changes", 'MaintenanceForm_Action="Change"', 2)
# Reinstall: Change progress dialog to "Repair", then invoke reinstall
# Also set list of reinstalled features to "ALL"
c.event("[REINSTALL]", "ALL", 'MaintenanceForm_Action="Repair"', 5)
c.event("[Progress1]", "Repairing", 'MaintenanceForm_Action="Repair"', 6)
c.event("[Progress2]", "repairs", 'MaintenanceForm_Action="Repair"', 7)
c.event("Reinstall", "ALL", 'MaintenanceForm_Action="Repair"', 8)
# Uninstall: Change progress to "Remove", then invoke uninstall
# Also set list of removed features to "ALL"
c.event("[REMOVE]", "ALL", 'MaintenanceForm_Action="Remove"', 11)
c.event("[Progress1]", "Removing", 'MaintenanceForm_Action="Remove"', 12)
c.event("[Progress2]", "removes", 'MaintenanceForm_Action="Remove"', 13)
c.event("Remove", "ALL", 'MaintenanceForm_Action="Remove"', 14)
# Close dialog when maintenance action scheduled
c.event("EndDialog", "Return", 'MaintenanceForm_Action<>"Change"', 20)
#c.event("NewDialog", "SelectFeaturesDlg", 'MaintenanceForm_Action="Change"', 21)
maint.cancel("Cancel", "RepairRadioGroup").event("SpawnDialog", "CancelDlg")
def get_installer_filename(self, fullname):
# Factored out to allow overriding in subclasses
installer_name = os.path.join(self.dist_dir,
"%s.win32-py%s.msi" %
(fullname, self.target_version))
return installer_name
| mit |
vladimir-ipatov/ganeti | test/py/ganeti.impexpd_unittest.py | 9 | 8733 | #!/usr/bin/python
#
# Copyright (C) 2010 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Script for testing ganeti.impexpd"""
import os
import sys
import re
import unittest
import socket
from ganeti import constants
from ganeti import objects
from ganeti import compat
from ganeti import utils
from ganeti import errors
from ganeti import impexpd
import testutils
class CmdBuilderConfig(objects.ConfigObject):
__slots__ = [
"bind",
"key",
"cert",
"ca",
"host",
"port",
"ipv4",
"ipv6",
"compress",
"magic",
"connect_timeout",
"connect_retries",
"cmd_prefix",
"cmd_suffix",
]
def CheckCmdWord(cmd, word):
wre = re.compile(r"\b%s\b" % re.escape(word))
return compat.any(wre.search(i) for i in cmd)
class TestCommandBuilder(unittest.TestCase):
def test(self):
for mode in [constants.IEM_IMPORT, constants.IEM_EXPORT]:
if mode == constants.IEM_IMPORT:
comprcmd = "gunzip"
elif mode == constants.IEM_EXPORT:
comprcmd = "gzip"
for compress in [constants.IEC_NONE, constants.IEC_GZIP]:
for magic in [None, 10 * "-", "HelloWorld", "J9plh4nFo2",
"24A02A81-2264-4B51-A882-A2AB9D85B420"]:
opts = CmdBuilderConfig(magic=magic, compress=compress)
builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3)
magic_cmd = builder._GetMagicCommand()
dd_cmd = builder._GetDdCommand()
if magic:
self.assert_(("M=%s" % magic) in magic_cmd)
self.assert_(("M=%s" % magic) in dd_cmd)
else:
self.assertFalse(magic_cmd)
for host in ["localhost", "198.51.100.4", "192.0.2.99"]:
for port in [0, 1, 1234, 7856, 45452]:
for cmd_prefix in [None, "PrefixCommandGoesHere|",
"dd if=/dev/hda bs=1048576 |"]:
for cmd_suffix in [None, "< /some/file/name",
"| dd of=/dev/null"]:
opts = CmdBuilderConfig(host=host, port=port, compress=compress,
cmd_prefix=cmd_prefix,
cmd_suffix=cmd_suffix)
builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3)
# Check complete command
cmd = builder.GetCommand()
self.assert_(isinstance(cmd, list))
if compress == constants.IEC_GZIP:
self.assert_(CheckCmdWord(cmd, comprcmd))
if cmd_prefix is not None:
self.assert_(compat.any(cmd_prefix in i for i in cmd))
if cmd_suffix is not None:
self.assert_(compat.any(cmd_suffix in i for i in cmd))
# Check socat command
socat_cmd = builder._GetSocatCommand()
if mode == constants.IEM_IMPORT:
ssl_addr = socat_cmd[-2].split(",")
self.assert_(("OPENSSL-LISTEN:%s" % port) in ssl_addr)
elif mode == constants.IEM_EXPORT:
ssl_addr = socat_cmd[-1].split(",")
self.assert_(("OPENSSL:%s:%s" % (host, port)) in ssl_addr)
self.assert_("verify=1" in ssl_addr)
def testIPv6(self):
for mode in [constants.IEM_IMPORT, constants.IEM_EXPORT]:
opts = CmdBuilderConfig(host="localhost", port=6789,
ipv4=False, ipv6=False)
builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3)
cmd = builder._GetSocatCommand()
self.assert_(compat.all("pf=" not in i for i in cmd))
# IPv4
opts = CmdBuilderConfig(host="localhost", port=6789,
ipv4=True, ipv6=False)
builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3)
cmd = builder._GetSocatCommand()
self.assert_(compat.any(",pf=ipv4" in i for i in cmd))
# IPv6
opts = CmdBuilderConfig(host="localhost", port=6789,
ipv4=False, ipv6=True)
builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3)
cmd = builder._GetSocatCommand()
self.assert_(compat.any(",pf=ipv6" in i for i in cmd))
# IPv4 and IPv6
opts = CmdBuilderConfig(host="localhost", port=6789,
ipv4=True, ipv6=True)
builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3)
self.assertRaises(AssertionError, builder._GetSocatCommand)
def testCommaError(self):
opts = CmdBuilderConfig(host="localhost", port=1234,
ca="/some/path/with,a/,comma")
for mode in [constants.IEM_IMPORT, constants.IEM_EXPORT]:
builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3)
self.assertRaises(errors.GenericError, builder.GetCommand)
def testOptionLengthError(self):
testopts = [
CmdBuilderConfig(bind="0.0.0.0" + ("A" * impexpd.SOCAT_OPTION_MAXLEN),
port=1234, ca="/tmp/ca"),
CmdBuilderConfig(host="localhost", port=1234,
ca="/tmp/ca" + ("B" * impexpd.SOCAT_OPTION_MAXLEN)),
CmdBuilderConfig(host="localhost", port=1234,
key="/tmp/key" + ("B" * impexpd.SOCAT_OPTION_MAXLEN)),
]
for opts in testopts:
for mode in [constants.IEM_IMPORT, constants.IEM_EXPORT]:
builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3)
self.assertRaises(errors.GenericError, builder.GetCommand)
opts.host = "localhost" + ("A" * impexpd.SOCAT_OPTION_MAXLEN)
builder = impexpd.CommandBuilder(constants.IEM_EXPORT, opts, 1, 2, 3)
self.assertRaises(errors.GenericError, builder.GetCommand)
def testModeError(self):
mode = "foobarbaz"
assert mode not in [constants.IEM_IMPORT, constants.IEM_EXPORT]
opts = CmdBuilderConfig(host="localhost", port=1234)
builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3)
self.assertRaises(errors.GenericError, builder.GetCommand)
class TestVerifyListening(unittest.TestCase):
def test(self):
self.assertEqual(impexpd._VerifyListening(socket.AF_INET,
"192.0.2.7", 1234),
("192.0.2.7", 1234))
self.assertEqual(impexpd._VerifyListening(socket.AF_INET6, "::1", 9876),
("::1", 9876))
self.assertEqual(impexpd._VerifyListening(socket.AF_INET6, "[::1]", 4563),
("::1", 4563))
self.assertEqual(impexpd._VerifyListening(socket.AF_INET6,
"[2001:db8::1:4563]", 4563),
("2001:db8::1:4563", 4563))
def testError(self):
for family in [socket.AF_UNIX, socket.AF_INET, socket.AF_INET6]:
self.assertRaises(errors.GenericError, impexpd._VerifyListening,
family, "", 1234)
self.assertRaises(errors.GenericError, impexpd._VerifyListening,
family, "192", 999)
for family in [socket.AF_UNIX, socket.AF_INET6]:
self.assertRaises(errors.GenericError, impexpd._VerifyListening,
family, "192.0.2.7", 1234)
self.assertRaises(errors.GenericError, impexpd._VerifyListening,
family, "[2001:db8::1", 1234)
self.assertRaises(errors.GenericError, impexpd._VerifyListening,
family, "2001:db8::1]", 1234)
for family in [socket.AF_UNIX, socket.AF_INET]:
self.assertRaises(errors.GenericError, impexpd._VerifyListening,
family, "::1", 1234)
class TestCalcThroughput(unittest.TestCase):
def test(self):
self.assertEqual(impexpd._CalcThroughput([]), None)
self.assertEqual(impexpd._CalcThroughput([(0, 0)]), None)
samples = [
(0.0, 0.0),
(10.0, 100.0),
]
self.assertAlmostEqual(impexpd._CalcThroughput(samples), 10.0, 3)
samples = [
(5.0, 7.0),
(10.0, 100.0),
(16.0, 181.0),
]
self.assertAlmostEqual(impexpd._CalcThroughput(samples), 15.818, 3)
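# The expected values above are consistent with an endpoint slope,
# (last_bytes - first_bytes) / (last_time - first_time):
#   (100.0 - 0.0) / (10.0 - 0.0) = 10.0
#   (181.0 - 7.0) / (16.0 - 5.0) = 174.0 / 11.0 ~= 15.818
# A minimal sketch under that assumption (the real helper may differ):
#   def _CalcThroughput(samples):
#     if len(samples) < 2:
#       return None
#     (t0, v0) = samples[0]
#     (t1, v1) = samples[-1]
#     return (v1 - v0) / (t1 - t0)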
if __name__ == "__main__":
testutils.GanetiTestProgram()
| gpl-2.0 |
mayankcu/Django-social | venv/Lib/site-packages/django/contrib/gis/geos/prototypes/predicates.py | 623 | 1777 | """
This module houses the GEOS ctypes prototype functions for the
unary and binary predicate operations on geometries.
"""
from ctypes import c_char, c_char_p, c_double
from django.contrib.gis.geos.libgeos import GEOM_PTR
from django.contrib.gis.geos.prototypes.errcheck import check_predicate
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
## Binary & unary predicate functions ##
def binary_predicate(func, *args):
"For GEOS binary predicate functions."
argtypes = [GEOM_PTR, GEOM_PTR]
    if args:
        argtypes += args
func.argtypes = argtypes
func.restype = c_char
func.errcheck = check_predicate
return func
def unary_predicate(func):
"For GEOS unary predicate functions."
func.argtypes = [GEOM_PTR]
func.restype = c_char
func.errcheck = check_predicate
return func
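# Hedged note on the wrappers above: GEOS C predicate functions return a
# char (0 = false, 1 = true, 2 = exception), hence restype = c_char with
# check_predicate assumed to convert that to a Python bool or raise.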
## Unary Predicates ##
geos_hasz = unary_predicate(GEOSFunc('GEOSHasZ'))
geos_isempty = unary_predicate(GEOSFunc('GEOSisEmpty'))
geos_isring = unary_predicate(GEOSFunc('GEOSisRing'))
geos_issimple = unary_predicate(GEOSFunc('GEOSisSimple'))
geos_isvalid = unary_predicate(GEOSFunc('GEOSisValid'))
## Binary Predicates ##
geos_contains = binary_predicate(GEOSFunc('GEOSContains'))
geos_crosses = binary_predicate(GEOSFunc('GEOSCrosses'))
geos_disjoint = binary_predicate(GEOSFunc('GEOSDisjoint'))
geos_equals = binary_predicate(GEOSFunc('GEOSEquals'))
geos_equalsexact = binary_predicate(GEOSFunc('GEOSEqualsExact'), c_double)
geos_intersects = binary_predicate(GEOSFunc('GEOSIntersects'))
geos_overlaps = binary_predicate(GEOSFunc('GEOSOverlaps'))
geos_relatepattern = binary_predicate(GEOSFunc('GEOSRelatePattern'), c_char_p)
geos_touches = binary_predicate(GEOSFunc('GEOSTouches'))
geos_within = binary_predicate(GEOSFunc('GEOSWithin'))
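# A hedged usage sketch (example calls, not part of this module). `g1` and
# `g2` stand for raw GEOM_PTR handles, e.g. the `ptr` attribute of a
# GEOSGeometry:
#   geos_hasz(g1)                  # unary: one geometry handle
#   geos_touches(g1, g2)           # binary: two geometry handles
#   geos_equalsexact(g1, g2, 0.0)  # binary plus a c_double tolerance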
| bsd-3-clause |
iansf/sky_engine | sky/tools/webkitpy/layout_tests/port/linux.py | 7 | 6967 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import re
from webkitpy.common.webkit_finder import WebKitFinder
from webkitpy.layout_tests.breakpad.dump_reader_multipart import DumpReaderLinux
from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.port import base
from webkitpy.layout_tests.port import config
_log = logging.getLogger(__name__)
class LinuxPort(base.Port):
port_name = 'linux'
SUPPORTED_VERSIONS = ('x86', 'x86_64')
FALLBACK_PATHS = { 'x86_64': [ 'linux' ] }
FALLBACK_PATHS['x86'] = ['linux-x86'] + FALLBACK_PATHS['x86_64']
DEFAULT_BUILD_DIRECTORIES = ('out',)
BUILD_REQUIREMENTS_URL = 'https://code.google.com/p/chromium/wiki/LinuxBuildInstructions'
@classmethod
def _determine_driver_path_statically(cls, host, options):
config_object = config.Config(host.executive, host.filesystem)
build_directory = getattr(options, 'build_directory', None)
finder = WebKitFinder(host.filesystem)
webkit_base = finder.webkit_base()
chromium_base = finder.chromium_base()
driver_name = cls.SKY_SHELL_NAME
if hasattr(options, 'configuration') and options.configuration:
configuration = options.configuration
else:
configuration = config_object.default_configuration()
return cls._static_build_path(host.filesystem, build_directory, chromium_base, configuration, [driver_name])
@staticmethod
def _determine_architecture(filesystem, executive, driver_path):
file_output = ''
if filesystem.isfile(driver_path):
# The --dereference flag tells file to follow symlinks
file_output = executive.run_command(['file', '--brief', '--dereference', driver_path], return_stderr=True)
if re.match(r'ELF 32-bit LSB\s+executable', file_output):
return 'x86'
if re.match(r'ELF 64-bit LSB\s+executable', file_output):
return 'x86_64'
if file_output:
_log.warning('Could not determine architecture from "file" output: %s' % file_output)
        # We don't know what the architecture is; default to 'x86_64' because
        # maybe we're rebaselining and the binary doesn't actually exist,
        # or something else weird is going on. It's okay to do this because
        # if we actually try to use the binary, check_build() should fail.
return 'x86_64'
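    # Illustrative `file --brief --dereference` outputs the regexes above
    # target (sample strings, not captured from a real run):
    #   "ELF 32-bit LSB executable, Intel 80386, ..."  -> 'x86'
    #   "ELF 64-bit LSB executable, x86-64, ..."       -> 'x86_64'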
@classmethod
def determine_full_port_name(cls, host, options, port_name):
if port_name.endswith('linux'):
return port_name + '-' + cls._determine_architecture(host.filesystem, host.executive, cls._determine_driver_path_statically(host, options))
return port_name
def __init__(self, host, port_name, **kwargs):
super(LinuxPort, self).__init__(host, port_name, **kwargs)
(base, arch) = port_name.rsplit('-', 1)
assert base == 'linux'
assert arch in self.SUPPORTED_VERSIONS
assert port_name in ('linux', 'linux-x86', 'linux-x86_64')
self._version = 'lucid' # We only support lucid right now.
self._architecture = arch
if not self.get_option('disable_breakpad'):
self._dump_reader = DumpReaderLinux(host, self._build_path())
def default_baseline_search_path(self):
port_names = self.FALLBACK_PATHS[self._architecture]
return map(self._webkit_baseline_path, port_names)
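    # Example, from FALLBACK_PATHS above: a 'linux-x86' port searches
    # 'linux-x86' baselines first and then falls back to 'linux'; a
    # 'linux-x86_64' port searches only 'linux'.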
def _modules_to_search_for_symbols(self):
return [self._build_path('libffmpegsumo.so')]
def check_build(self, needs_http, printer):
result = super(LinuxPort, self).check_build(needs_http, printer)
if result:
_log.error('For complete Linux build requirements, please see:')
_log.error('')
_log.error(' http://code.google.com/p/chromium/wiki/LinuxBuildInstructions')
return result
def look_for_new_crash_logs(self, crashed_processes, start_time):
if self.get_option('disable_breakpad'):
return None
return self._dump_reader.look_for_new_crash_logs(crashed_processes, start_time)
def clobber_old_port_specific_results(self):
if not self.get_option('disable_breakpad'):
self._dump_reader.clobber_old_results()
def operating_system(self):
return 'linux'
#
# PROTECTED METHODS
#
def _check_apache_install(self):
result = self._check_file_exists(self.path_to_apache(), "apache2")
result = self._check_file_exists(self.path_to_apache_config_file(), "apache2 config file") and result
if not result:
_log.error(' Please install using: "sudo apt-get install apache2 libapache2-mod-php5"')
_log.error('')
return result
def _wdiff_missing_message(self):
return 'wdiff is not installed; please install using "sudo apt-get install wdiff"'
def path_to_apache(self):
# The Apache binary path can vary depending on OS and distribution
# See http://wiki.apache.org/httpd/DistrosDefaultLayout
for path in ["/usr/sbin/httpd", "/usr/sbin/apache2"]:
if self._filesystem.exists(path):
return path
_log.error("Could not find apache. Not installed or unknown path.")
return None
def _path_to_driver(self, configuration=None):
binary_name = self.driver_name()
return self._build_path_with_configuration(configuration, binary_name)
| bsd-3-clause |